partition
stringclasses 3
values | func_name
stringlengths 1
134
| docstring
stringlengths 1
46.9k
| path
stringlengths 4
223
| original_string
stringlengths 75
104k
| code
stringlengths 75
104k
| docstring_tokens
listlengths 1
1.97k
| repo
stringlengths 7
55
| language
stringclasses 1
value | url
stringlengths 87
315
| code_tokens
listlengths 19
28.4k
| sha
stringlengths 40
40
|
|---|---|---|---|---|---|---|---|---|---|---|---|
test
|
BatchRun._run
|
Distributed process
|
libplanarradpy/planrad.py
|
def _run(self, run_dir):
"""Distributed process"""
# Check to see if the required run_params files exist, if they dont use the tools to generate them
# --------------------------------------------------#
# HERE WE RECREATE OUR RUN_PARAMS OBJECT FROM
# THE RUN FILE WE WROTE TO DISK EARLIER
# --------------------------------------------------#
file_tools = FileTools()
run_dict = file_tools.read_param_file_to_dict(os.path.join(run_dir, 'batch.txt'))
#print(run_dict['band_centres_data'])
#self.run_params.wavelengths = run_dict['wavelengths']
#run_params = RunParameters()
#run_params = file_tools.dict_to_object(run_params, run_dict)
#------------------------------------------------#
# Sky inputs
#------------------------------------------------#
#lg.debug(run_dict.keys())
#self.run_params.update_filenames()
#lg.debug('!!!!!!!!!' + run_dict['sky_fp'])
if os.path.isfile(run_dict['sky_fp']):
sky_file_exists = True
lg.info('Found sky_tool generated file' + run_dict['sky_fp'])
else:
lg.info('No sky_tool generated file, generating one')
#try:
inp_file = run_dict['sky_fp'] + '_params.txt'
#self.run_params.sky_file = inp_file
self.run_params.write_sky_params_to_file()
#if not os.path.isfile(inp_file):
# lg.error(inp_file + ' : is not a valid parameter file')
lg.debug('Runing skytool' + os.path.join(self.run_params.exec_path, 'skytool_free') + '#')
lg.debug(os.path.join(self.run_params.exec_path, 'skytool_free') + ' params=' + inp_file)
os.system(os.path.join(self.run_params.exec_path, 'skytool_free') + ' params=' + inp_file)
#except OSError:
# lg.exception('Cannot execute PlannarRad, cannot find executable file to skytool_free')
#------------------------------------------------#
# Water surface inputs
#------------------------------------------------#
if os.path.isfile(run_dict['water_surface_fp']):
surface_file_exists = True
lg.info('Found surf_tool generated file' + run_dict['water_surface_fp'])
else:
lg.info('No surf_tool generated file, generating one')
try:
inp_file = run_dict['water_surface_fp'] + '_params.txt'
self.run_params.write_surf_params_to_file()
if not os.path.isfile(inp_file):
lg.error(inp_file + ' : is not a valid parameter file')
os.system(os.path.join(self.run_params.exec_path, 'surftool_free') + ' params=' + inp_file)
except OSError:
lg.exception('Cannot execute PlannarRad, cannot find executable file to surftool_free')
#------------------------------------------------#
# Phase functions inputs
#------------------------------------------------#
if os.path.isfile(run_dict['pf_fp']):
phase_file_exists = True
lg.info('Found phase_tool generated file' + run_dict['pf_fp'])
else:
lg.info('No sky_tool generated file, generating one')
try:
inp_file = run_dict['pf_fp'] + '_params.txt'
self.run_params.write_phase_params_to_file()
if not os.path.isfile(inp_file):
lg.error(inp_file + ' : is not a valid parameter file')
os.system(os.path.join(self.run_params.exec_path, 'phasetool_free') + ' params=' + inp_file)
except OSError:
lg.exception('Cannot execute PlannarRad, cannot find executable file to phasetool_free')
#------------------------------------------------#
# slabtool inputs [Run planarrad]
#------------------------------------------------#
inp_file = run_dict['name']
if not os.path.isfile(inp_file):
lg.error(inp_file + ' : is not a valid batch file')
try:
os.system(os.path.join(self.run_params.exec_path, 'slabtool_free') + ' params=' + inp_file)
except OSError:
lg.exception('Cannot execute PlannarRad, cannot find executable file to slabtool_free')
|
def _run(self, run_dir):
"""Distributed process"""
# Check to see if the required run_params files exist, if they dont use the tools to generate them
# --------------------------------------------------#
# HERE WE RECREATE OUR RUN_PARAMS OBJECT FROM
# THE RUN FILE WE WROTE TO DISK EARLIER
# --------------------------------------------------#
file_tools = FileTools()
run_dict = file_tools.read_param_file_to_dict(os.path.join(run_dir, 'batch.txt'))
#print(run_dict['band_centres_data'])
#self.run_params.wavelengths = run_dict['wavelengths']
#run_params = RunParameters()
#run_params = file_tools.dict_to_object(run_params, run_dict)
#------------------------------------------------#
# Sky inputs
#------------------------------------------------#
#lg.debug(run_dict.keys())
#self.run_params.update_filenames()
#lg.debug('!!!!!!!!!' + run_dict['sky_fp'])
if os.path.isfile(run_dict['sky_fp']):
sky_file_exists = True
lg.info('Found sky_tool generated file' + run_dict['sky_fp'])
else:
lg.info('No sky_tool generated file, generating one')
#try:
inp_file = run_dict['sky_fp'] + '_params.txt'
#self.run_params.sky_file = inp_file
self.run_params.write_sky_params_to_file()
#if not os.path.isfile(inp_file):
# lg.error(inp_file + ' : is not a valid parameter file')
lg.debug('Runing skytool' + os.path.join(self.run_params.exec_path, 'skytool_free') + '#')
lg.debug(os.path.join(self.run_params.exec_path, 'skytool_free') + ' params=' + inp_file)
os.system(os.path.join(self.run_params.exec_path, 'skytool_free') + ' params=' + inp_file)
#except OSError:
# lg.exception('Cannot execute PlannarRad, cannot find executable file to skytool_free')
#------------------------------------------------#
# Water surface inputs
#------------------------------------------------#
if os.path.isfile(run_dict['water_surface_fp']):
surface_file_exists = True
lg.info('Found surf_tool generated file' + run_dict['water_surface_fp'])
else:
lg.info('No surf_tool generated file, generating one')
try:
inp_file = run_dict['water_surface_fp'] + '_params.txt'
self.run_params.write_surf_params_to_file()
if not os.path.isfile(inp_file):
lg.error(inp_file + ' : is not a valid parameter file')
os.system(os.path.join(self.run_params.exec_path, 'surftool_free') + ' params=' + inp_file)
except OSError:
lg.exception('Cannot execute PlannarRad, cannot find executable file to surftool_free')
#------------------------------------------------#
# Phase functions inputs
#------------------------------------------------#
if os.path.isfile(run_dict['pf_fp']):
phase_file_exists = True
lg.info('Found phase_tool generated file' + run_dict['pf_fp'])
else:
lg.info('No sky_tool generated file, generating one')
try:
inp_file = run_dict['pf_fp'] + '_params.txt'
self.run_params.write_phase_params_to_file()
if not os.path.isfile(inp_file):
lg.error(inp_file + ' : is not a valid parameter file')
os.system(os.path.join(self.run_params.exec_path, 'phasetool_free') + ' params=' + inp_file)
except OSError:
lg.exception('Cannot execute PlannarRad, cannot find executable file to phasetool_free')
#------------------------------------------------#
# slabtool inputs [Run planarrad]
#------------------------------------------------#
inp_file = run_dict['name']
if not os.path.isfile(inp_file):
lg.error(inp_file + ' : is not a valid batch file')
try:
os.system(os.path.join(self.run_params.exec_path, 'slabtool_free') + ' params=' + inp_file)
except OSError:
lg.exception('Cannot execute PlannarRad, cannot find executable file to slabtool_free')
|
[
"Distributed",
"process"
] |
marrabld/planarradpy
|
python
|
https://github.com/marrabld/planarradpy/blob/5095d1cb98d4f67a7c3108c9282f2d59253e89a8/libplanarradpy/planrad.py#L643-L728
|
[
"def",
"_run",
"(",
"self",
",",
"run_dir",
")",
":",
"# Check to see if the required run_params files exist, if they dont use the tools to generate them",
"# --------------------------------------------------#",
"# HERE WE RECREATE OUR RUN_PARAMS OBJECT FROM",
"# THE RUN FILE WE WROTE TO DISK EARLIER",
"# --------------------------------------------------#",
"file_tools",
"=",
"FileTools",
"(",
")",
"run_dict",
"=",
"file_tools",
".",
"read_param_file_to_dict",
"(",
"os",
".",
"path",
".",
"join",
"(",
"run_dir",
",",
"'batch.txt'",
")",
")",
"#print(run_dict['band_centres_data'])",
"#self.run_params.wavelengths = run_dict['wavelengths']",
"#run_params = RunParameters()",
"#run_params = file_tools.dict_to_object(run_params, run_dict)",
"#------------------------------------------------#",
"# Sky inputs",
"#------------------------------------------------#",
"#lg.debug(run_dict.keys())",
"#self.run_params.update_filenames()",
"#lg.debug('!!!!!!!!!' + run_dict['sky_fp'])",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"run_dict",
"[",
"'sky_fp'",
"]",
")",
":",
"sky_file_exists",
"=",
"True",
"lg",
".",
"info",
"(",
"'Found sky_tool generated file'",
"+",
"run_dict",
"[",
"'sky_fp'",
"]",
")",
"else",
":",
"lg",
".",
"info",
"(",
"'No sky_tool generated file, generating one'",
")",
"#try:",
"inp_file",
"=",
"run_dict",
"[",
"'sky_fp'",
"]",
"+",
"'_params.txt'",
"#self.run_params.sky_file = inp_file",
"self",
".",
"run_params",
".",
"write_sky_params_to_file",
"(",
")",
"#if not os.path.isfile(inp_file):",
"# lg.error(inp_file + ' : is not a valid parameter file')",
"lg",
".",
"debug",
"(",
"'Runing skytool'",
"+",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"run_params",
".",
"exec_path",
",",
"'skytool_free'",
")",
"+",
"'#'",
")",
"lg",
".",
"debug",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"run_params",
".",
"exec_path",
",",
"'skytool_free'",
")",
"+",
"' params='",
"+",
"inp_file",
")",
"os",
".",
"system",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"run_params",
".",
"exec_path",
",",
"'skytool_free'",
")",
"+",
"' params='",
"+",
"inp_file",
")",
"#except OSError:",
"# lg.exception('Cannot execute PlannarRad, cannot find executable file to skytool_free')",
"#------------------------------------------------#",
"# Water surface inputs",
"#------------------------------------------------#",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"run_dict",
"[",
"'water_surface_fp'",
"]",
")",
":",
"surface_file_exists",
"=",
"True",
"lg",
".",
"info",
"(",
"'Found surf_tool generated file'",
"+",
"run_dict",
"[",
"'water_surface_fp'",
"]",
")",
"else",
":",
"lg",
".",
"info",
"(",
"'No surf_tool generated file, generating one'",
")",
"try",
":",
"inp_file",
"=",
"run_dict",
"[",
"'water_surface_fp'",
"]",
"+",
"'_params.txt'",
"self",
".",
"run_params",
".",
"write_surf_params_to_file",
"(",
")",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"inp_file",
")",
":",
"lg",
".",
"error",
"(",
"inp_file",
"+",
"' : is not a valid parameter file'",
")",
"os",
".",
"system",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"run_params",
".",
"exec_path",
",",
"'surftool_free'",
")",
"+",
"' params='",
"+",
"inp_file",
")",
"except",
"OSError",
":",
"lg",
".",
"exception",
"(",
"'Cannot execute PlannarRad, cannot find executable file to surftool_free'",
")",
"#------------------------------------------------#",
"# Phase functions inputs",
"#------------------------------------------------#",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"run_dict",
"[",
"'pf_fp'",
"]",
")",
":",
"phase_file_exists",
"=",
"True",
"lg",
".",
"info",
"(",
"'Found phase_tool generated file'",
"+",
"run_dict",
"[",
"'pf_fp'",
"]",
")",
"else",
":",
"lg",
".",
"info",
"(",
"'No sky_tool generated file, generating one'",
")",
"try",
":",
"inp_file",
"=",
"run_dict",
"[",
"'pf_fp'",
"]",
"+",
"'_params.txt'",
"self",
".",
"run_params",
".",
"write_phase_params_to_file",
"(",
")",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"inp_file",
")",
":",
"lg",
".",
"error",
"(",
"inp_file",
"+",
"' : is not a valid parameter file'",
")",
"os",
".",
"system",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"run_params",
".",
"exec_path",
",",
"'phasetool_free'",
")",
"+",
"' params='",
"+",
"inp_file",
")",
"except",
"OSError",
":",
"lg",
".",
"exception",
"(",
"'Cannot execute PlannarRad, cannot find executable file to phasetool_free'",
")",
"#------------------------------------------------#",
"# slabtool inputs [Run planarrad]",
"#------------------------------------------------#",
"inp_file",
"=",
"run_dict",
"[",
"'name'",
"]",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"inp_file",
")",
":",
"lg",
".",
"error",
"(",
"inp_file",
"+",
"' : is not a valid batch file'",
")",
"try",
":",
"os",
".",
"system",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"run_params",
".",
"exec_path",
",",
"'slabtool_free'",
")",
"+",
"' params='",
"+",
"inp_file",
")",
"except",
"OSError",
":",
"lg",
".",
"exception",
"(",
"'Cannot execute PlannarRad, cannot find executable file to slabtool_free'",
")"
] |
5095d1cb98d4f67a7c3108c9282f2d59253e89a8
|
test
|
BatchRun.generate_directories
|
For all possible combinations of 'batchable' parameters. create a unique directory to story outputs
Each directory name is unique and contains the run parameters in the directory name
:param overwrite: If set to True will over write all files default = False
|
libplanarradpy/planrad.py
|
def generate_directories(self, overwrite=False):
"""For all possible combinations of 'batchable' parameters. create a unique directory to story outputs
Each directory name is unique and contains the run parameters in the directory name
:param overwrite: If set to True will over write all files default = False
"""
if not os.path.exists(self.batch_output):
try:
lg.info('Creating batch project directory')
if self.batch_output == self.run_params.output_path + 'batch':
lg.warning('Using default project name. Consider renaming!')
os.makedirs(self.batch_output)
except OSError:
lg.exception('Could not create project directory')
elif os.path.exists(self.batch_output) and overwrite == True:
try:
lg.info('Creating batch project directory')
lg.warning('Overwriting existing directories')
if self.batch_output == self.run_params.output_path + 'batch':
lg.warning('Using default project name. Consider renaming!')
os.makedirs(self.batch_output)
except OSError:
lg.exception('Could not create project directory')
# --------------------------------------------------#
# GENERATE ALL THE IOPS FROM BIOP
# --------------------------------------------------#
#--------------------------------------------------#
# WRITE EACH BIOP TO CSV FILE INTO THE INPUT
# DIRECTORY IF IT DOESNT EXIST
#--------------------------------------------------#
#--------------------------------------------------#
# GENERATE A LIST OF ALL COMBINATIONS OF BIOPS
#--------------------------------------------------#
#--------------------------------------------------#
# WRITE THE DIRECTORIES FOR EACH BIOP AND NAME APPROPRIATELY
# DON'T OVERWRITE IF THEY EXIST ALREADY
#--------------------------------------------------#
self.bio_params.read_pure_water_absorption_from_file(
self.run_params.pure_water_absorption_file)
self.bio_params.read_pure_water_scattering_from_file(
self.run_params.pure_water_scattering_file)
self.bio_params.read_aphi_from_file(self.run_params.phytoplankton_absorption_file)
for saa in self.saa_list:
# update the saa in the run file & the todo filename!
self.run_params.sky_aziumth = saa
self.run_params.sky_file = os.path.abspath(
os.path.join(os.path.join(self.run_params.input_path, 'sky_files'),
'sky_' + self.run_params.sky_state + '_z' + str(self.run_params.sky_zenith) + '_a' + str(
self.run_params.sky_azimuth) + '_' + str(
self.run_params.num_bands) + '_' + self.run_params.ds_code))
for sza in self.sza_list:
# update the saz in the run file
self.run_params.sky_zenith = sza
self.run_params.sky_file = os.path.abspath(
os.path.join(os.path.join(self.run_params.input_path, 'sky_files'),
'sky_' + self.run_params.sky_state + '_z' + str(
self.run_params.sky_zenith) + '_a' + str(self.run_params.sky_azimuth) + '_' + str(
self.run_params.num_bands) + '_' + self.run_params.ds_code))
for p in self.p_list:
for x in self.x_list:
for y in self.y_list:
for g in self.g_list:
for s in self.s_list:
for z in self.z_list:
file_name = 'SAA' + str(saa) + '_SZA' + str(sza) + '_P' + str(p) + '_X' + str(
x) + '_Y' + str(y) + '_G' + str(g) + '_S' + str(s) + '_Z' + str(z)
dir_name = os.path.join(self.batch_output, file_name)
self.run_params.output_path = dir_name
#--------------------------------------------------#
# UPDATE THE IOP PARAMETERS FOR THE RUN FILE
#--------------------------------------------------#
self.run_params.sky_azimuth = saa
self.run_params.sky_zenith = sza
self.run_params.depth = z
self.bio_params.build_bbp(x, y) # todo add wave const as a kwarg
self.bio_params.build_a_cdom(g, s)
# Need to re-read the file as it was scaled in a the other run!
self.bio_params.read_aphi_from_file(
self.run_params.phytoplankton_absorption_file)
self.bio_params.scale_aphi(p)
self.bio_params.build_all_iop()
self.run_params.scattering_file = os.path.join(
os.path.join(self.run_params.input_path, 'iop_files'), 'b_' + file_name)
self.bio_params.write_b_to_file(self.run_params.scattering_file)
self.run_params.attenuation_file = os.path.join(
os.path.join(self.run_params.input_path, 'iop_files'), 'c_' + file_name)
self.bio_params.write_c_to_file(self.run_params.attenuation_file)
self.run_params.project_file = os.path.join(dir_name, 'batch.txt')
self.run_params.report_file = os.path.join(dir_name, 'report.txt')
self.run_params.write_sky_params_to_file()
self.run_params.write_surf_params_to_file()
self.run_params.write_phase_params_to_file()
if not os.path.exists(dir_name):
try:
lg.info('Creating run directory')
os.makedirs(dir_name)
self.run_params.write_run_parameters_to_file()
except OSError:
lg.exception('Could not create run directory')
elif os.path.exists(dir_name) and overwrite == True:
try:
lg.info('Creating run directory')
lg.warning('Overwriting existing directories')
os.makedirs(dir_name)
self.run_params.write_run_parameters_to_file()
except OSError:
lg.exception('Could not create run directory')
|
def generate_directories(self, overwrite=False):
"""For all possible combinations of 'batchable' parameters. create a unique directory to story outputs
Each directory name is unique and contains the run parameters in the directory name
:param overwrite: If set to True will over write all files default = False
"""
if not os.path.exists(self.batch_output):
try:
lg.info('Creating batch project directory')
if self.batch_output == self.run_params.output_path + 'batch':
lg.warning('Using default project name. Consider renaming!')
os.makedirs(self.batch_output)
except OSError:
lg.exception('Could not create project directory')
elif os.path.exists(self.batch_output) and overwrite == True:
try:
lg.info('Creating batch project directory')
lg.warning('Overwriting existing directories')
if self.batch_output == self.run_params.output_path + 'batch':
lg.warning('Using default project name. Consider renaming!')
os.makedirs(self.batch_output)
except OSError:
lg.exception('Could not create project directory')
# --------------------------------------------------#
# GENERATE ALL THE IOPS FROM BIOP
# --------------------------------------------------#
#--------------------------------------------------#
# WRITE EACH BIOP TO CSV FILE INTO THE INPUT
# DIRECTORY IF IT DOESNT EXIST
#--------------------------------------------------#
#--------------------------------------------------#
# GENERATE A LIST OF ALL COMBINATIONS OF BIOPS
#--------------------------------------------------#
#--------------------------------------------------#
# WRITE THE DIRECTORIES FOR EACH BIOP AND NAME APPROPRIATELY
# DON'T OVERWRITE IF THEY EXIST ALREADY
#--------------------------------------------------#
self.bio_params.read_pure_water_absorption_from_file(
self.run_params.pure_water_absorption_file)
self.bio_params.read_pure_water_scattering_from_file(
self.run_params.pure_water_scattering_file)
self.bio_params.read_aphi_from_file(self.run_params.phytoplankton_absorption_file)
for saa in self.saa_list:
# update the saa in the run file & the todo filename!
self.run_params.sky_aziumth = saa
self.run_params.sky_file = os.path.abspath(
os.path.join(os.path.join(self.run_params.input_path, 'sky_files'),
'sky_' + self.run_params.sky_state + '_z' + str(self.run_params.sky_zenith) + '_a' + str(
self.run_params.sky_azimuth) + '_' + str(
self.run_params.num_bands) + '_' + self.run_params.ds_code))
for sza in self.sza_list:
# update the saz in the run file
self.run_params.sky_zenith = sza
self.run_params.sky_file = os.path.abspath(
os.path.join(os.path.join(self.run_params.input_path, 'sky_files'),
'sky_' + self.run_params.sky_state + '_z' + str(
self.run_params.sky_zenith) + '_a' + str(self.run_params.sky_azimuth) + '_' + str(
self.run_params.num_bands) + '_' + self.run_params.ds_code))
for p in self.p_list:
for x in self.x_list:
for y in self.y_list:
for g in self.g_list:
for s in self.s_list:
for z in self.z_list:
file_name = 'SAA' + str(saa) + '_SZA' + str(sza) + '_P' + str(p) + '_X' + str(
x) + '_Y' + str(y) + '_G' + str(g) + '_S' + str(s) + '_Z' + str(z)
dir_name = os.path.join(self.batch_output, file_name)
self.run_params.output_path = dir_name
#--------------------------------------------------#
# UPDATE THE IOP PARAMETERS FOR THE RUN FILE
#--------------------------------------------------#
self.run_params.sky_azimuth = saa
self.run_params.sky_zenith = sza
self.run_params.depth = z
self.bio_params.build_bbp(x, y) # todo add wave const as a kwarg
self.bio_params.build_a_cdom(g, s)
# Need to re-read the file as it was scaled in a the other run!
self.bio_params.read_aphi_from_file(
self.run_params.phytoplankton_absorption_file)
self.bio_params.scale_aphi(p)
self.bio_params.build_all_iop()
self.run_params.scattering_file = os.path.join(
os.path.join(self.run_params.input_path, 'iop_files'), 'b_' + file_name)
self.bio_params.write_b_to_file(self.run_params.scattering_file)
self.run_params.attenuation_file = os.path.join(
os.path.join(self.run_params.input_path, 'iop_files'), 'c_' + file_name)
self.bio_params.write_c_to_file(self.run_params.attenuation_file)
self.run_params.project_file = os.path.join(dir_name, 'batch.txt')
self.run_params.report_file = os.path.join(dir_name, 'report.txt')
self.run_params.write_sky_params_to_file()
self.run_params.write_surf_params_to_file()
self.run_params.write_phase_params_to_file()
if not os.path.exists(dir_name):
try:
lg.info('Creating run directory')
os.makedirs(dir_name)
self.run_params.write_run_parameters_to_file()
except OSError:
lg.exception('Could not create run directory')
elif os.path.exists(dir_name) and overwrite == True:
try:
lg.info('Creating run directory')
lg.warning('Overwriting existing directories')
os.makedirs(dir_name)
self.run_params.write_run_parameters_to_file()
except OSError:
lg.exception('Could not create run directory')
|
[
"For",
"all",
"possible",
"combinations",
"of",
"batchable",
"parameters",
".",
"create",
"a",
"unique",
"directory",
"to",
"story",
"outputs"
] |
marrabld/planarradpy
|
python
|
https://github.com/marrabld/planarradpy/blob/5095d1cb98d4f67a7c3108c9282f2d59253e89a8/libplanarradpy/planrad.py#L730-L853
|
[
"def",
"generate_directories",
"(",
"self",
",",
"overwrite",
"=",
"False",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"self",
".",
"batch_output",
")",
":",
"try",
":",
"lg",
".",
"info",
"(",
"'Creating batch project directory'",
")",
"if",
"self",
".",
"batch_output",
"==",
"self",
".",
"run_params",
".",
"output_path",
"+",
"'batch'",
":",
"lg",
".",
"warning",
"(",
"'Using default project name. Consider renaming!'",
")",
"os",
".",
"makedirs",
"(",
"self",
".",
"batch_output",
")",
"except",
"OSError",
":",
"lg",
".",
"exception",
"(",
"'Could not create project directory'",
")",
"elif",
"os",
".",
"path",
".",
"exists",
"(",
"self",
".",
"batch_output",
")",
"and",
"overwrite",
"==",
"True",
":",
"try",
":",
"lg",
".",
"info",
"(",
"'Creating batch project directory'",
")",
"lg",
".",
"warning",
"(",
"'Overwriting existing directories'",
")",
"if",
"self",
".",
"batch_output",
"==",
"self",
".",
"run_params",
".",
"output_path",
"+",
"'batch'",
":",
"lg",
".",
"warning",
"(",
"'Using default project name. Consider renaming!'",
")",
"os",
".",
"makedirs",
"(",
"self",
".",
"batch_output",
")",
"except",
"OSError",
":",
"lg",
".",
"exception",
"(",
"'Could not create project directory'",
")",
"# --------------------------------------------------#",
"# GENERATE ALL THE IOPS FROM BIOP",
"# --------------------------------------------------#",
"#--------------------------------------------------#",
"# WRITE EACH BIOP TO CSV FILE INTO THE INPUT",
"# DIRECTORY IF IT DOESNT EXIST",
"#--------------------------------------------------#",
"#--------------------------------------------------#",
"# GENERATE A LIST OF ALL COMBINATIONS OF BIOPS",
"#--------------------------------------------------#",
"#--------------------------------------------------#",
"# WRITE THE DIRECTORIES FOR EACH BIOP AND NAME APPROPRIATELY",
"# DON'T OVERWRITE IF THEY EXIST ALREADY",
"#--------------------------------------------------#",
"self",
".",
"bio_params",
".",
"read_pure_water_absorption_from_file",
"(",
"self",
".",
"run_params",
".",
"pure_water_absorption_file",
")",
"self",
".",
"bio_params",
".",
"read_pure_water_scattering_from_file",
"(",
"self",
".",
"run_params",
".",
"pure_water_scattering_file",
")",
"self",
".",
"bio_params",
".",
"read_aphi_from_file",
"(",
"self",
".",
"run_params",
".",
"phytoplankton_absorption_file",
")",
"for",
"saa",
"in",
"self",
".",
"saa_list",
":",
"# update the saa in the run file & the todo filename!",
"self",
".",
"run_params",
".",
"sky_aziumth",
"=",
"saa",
"self",
".",
"run_params",
".",
"sky_file",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"run_params",
".",
"input_path",
",",
"'sky_files'",
")",
",",
"'sky_'",
"+",
"self",
".",
"run_params",
".",
"sky_state",
"+",
"'_z'",
"+",
"str",
"(",
"self",
".",
"run_params",
".",
"sky_zenith",
")",
"+",
"'_a'",
"+",
"str",
"(",
"self",
".",
"run_params",
".",
"sky_azimuth",
")",
"+",
"'_'",
"+",
"str",
"(",
"self",
".",
"run_params",
".",
"num_bands",
")",
"+",
"'_'",
"+",
"self",
".",
"run_params",
".",
"ds_code",
")",
")",
"for",
"sza",
"in",
"self",
".",
"sza_list",
":",
"# update the saz in the run file",
"self",
".",
"run_params",
".",
"sky_zenith",
"=",
"sza",
"self",
".",
"run_params",
".",
"sky_file",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"run_params",
".",
"input_path",
",",
"'sky_files'",
")",
",",
"'sky_'",
"+",
"self",
".",
"run_params",
".",
"sky_state",
"+",
"'_z'",
"+",
"str",
"(",
"self",
".",
"run_params",
".",
"sky_zenith",
")",
"+",
"'_a'",
"+",
"str",
"(",
"self",
".",
"run_params",
".",
"sky_azimuth",
")",
"+",
"'_'",
"+",
"str",
"(",
"self",
".",
"run_params",
".",
"num_bands",
")",
"+",
"'_'",
"+",
"self",
".",
"run_params",
".",
"ds_code",
")",
")",
"for",
"p",
"in",
"self",
".",
"p_list",
":",
"for",
"x",
"in",
"self",
".",
"x_list",
":",
"for",
"y",
"in",
"self",
".",
"y_list",
":",
"for",
"g",
"in",
"self",
".",
"g_list",
":",
"for",
"s",
"in",
"self",
".",
"s_list",
":",
"for",
"z",
"in",
"self",
".",
"z_list",
":",
"file_name",
"=",
"'SAA'",
"+",
"str",
"(",
"saa",
")",
"+",
"'_SZA'",
"+",
"str",
"(",
"sza",
")",
"+",
"'_P'",
"+",
"str",
"(",
"p",
")",
"+",
"'_X'",
"+",
"str",
"(",
"x",
")",
"+",
"'_Y'",
"+",
"str",
"(",
"y",
")",
"+",
"'_G'",
"+",
"str",
"(",
"g",
")",
"+",
"'_S'",
"+",
"str",
"(",
"s",
")",
"+",
"'_Z'",
"+",
"str",
"(",
"z",
")",
"dir_name",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"batch_output",
",",
"file_name",
")",
"self",
".",
"run_params",
".",
"output_path",
"=",
"dir_name",
"#--------------------------------------------------#",
"# UPDATE THE IOP PARAMETERS FOR THE RUN FILE",
"#--------------------------------------------------#",
"self",
".",
"run_params",
".",
"sky_azimuth",
"=",
"saa",
"self",
".",
"run_params",
".",
"sky_zenith",
"=",
"sza",
"self",
".",
"run_params",
".",
"depth",
"=",
"z",
"self",
".",
"bio_params",
".",
"build_bbp",
"(",
"x",
",",
"y",
")",
"# todo add wave const as a kwarg",
"self",
".",
"bio_params",
".",
"build_a_cdom",
"(",
"g",
",",
"s",
")",
"# Need to re-read the file as it was scaled in a the other run!",
"self",
".",
"bio_params",
".",
"read_aphi_from_file",
"(",
"self",
".",
"run_params",
".",
"phytoplankton_absorption_file",
")",
"self",
".",
"bio_params",
".",
"scale_aphi",
"(",
"p",
")",
"self",
".",
"bio_params",
".",
"build_all_iop",
"(",
")",
"self",
".",
"run_params",
".",
"scattering_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"run_params",
".",
"input_path",
",",
"'iop_files'",
")",
",",
"'b_'",
"+",
"file_name",
")",
"self",
".",
"bio_params",
".",
"write_b_to_file",
"(",
"self",
".",
"run_params",
".",
"scattering_file",
")",
"self",
".",
"run_params",
".",
"attenuation_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"run_params",
".",
"input_path",
",",
"'iop_files'",
")",
",",
"'c_'",
"+",
"file_name",
")",
"self",
".",
"bio_params",
".",
"write_c_to_file",
"(",
"self",
".",
"run_params",
".",
"attenuation_file",
")",
"self",
".",
"run_params",
".",
"project_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dir_name",
",",
"'batch.txt'",
")",
"self",
".",
"run_params",
".",
"report_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dir_name",
",",
"'report.txt'",
")",
"self",
".",
"run_params",
".",
"write_sky_params_to_file",
"(",
")",
"self",
".",
"run_params",
".",
"write_surf_params_to_file",
"(",
")",
"self",
".",
"run_params",
".",
"write_phase_params_to_file",
"(",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"dir_name",
")",
":",
"try",
":",
"lg",
".",
"info",
"(",
"'Creating run directory'",
")",
"os",
".",
"makedirs",
"(",
"dir_name",
")",
"self",
".",
"run_params",
".",
"write_run_parameters_to_file",
"(",
")",
"except",
"OSError",
":",
"lg",
".",
"exception",
"(",
"'Could not create run directory'",
")",
"elif",
"os",
".",
"path",
".",
"exists",
"(",
"dir_name",
")",
"and",
"overwrite",
"==",
"True",
":",
"try",
":",
"lg",
".",
"info",
"(",
"'Creating run directory'",
")",
"lg",
".",
"warning",
"(",
"'Overwriting existing directories'",
")",
"os",
".",
"makedirs",
"(",
"dir_name",
")",
"self",
".",
"run_params",
".",
"write_run_parameters_to_file",
"(",
")",
"except",
"OSError",
":",
"lg",
".",
"exception",
"(",
"'Could not create run directory'",
")"
] |
5095d1cb98d4f67a7c3108c9282f2d59253e89a8
|
test
|
BatchRun.batch_parameters
|
Takes lists for parameters and saves them as class properties
:param saa: <list> Sun Azimuth Angle (deg)
:param sza: <list> Sun Zenith Angle (deg)
:param p: <list> Phytoplankton linear scalling factor
:param x: <list> Scattering scaling factor
:param y: <list> Scattering slope factor
:param g: <list> CDOM absorption scaling factor
:param s: <list> CDOM absorption slope factor
:param z: <list> depth (m)
|
libplanarradpy/planrad.py
|
def batch_parameters(self, saa, sza, p, x, y, g, s, z):
"""Takes lists for parameters and saves them as class properties
:param saa: <list> Sun Azimuth Angle (deg)
:param sza: <list> Sun Zenith Angle (deg)
:param p: <list> Phytoplankton linear scalling factor
:param x: <list> Scattering scaling factor
:param y: <list> Scattering slope factor
:param g: <list> CDOM absorption scaling factor
:param s: <list> CDOM absorption slope factor
:param z: <list> depth (m)"""
self.saa_list = saa
self.sza_list = sza
self.p_list = p
self.x_list = x
self.y_list = y
self.g_list = g
self.s_list = s
self.z_list = z
|
def batch_parameters(self, saa, sza, p, x, y, g, s, z):
"""Takes lists for parameters and saves them as class properties
:param saa: <list> Sun Azimuth Angle (deg)
:param sza: <list> Sun Zenith Angle (deg)
:param p: <list> Phytoplankton linear scalling factor
:param x: <list> Scattering scaling factor
:param y: <list> Scattering slope factor
:param g: <list> CDOM absorption scaling factor
:param s: <list> CDOM absorption slope factor
:param z: <list> depth (m)"""
self.saa_list = saa
self.sza_list = sza
self.p_list = p
self.x_list = x
self.y_list = y
self.g_list = g
self.s_list = s
self.z_list = z
|
[
"Takes",
"lists",
"for",
"parameters",
"and",
"saves",
"them",
"as",
"class",
"properties"
] |
marrabld/planarradpy
|
python
|
https://github.com/marrabld/planarradpy/blob/5095d1cb98d4f67a7c3108c9282f2d59253e89a8/libplanarradpy/planrad.py#L868-L886
|
[
"def",
"batch_parameters",
"(",
"self",
",",
"saa",
",",
"sza",
",",
"p",
",",
"x",
",",
"y",
",",
"g",
",",
"s",
",",
"z",
")",
":",
"self",
".",
"saa_list",
"=",
"saa",
"self",
".",
"sza_list",
"=",
"sza",
"self",
".",
"p_list",
"=",
"p",
"self",
".",
"x_list",
"=",
"x",
"self",
".",
"y_list",
"=",
"y",
"self",
".",
"g_list",
"=",
"g",
"self",
".",
"s_list",
"=",
"s",
"self",
".",
"z_list",
"=",
"z"
] |
5095d1cb98d4f67a7c3108c9282f2d59253e89a8
|
test
|
FileTools.read_param_file_to_dict
|
Loads a text file to a python dictionary using '=' as the delimiter
:param file_name: the name and path of the text file
|
libplanarradpy/planrad.py
|
def read_param_file_to_dict(file_name):
"""Loads a text file to a python dictionary using '=' as the delimiter
:param file_name: the name and path of the text file
"""
data = loadtxt(file_name, delimiter='=', dtype=scipy.string0)
data_dict = dict(data)
for key in data_dict.keys():
data_dict[key] = data_dict[key].strip()
data_dict[key.strip()] = data_dict[key]
del data_dict[key]
return data_dict
|
def read_param_file_to_dict(file_name):
"""Loads a text file to a python dictionary using '=' as the delimiter
:param file_name: the name and path of the text file
"""
data = loadtxt(file_name, delimiter='=', dtype=scipy.string0)
data_dict = dict(data)
for key in data_dict.keys():
data_dict[key] = data_dict[key].strip()
data_dict[key.strip()] = data_dict[key]
del data_dict[key]
return data_dict
|
[
"Loads",
"a",
"text",
"file",
"to",
"a",
"python",
"dictionary",
"using",
"=",
"as",
"the",
"delimiter"
] |
marrabld/planarradpy
|
python
|
https://github.com/marrabld/planarradpy/blob/5095d1cb98d4f67a7c3108c9282f2d59253e89a8/libplanarradpy/planrad.py#L896-L908
|
[
"def",
"read_param_file_to_dict",
"(",
"file_name",
")",
":",
"data",
"=",
"loadtxt",
"(",
"file_name",
",",
"delimiter",
"=",
"'='",
",",
"dtype",
"=",
"scipy",
".",
"string0",
")",
"data_dict",
"=",
"dict",
"(",
"data",
")",
"for",
"key",
"in",
"data_dict",
".",
"keys",
"(",
")",
":",
"data_dict",
"[",
"key",
"]",
"=",
"data_dict",
"[",
"key",
"]",
".",
"strip",
"(",
")",
"data_dict",
"[",
"key",
".",
"strip",
"(",
")",
"]",
"=",
"data_dict",
"[",
"key",
"]",
"del",
"data_dict",
"[",
"key",
"]",
"return",
"data_dict"
] |
5095d1cb98d4f67a7c3108c9282f2d59253e89a8
|
test
|
HelperMethods.string_to_float_list
|
Pull comma separated string values out of a text file and converts them to float list
|
libplanarradpy/planrad.py
|
def string_to_float_list(string_var):
"""Pull comma separated string values out of a text file and converts them to float list"""
try:
return [float(s) for s in string_var.strip('[').strip(']').split(', ')]
except:
return [float(s) for s in string_var.strip('[').strip(']').split(',')]
|
def string_to_float_list(string_var):
"""Pull comma separated string values out of a text file and converts them to float list"""
try:
return [float(s) for s in string_var.strip('[').strip(']').split(', ')]
except:
return [float(s) for s in string_var.strip('[').strip(']').split(',')]
|
[
"Pull",
"comma",
"separated",
"string",
"values",
"out",
"of",
"a",
"text",
"file",
"and",
"converts",
"them",
"to",
"float",
"list"
] |
marrabld/planarradpy
|
python
|
https://github.com/marrabld/planarradpy/blob/5095d1cb98d4f67a7c3108c9282f2d59253e89a8/libplanarradpy/planrad.py#L929-L934
|
[
"def",
"string_to_float_list",
"(",
"string_var",
")",
":",
"try",
":",
"return",
"[",
"float",
"(",
"s",
")",
"for",
"s",
"in",
"string_var",
".",
"strip",
"(",
"'['",
")",
".",
"strip",
"(",
"']'",
")",
".",
"split",
"(",
"', '",
")",
"]",
"except",
":",
"return",
"[",
"float",
"(",
"s",
")",
"for",
"s",
"in",
"string_var",
".",
"strip",
"(",
"'['",
")",
".",
"strip",
"(",
"']'",
")",
".",
"split",
"(",
"','",
")",
"]"
] |
5095d1cb98d4f67a7c3108c9282f2d59253e89a8
|
test
|
ReportTools.read_pr_report
|
Reads in a PlanarRad generated report
Saves the single line reported parameters as a python dictionary
:param filename: The name and path of the PlanarRad generated file
:returns self.data_dictionary: python dictionary with the key and values from the report
|
libplanarradpy/planrad.py
|
def read_pr_report(self, filename):
"""Reads in a PlanarRad generated report
Saves the single line reported parameters as a python dictionary
:param filename: The name and path of the PlanarRad generated file
:returns self.data_dictionary: python dictionary with the key and values from the report
"""
done = False
f = open(filename)
while f:
#for line in open(filename):
line = f.readline()
if not line:
done = True
break
if "# Quad solid angle mean point theta table (rows are horizontal, columns are vertical):" in line.strip():
# read in the bunch of lines.
tmp = []
for i_iter in range(0, len(self.data_dictionary['theta_points_deg']) - 2):
tmp.append(f.readline())
self.data_dictionary['Quad_solid_angle_mean_point_theta'] = tmp
elif '#' not in line or not line.strip():
element = line.split(',')
self.data_dictionary[element[0]] = element[1:]
if "# Quad solid angle mean point phi table (rows are horizontal, columns are vertical):" in line.strip():
# read in the bunch of lines.
tmp = []
for i_iter in range(0, len(self.data_dictionary['theta_points_deg']) - 2):
tmp.append(f.readline())
self.data_dictionary['Quad_solid_angle_mean_point_phi'] = tmp
elif '#' not in line or not line.strip():
element = line.split(',')
self.data_dictionary[element[0]] = element[1:]
if "L_w band" in line.strip():
for i_iter in range(0, int(self.data_dictionary['band_count'][1])):
tmp = []
for j_iter in range(0, len(self.data_dictionary['theta_points_deg']) - 2):
tmp.append(f.readline())
self.data_dictionary['L_w_band_' + str(i_iter + 1)] = tmp
f.readline()
f.readline() # skip the next 2 lines
if "L_it band" in line.strip():
for i_iter in range(0, int(self.data_dictionary['band_count'][1])):
tmp = []
for j_iter in range(0, len(self.data_dictionary['theta_points_deg']) - 2):
tmp.append(f.readline())
self.data_dictionary['L_it_band_' + str(i_iter + 1)] = tmp
f.readline()
f.readline() # skip the next 2 lines
return self.data_dictionary
|
def read_pr_report(self, filename):
"""Reads in a PlanarRad generated report
Saves the single line reported parameters as a python dictionary
:param filename: The name and path of the PlanarRad generated file
:returns self.data_dictionary: python dictionary with the key and values from the report
"""
done = False
f = open(filename)
while f:
#for line in open(filename):
line = f.readline()
if not line:
done = True
break
if "# Quad solid angle mean point theta table (rows are horizontal, columns are vertical):" in line.strip():
# read in the bunch of lines.
tmp = []
for i_iter in range(0, len(self.data_dictionary['theta_points_deg']) - 2):
tmp.append(f.readline())
self.data_dictionary['Quad_solid_angle_mean_point_theta'] = tmp
elif '#' not in line or not line.strip():
element = line.split(',')
self.data_dictionary[element[0]] = element[1:]
if "# Quad solid angle mean point phi table (rows are horizontal, columns are vertical):" in line.strip():
# read in the bunch of lines.
tmp = []
for i_iter in range(0, len(self.data_dictionary['theta_points_deg']) - 2):
tmp.append(f.readline())
self.data_dictionary['Quad_solid_angle_mean_point_phi'] = tmp
elif '#' not in line or not line.strip():
element = line.split(',')
self.data_dictionary[element[0]] = element[1:]
if "L_w band" in line.strip():
for i_iter in range(0, int(self.data_dictionary['band_count'][1])):
tmp = []
for j_iter in range(0, len(self.data_dictionary['theta_points_deg']) - 2):
tmp.append(f.readline())
self.data_dictionary['L_w_band_' + str(i_iter + 1)] = tmp
f.readline()
f.readline() # skip the next 2 lines
if "L_it band" in line.strip():
for i_iter in range(0, int(self.data_dictionary['band_count'][1])):
tmp = []
for j_iter in range(0, len(self.data_dictionary['theta_points_deg']) - 2):
tmp.append(f.readline())
self.data_dictionary['L_it_band_' + str(i_iter + 1)] = tmp
f.readline()
f.readline() # skip the next 2 lines
return self.data_dictionary
|
[
"Reads",
"in",
"a",
"PlanarRad",
"generated",
"report"
] |
marrabld/planarradpy
|
python
|
https://github.com/marrabld/planarradpy/blob/5095d1cb98d4f67a7c3108c9282f2d59253e89a8/libplanarradpy/planrad.py#L943-L1009
|
[
"def",
"read_pr_report",
"(",
"self",
",",
"filename",
")",
":",
"done",
"=",
"False",
"f",
"=",
"open",
"(",
"filename",
")",
"while",
"f",
":",
"#for line in open(filename):",
"line",
"=",
"f",
".",
"readline",
"(",
")",
"if",
"not",
"line",
":",
"done",
"=",
"True",
"break",
"if",
"\"# Quad solid angle mean point theta table (rows are horizontal, columns are vertical):\"",
"in",
"line",
".",
"strip",
"(",
")",
":",
"# read in the bunch of lines.",
"tmp",
"=",
"[",
"]",
"for",
"i_iter",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"self",
".",
"data_dictionary",
"[",
"'theta_points_deg'",
"]",
")",
"-",
"2",
")",
":",
"tmp",
".",
"append",
"(",
"f",
".",
"readline",
"(",
")",
")",
"self",
".",
"data_dictionary",
"[",
"'Quad_solid_angle_mean_point_theta'",
"]",
"=",
"tmp",
"elif",
"'#'",
"not",
"in",
"line",
"or",
"not",
"line",
".",
"strip",
"(",
")",
":",
"element",
"=",
"line",
".",
"split",
"(",
"','",
")",
"self",
".",
"data_dictionary",
"[",
"element",
"[",
"0",
"]",
"]",
"=",
"element",
"[",
"1",
":",
"]",
"if",
"\"# Quad solid angle mean point phi table (rows are horizontal, columns are vertical):\"",
"in",
"line",
".",
"strip",
"(",
")",
":",
"# read in the bunch of lines.",
"tmp",
"=",
"[",
"]",
"for",
"i_iter",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"self",
".",
"data_dictionary",
"[",
"'theta_points_deg'",
"]",
")",
"-",
"2",
")",
":",
"tmp",
".",
"append",
"(",
"f",
".",
"readline",
"(",
")",
")",
"self",
".",
"data_dictionary",
"[",
"'Quad_solid_angle_mean_point_phi'",
"]",
"=",
"tmp",
"elif",
"'#'",
"not",
"in",
"line",
"or",
"not",
"line",
".",
"strip",
"(",
")",
":",
"element",
"=",
"line",
".",
"split",
"(",
"','",
")",
"self",
".",
"data_dictionary",
"[",
"element",
"[",
"0",
"]",
"]",
"=",
"element",
"[",
"1",
":",
"]",
"if",
"\"L_w band\"",
"in",
"line",
".",
"strip",
"(",
")",
":",
"for",
"i_iter",
"in",
"range",
"(",
"0",
",",
"int",
"(",
"self",
".",
"data_dictionary",
"[",
"'band_count'",
"]",
"[",
"1",
"]",
")",
")",
":",
"tmp",
"=",
"[",
"]",
"for",
"j_iter",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"self",
".",
"data_dictionary",
"[",
"'theta_points_deg'",
"]",
")",
"-",
"2",
")",
":",
"tmp",
".",
"append",
"(",
"f",
".",
"readline",
"(",
")",
")",
"self",
".",
"data_dictionary",
"[",
"'L_w_band_'",
"+",
"str",
"(",
"i_iter",
"+",
"1",
")",
"]",
"=",
"tmp",
"f",
".",
"readline",
"(",
")",
"f",
".",
"readline",
"(",
")",
"# skip the next 2 lines",
"if",
"\"L_it band\"",
"in",
"line",
".",
"strip",
"(",
")",
":",
"for",
"i_iter",
"in",
"range",
"(",
"0",
",",
"int",
"(",
"self",
".",
"data_dictionary",
"[",
"'band_count'",
"]",
"[",
"1",
"]",
")",
")",
":",
"tmp",
"=",
"[",
"]",
"for",
"j_iter",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"self",
".",
"data_dictionary",
"[",
"'theta_points_deg'",
"]",
")",
"-",
"2",
")",
":",
"tmp",
".",
"append",
"(",
"f",
".",
"readline",
"(",
")",
")",
"self",
".",
"data_dictionary",
"[",
"'L_it_band_'",
"+",
"str",
"(",
"i_iter",
"+",
"1",
")",
"]",
"=",
"tmp",
"f",
".",
"readline",
"(",
")",
"f",
".",
"readline",
"(",
")",
"# skip the next 2 lines",
"return",
"self",
".",
"data_dictionary"
] |
5095d1cb98d4f67a7c3108c9282f2d59253e89a8
|
test
|
ReportTools.calc_directional_aop
|
Will calcuate the directional AOP (only sub-surface rrs for now) if the direction is defined using @
e.g. rrs@32.0:45 where <zenith-theta>:<azimuth-phi>
:param report: The planarrad report dictionary. should include the quadtables and the directional info
:param parameter: parameter to calc. Currently only sub-surface reflectance rrs.
:return:
|
libplanarradpy/planrad.py
|
def calc_directional_aop(self, report, parameter, parameter_dir):
"""
Will calcuate the directional AOP (only sub-surface rrs for now) if the direction is defined using @
e.g. rrs@32.0:45 where <zenith-theta>:<azimuth-phi>
:param report: The planarrad report dictionary. should include the quadtables and the directional info
:param parameter: parameter to calc. Currently only sub-surface reflectance rrs.
:return:
"""
lg.debug('calculating the directional ' + parameter)
tmp_zenith = []
param_zenith = parameter_dir.split(':')[0]
param_azimuth = parameter_dir.split(':')[1]
# --------------------------------------------------#
# find the mean directions values
# --------------------------------------------------#
for i_iter in range(0, int(report['vn'][1])):
tmp_zenith.append(report['Quad_solid_angle_mean_point_theta'][i_iter][:].split(',')[0]) #that was a pain!
tmp_azimuth = report['Quad_solid_angle_mean_point_phi'][1]
zenith = scipy.asarray(tmp_zenith, dtype=float)
azimuth = scipy.fromstring(tmp_azimuth, dtype=float, sep=',')
# --------------------------------------------------#
# now grab the min and max index of the closest match
# --------------------------------------------------#
#min_zenith_idx = (scipy.abs(zenith - param_zenith)).argmin()
from scipy import interpolate
lw = scipy.zeros(int(report['band_count'][1]))
for j_iter in range(0, int(report['band_count'][1])):
if parameter == 'rrs':
lg.info('Calculating directional rrs')
tmp_lw = report['L_w_band_' + str(j_iter + 1)]
elif parameter == 'Rrs':
lg.info('Calculating directional Rrs')
print(report.keys())
tmp_lw = report['L_it_band_' + str(j_iter + 1)]
lw_scal = scipy.zeros((int(report['vn'][1]), int(report['hn'][1])))
# for the fist and last line we have to replicate the top and bottom circle
for i_iter in range(0, int(report['hn'][1])):
lw_scal[0, i_iter] = tmp_lw[0].split(',')[0]
lw_scal[int(report['vn'][1]) - 1, i_iter] = tmp_lw[-1].split(',')[0]
for i_iter in range(1, int(report['vn'][1]) - 1):
lw_scal[i_iter, :] = scipy.asarray(tmp_lw[i_iter].split(','), dtype=float)
# to do, make an array of zeros and loop over each list an apply to eah line. bruteforce
f1 = interpolate.interp2d(zenith, azimuth, lw_scal)
lw[j_iter] = f1(float(param_zenith), float(param_azimuth))
# ----
# Now we finally have L_w we calculate the rrs
# ----
if parameter == 'rrs':
tmp_rrs = lw / scipy.asarray(report['Ed_w'], dtype=float)[1:] # ignore the first val as that is depth of val
elif parameter == 'Rrs':
tmp_rrs = lw / scipy.asarray(report['Ed_a'], dtype=float)[1:] # ignore the first val as that is depth of val
# make rrs a string so it can be written to file.
rrs = ",".join(map(str, tmp_rrs))
return " ," + rrs
|
def calc_directional_aop(self, report, parameter, parameter_dir):
"""
Will calcuate the directional AOP (only sub-surface rrs for now) if the direction is defined using @
e.g. rrs@32.0:45 where <zenith-theta>:<azimuth-phi>
:param report: The planarrad report dictionary. should include the quadtables and the directional info
:param parameter: parameter to calc. Currently only sub-surface reflectance rrs.
:return:
"""
lg.debug('calculating the directional ' + parameter)
tmp_zenith = []
param_zenith = parameter_dir.split(':')[0]
param_azimuth = parameter_dir.split(':')[1]
# --------------------------------------------------#
# find the mean directions values
# --------------------------------------------------#
for i_iter in range(0, int(report['vn'][1])):
tmp_zenith.append(report['Quad_solid_angle_mean_point_theta'][i_iter][:].split(',')[0]) #that was a pain!
tmp_azimuth = report['Quad_solid_angle_mean_point_phi'][1]
zenith = scipy.asarray(tmp_zenith, dtype=float)
azimuth = scipy.fromstring(tmp_azimuth, dtype=float, sep=',')
# --------------------------------------------------#
# now grab the min and max index of the closest match
# --------------------------------------------------#
#min_zenith_idx = (scipy.abs(zenith - param_zenith)).argmin()
from scipy import interpolate
lw = scipy.zeros(int(report['band_count'][1]))
for j_iter in range(0, int(report['band_count'][1])):
if parameter == 'rrs':
lg.info('Calculating directional rrs')
tmp_lw = report['L_w_band_' + str(j_iter + 1)]
elif parameter == 'Rrs':
lg.info('Calculating directional Rrs')
print(report.keys())
tmp_lw = report['L_it_band_' + str(j_iter + 1)]
lw_scal = scipy.zeros((int(report['vn'][1]), int(report['hn'][1])))
# for the fist and last line we have to replicate the top and bottom circle
for i_iter in range(0, int(report['hn'][1])):
lw_scal[0, i_iter] = tmp_lw[0].split(',')[0]
lw_scal[int(report['vn'][1]) - 1, i_iter] = tmp_lw[-1].split(',')[0]
for i_iter in range(1, int(report['vn'][1]) - 1):
lw_scal[i_iter, :] = scipy.asarray(tmp_lw[i_iter].split(','), dtype=float)
# to do, make an array of zeros and loop over each list an apply to eah line. bruteforce
f1 = interpolate.interp2d(zenith, azimuth, lw_scal)
lw[j_iter] = f1(float(param_zenith), float(param_azimuth))
# ----
# Now we finally have L_w we calculate the rrs
# ----
if parameter == 'rrs':
tmp_rrs = lw / scipy.asarray(report['Ed_w'], dtype=float)[1:] # ignore the first val as that is depth of val
elif parameter == 'Rrs':
tmp_rrs = lw / scipy.asarray(report['Ed_a'], dtype=float)[1:] # ignore the first val as that is depth of val
# make rrs a string so it can be written to file.
rrs = ",".join(map(str, tmp_rrs))
return " ," + rrs
|
[
"Will",
"calcuate",
"the",
"directional",
"AOP",
"(",
"only",
"sub",
"-",
"surface",
"rrs",
"for",
"now",
")",
"if",
"the",
"direction",
"is",
"defined",
"using",
"@",
"e",
".",
"g",
".",
"rrs@32",
".",
"0",
":",
"45",
"where",
"<zenith",
"-",
"theta",
">",
":",
"<azimuth",
"-",
"phi",
">"
] |
marrabld/planarradpy
|
python
|
https://github.com/marrabld/planarradpy/blob/5095d1cb98d4f67a7c3108c9282f2d59253e89a8/libplanarradpy/planrad.py#L1014-L1087
|
[
"def",
"calc_directional_aop",
"(",
"self",
",",
"report",
",",
"parameter",
",",
"parameter_dir",
")",
":",
"lg",
".",
"debug",
"(",
"'calculating the directional '",
"+",
"parameter",
")",
"tmp_zenith",
"=",
"[",
"]",
"param_zenith",
"=",
"parameter_dir",
".",
"split",
"(",
"':'",
")",
"[",
"0",
"]",
"param_azimuth",
"=",
"parameter_dir",
".",
"split",
"(",
"':'",
")",
"[",
"1",
"]",
"# --------------------------------------------------#",
"# find the mean directions values",
"# --------------------------------------------------#",
"for",
"i_iter",
"in",
"range",
"(",
"0",
",",
"int",
"(",
"report",
"[",
"'vn'",
"]",
"[",
"1",
"]",
")",
")",
":",
"tmp_zenith",
".",
"append",
"(",
"report",
"[",
"'Quad_solid_angle_mean_point_theta'",
"]",
"[",
"i_iter",
"]",
"[",
":",
"]",
".",
"split",
"(",
"','",
")",
"[",
"0",
"]",
")",
"#that was a pain!",
"tmp_azimuth",
"=",
"report",
"[",
"'Quad_solid_angle_mean_point_phi'",
"]",
"[",
"1",
"]",
"zenith",
"=",
"scipy",
".",
"asarray",
"(",
"tmp_zenith",
",",
"dtype",
"=",
"float",
")",
"azimuth",
"=",
"scipy",
".",
"fromstring",
"(",
"tmp_azimuth",
",",
"dtype",
"=",
"float",
",",
"sep",
"=",
"','",
")",
"# --------------------------------------------------#",
"# now grab the min and max index of the closest match",
"# --------------------------------------------------#",
"#min_zenith_idx = (scipy.abs(zenith - param_zenith)).argmin()",
"from",
"scipy",
"import",
"interpolate",
"lw",
"=",
"scipy",
".",
"zeros",
"(",
"int",
"(",
"report",
"[",
"'band_count'",
"]",
"[",
"1",
"]",
")",
")",
"for",
"j_iter",
"in",
"range",
"(",
"0",
",",
"int",
"(",
"report",
"[",
"'band_count'",
"]",
"[",
"1",
"]",
")",
")",
":",
"if",
"parameter",
"==",
"'rrs'",
":",
"lg",
".",
"info",
"(",
"'Calculating directional rrs'",
")",
"tmp_lw",
"=",
"report",
"[",
"'L_w_band_'",
"+",
"str",
"(",
"j_iter",
"+",
"1",
")",
"]",
"elif",
"parameter",
"==",
"'Rrs'",
":",
"lg",
".",
"info",
"(",
"'Calculating directional Rrs'",
")",
"print",
"(",
"report",
".",
"keys",
"(",
")",
")",
"tmp_lw",
"=",
"report",
"[",
"'L_it_band_'",
"+",
"str",
"(",
"j_iter",
"+",
"1",
")",
"]",
"lw_scal",
"=",
"scipy",
".",
"zeros",
"(",
"(",
"int",
"(",
"report",
"[",
"'vn'",
"]",
"[",
"1",
"]",
")",
",",
"int",
"(",
"report",
"[",
"'hn'",
"]",
"[",
"1",
"]",
")",
")",
")",
"# for the fist and last line we have to replicate the top and bottom circle",
"for",
"i_iter",
"in",
"range",
"(",
"0",
",",
"int",
"(",
"report",
"[",
"'hn'",
"]",
"[",
"1",
"]",
")",
")",
":",
"lw_scal",
"[",
"0",
",",
"i_iter",
"]",
"=",
"tmp_lw",
"[",
"0",
"]",
".",
"split",
"(",
"','",
")",
"[",
"0",
"]",
"lw_scal",
"[",
"int",
"(",
"report",
"[",
"'vn'",
"]",
"[",
"1",
"]",
")",
"-",
"1",
",",
"i_iter",
"]",
"=",
"tmp_lw",
"[",
"-",
"1",
"]",
".",
"split",
"(",
"','",
")",
"[",
"0",
"]",
"for",
"i_iter",
"in",
"range",
"(",
"1",
",",
"int",
"(",
"report",
"[",
"'vn'",
"]",
"[",
"1",
"]",
")",
"-",
"1",
")",
":",
"lw_scal",
"[",
"i_iter",
",",
":",
"]",
"=",
"scipy",
".",
"asarray",
"(",
"tmp_lw",
"[",
"i_iter",
"]",
".",
"split",
"(",
"','",
")",
",",
"dtype",
"=",
"float",
")",
"# to do, make an array of zeros and loop over each list an apply to eah line. bruteforce",
"f1",
"=",
"interpolate",
".",
"interp2d",
"(",
"zenith",
",",
"azimuth",
",",
"lw_scal",
")",
"lw",
"[",
"j_iter",
"]",
"=",
"f1",
"(",
"float",
"(",
"param_zenith",
")",
",",
"float",
"(",
"param_azimuth",
")",
")",
"# ----",
"# Now we finally have L_w we calculate the rrs",
"# ----",
"if",
"parameter",
"==",
"'rrs'",
":",
"tmp_rrs",
"=",
"lw",
"/",
"scipy",
".",
"asarray",
"(",
"report",
"[",
"'Ed_w'",
"]",
",",
"dtype",
"=",
"float",
")",
"[",
"1",
":",
"]",
"# ignore the first val as that is depth of val",
"elif",
"parameter",
"==",
"'Rrs'",
":",
"tmp_rrs",
"=",
"lw",
"/",
"scipy",
".",
"asarray",
"(",
"report",
"[",
"'Ed_a'",
"]",
",",
"dtype",
"=",
"float",
")",
"[",
"1",
":",
"]",
"# ignore the first val as that is depth of val",
"# make rrs a string so it can be written to file.",
"rrs",
"=",
"\",\"",
".",
"join",
"(",
"map",
"(",
"str",
",",
"tmp_rrs",
")",
")",
"return",
"\" ,\"",
"+",
"rrs"
] |
5095d1cb98d4f67a7c3108c9282f2d59253e89a8
|
test
|
ReportTools.write_batch_report
|
Collect all of the batch reports and concatenate the results. The report should be :
:param input_directory:
:param parameter: This is the parameter in which to report.
|
libplanarradpy/planrad.py
|
def write_batch_report(self, input_directory, parameter):
"""
Collect all of the batch reports and concatenate the results. The report should be :
:param input_directory:
:param parameter: This is the parameter in which to report.
"""
# Check to see if there is an @ in the parameter. If there is split
if '@' in parameter:
parameter_dir = parameter.split('@')[1]
parameter = parameter.split('@')[0]
# --------------------------------------------------#
# we put the batch report one directory up in the tree
# --------------------------------------------------#
batch_report_file = 'batch_report.txt'
batch_report_file = os.path.join(input_directory, batch_report_file)
f = open(batch_report_file, 'w')
w = csv.writer(f, delimiter=',')
#--------------------------------------------------#
# Read in the report from planarrad and pull out the parameter that we want
#--------------------------------------------------#
dir_list = os.listdir(input_directory)
#--------------------------------------------------#
# Sometimes the report isn't generated for some reason.
# this checks to see if the first file in the dir list exists and skips if it doesn't
#--------------------------------------------------#
read_first_file = True
i_iter = 0
while read_first_file:
if os.path.exists(os.path.join(input_directory, os.path.join(dir_list[i_iter], 'report.txt'))):
report = self.read_pr_report(
os.path.join(input_directory, os.path.join(dir_list[i_iter], 'report.txt')))
read_first_file = False
else:
lg.warning('Missing report file in' + dir_list[i_iter])
i_iter += 1
try:
wave_val = report['band_centres']
param_val = report[parameter]
except:
lg.exception('Parameter :: ' + str(parameter) + ' :: Not in report')
wave_str = str(wave_val)
wave_str = wave_str.strip('[').strip(']').replace('\'', '').replace('\\n', '').replace(' ', '').replace(' -,',
'').replace(
',', '\",\"')
f.write(
'\"Sun Azimuth (deg)\",\"Sun Zenith (deg)\",\"Phytoplankton\",\"Scattering X\",\"Scattering Y\",\"CDOM G\",\"CDOM S\",\"Depth (m)\",\"#wave length (nm) ->\",\"' + wave_str + '\"\n')
#--------------------------------------------------#
# Get all of the directories under the batch directories
# The directory names have the IOP parameters in the names
#--------------------------------------------------#
for dir in dir_list:
if os.path.isdir(os.path.abspath(os.path.join(input_directory, dir))):
tmp_str_list = dir.split('_')
#for tmp_str in tmp_str_list:
saa = ''.join(c for c in tmp_str_list[0] if not c.isalpha())
sza = ''.join(c for c in tmp_str_list[1] if not c.isalpha())
p = ''.join(c for c in tmp_str_list[2] if not c.isalpha())
x = ''.join(c for c in tmp_str_list[3] if not c.isalpha())
y = ''.join(c for c in tmp_str_list[4] if not c.isalpha())
g = ''.join(c for c in tmp_str_list[5] if not c.isalpha())
s = ''.join(c for c in tmp_str_list[6] if not c.isalpha())
z = ''.join(c for c in tmp_str_list[7] if not c.isalpha())
#--------------------------------------------------#
# Write the report header and then the values above in the columns
#--------------------------------------------------#
try:
f.write(saa + ',' + sza + ',' + p + ',' + x + ',' + y + ',' + g + ',' + s + ',' + z + ',')
report = self.read_pr_report(os.path.join(input_directory, os.path.join(dir, 'report.txt')))
try:
# check to see if the parameter has the @ parameter. If it does pass to directional calculator
if 'parameter_dir' in locals():
param_val = self.calc_directional_aop(report, parameter, parameter_dir)
else:
param_val = report[parameter]
param_str = str(param_val)
param_str = param_str.strip('[').strip(']').replace('\'', '').replace('\\n', '').replace(' ',
'')
f.write(param_str + '\n')
except:
lg.exception('Parameter :: ' + str(parameter) + ' :: Not in report')
except:
lg.warning('Cannot find a report in directory :: ' + dir)
|
def write_batch_report(self, input_directory, parameter):
"""
Collect all of the batch reports and concatenate the results. The report should be :
:param input_directory:
:param parameter: This is the parameter in which to report.
"""
# Check to see if there is an @ in the parameter. If there is split
if '@' in parameter:
parameter_dir = parameter.split('@')[1]
parameter = parameter.split('@')[0]
# --------------------------------------------------#
# we put the batch report one directory up in the tree
# --------------------------------------------------#
batch_report_file = 'batch_report.txt'
batch_report_file = os.path.join(input_directory, batch_report_file)
f = open(batch_report_file, 'w')
w = csv.writer(f, delimiter=',')
#--------------------------------------------------#
# Read in the report from planarrad and pull out the parameter that we want
#--------------------------------------------------#
dir_list = os.listdir(input_directory)
#--------------------------------------------------#
# Sometimes the report isn't generated for some reason.
# this checks to see if the first file in the dir list exists and skips if it doesn't
#--------------------------------------------------#
read_first_file = True
i_iter = 0
while read_first_file:
if os.path.exists(os.path.join(input_directory, os.path.join(dir_list[i_iter], 'report.txt'))):
report = self.read_pr_report(
os.path.join(input_directory, os.path.join(dir_list[i_iter], 'report.txt')))
read_first_file = False
else:
lg.warning('Missing report file in' + dir_list[i_iter])
i_iter += 1
try:
wave_val = report['band_centres']
param_val = report[parameter]
except:
lg.exception('Parameter :: ' + str(parameter) + ' :: Not in report')
wave_str = str(wave_val)
wave_str = wave_str.strip('[').strip(']').replace('\'', '').replace('\\n', '').replace(' ', '').replace(' -,',
'').replace(
',', '\",\"')
f.write(
'\"Sun Azimuth (deg)\",\"Sun Zenith (deg)\",\"Phytoplankton\",\"Scattering X\",\"Scattering Y\",\"CDOM G\",\"CDOM S\",\"Depth (m)\",\"#wave length (nm) ->\",\"' + wave_str + '\"\n')
#--------------------------------------------------#
# Get all of the directories under the batch directories
# The directory names have the IOP parameters in the names
#--------------------------------------------------#
for dir in dir_list:
if os.path.isdir(os.path.abspath(os.path.join(input_directory, dir))):
tmp_str_list = dir.split('_')
#for tmp_str in tmp_str_list:
saa = ''.join(c for c in tmp_str_list[0] if not c.isalpha())
sza = ''.join(c for c in tmp_str_list[1] if not c.isalpha())
p = ''.join(c for c in tmp_str_list[2] if not c.isalpha())
x = ''.join(c for c in tmp_str_list[3] if not c.isalpha())
y = ''.join(c for c in tmp_str_list[4] if not c.isalpha())
g = ''.join(c for c in tmp_str_list[5] if not c.isalpha())
s = ''.join(c for c in tmp_str_list[6] if not c.isalpha())
z = ''.join(c for c in tmp_str_list[7] if not c.isalpha())
#--------------------------------------------------#
# Write the report header and then the values above in the columns
#--------------------------------------------------#
try:
f.write(saa + ',' + sza + ',' + p + ',' + x + ',' + y + ',' + g + ',' + s + ',' + z + ',')
report = self.read_pr_report(os.path.join(input_directory, os.path.join(dir, 'report.txt')))
try:
# check to see if the parameter has the @ parameter. If it does pass to directional calculator
if 'parameter_dir' in locals():
param_val = self.calc_directional_aop(report, parameter, parameter_dir)
else:
param_val = report[parameter]
param_str = str(param_val)
param_str = param_str.strip('[').strip(']').replace('\'', '').replace('\\n', '').replace(' ',
'')
f.write(param_str + '\n')
except:
lg.exception('Parameter :: ' + str(parameter) + ' :: Not in report')
except:
lg.warning('Cannot find a report in directory :: ' + dir)
|
[
"Collect",
"all",
"of",
"the",
"batch",
"reports",
"and",
"concatenate",
"the",
"results",
".",
"The",
"report",
"should",
"be",
":"
] |
marrabld/planarradpy
|
python
|
https://github.com/marrabld/planarradpy/blob/5095d1cb98d4f67a7c3108c9282f2d59253e89a8/libplanarradpy/planrad.py#L1089-L1183
|
[
"def",
"write_batch_report",
"(",
"self",
",",
"input_directory",
",",
"parameter",
")",
":",
"# Check to see if there is an @ in the parameter. If there is split",
"if",
"'@'",
"in",
"parameter",
":",
"parameter_dir",
"=",
"parameter",
".",
"split",
"(",
"'@'",
")",
"[",
"1",
"]",
"parameter",
"=",
"parameter",
".",
"split",
"(",
"'@'",
")",
"[",
"0",
"]",
"# --------------------------------------------------#",
"# we put the batch report one directory up in the tree",
"# --------------------------------------------------#",
"batch_report_file",
"=",
"'batch_report.txt'",
"batch_report_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"input_directory",
",",
"batch_report_file",
")",
"f",
"=",
"open",
"(",
"batch_report_file",
",",
"'w'",
")",
"w",
"=",
"csv",
".",
"writer",
"(",
"f",
",",
"delimiter",
"=",
"','",
")",
"#--------------------------------------------------#",
"# Read in the report from planarrad and pull out the parameter that we want",
"#--------------------------------------------------#",
"dir_list",
"=",
"os",
".",
"listdir",
"(",
"input_directory",
")",
"#--------------------------------------------------#",
"# Sometimes the report isn't generated for some reason.",
"# this checks to see if the first file in the dir list exists and skips if it doesn't",
"#--------------------------------------------------#",
"read_first_file",
"=",
"True",
"i_iter",
"=",
"0",
"while",
"read_first_file",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"path",
".",
"join",
"(",
"input_directory",
",",
"os",
".",
"path",
".",
"join",
"(",
"dir_list",
"[",
"i_iter",
"]",
",",
"'report.txt'",
")",
")",
")",
":",
"report",
"=",
"self",
".",
"read_pr_report",
"(",
"os",
".",
"path",
".",
"join",
"(",
"input_directory",
",",
"os",
".",
"path",
".",
"join",
"(",
"dir_list",
"[",
"i_iter",
"]",
",",
"'report.txt'",
")",
")",
")",
"read_first_file",
"=",
"False",
"else",
":",
"lg",
".",
"warning",
"(",
"'Missing report file in'",
"+",
"dir_list",
"[",
"i_iter",
"]",
")",
"i_iter",
"+=",
"1",
"try",
":",
"wave_val",
"=",
"report",
"[",
"'band_centres'",
"]",
"param_val",
"=",
"report",
"[",
"parameter",
"]",
"except",
":",
"lg",
".",
"exception",
"(",
"'Parameter :: '",
"+",
"str",
"(",
"parameter",
")",
"+",
"' :: Not in report'",
")",
"wave_str",
"=",
"str",
"(",
"wave_val",
")",
"wave_str",
"=",
"wave_str",
".",
"strip",
"(",
"'['",
")",
".",
"strip",
"(",
"']'",
")",
".",
"replace",
"(",
"'\\''",
",",
"''",
")",
".",
"replace",
"(",
"'\\\\n'",
",",
"''",
")",
".",
"replace",
"(",
"' '",
",",
"''",
")",
".",
"replace",
"(",
"' -,'",
",",
"''",
")",
".",
"replace",
"(",
"','",
",",
"'\\\",\\\"'",
")",
"f",
".",
"write",
"(",
"'\\\"Sun Azimuth (deg)\\\",\\\"Sun Zenith (deg)\\\",\\\"Phytoplankton\\\",\\\"Scattering X\\\",\\\"Scattering Y\\\",\\\"CDOM G\\\",\\\"CDOM S\\\",\\\"Depth (m)\\\",\\\"#wave length (nm) ->\\\",\\\"'",
"+",
"wave_str",
"+",
"'\\\"\\n'",
")",
"#--------------------------------------------------#",
"# Get all of the directories under the batch directories",
"# The directory names have the IOP parameters in the names",
"#--------------------------------------------------#",
"for",
"dir",
"in",
"dir_list",
":",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"input_directory",
",",
"dir",
")",
")",
")",
":",
"tmp_str_list",
"=",
"dir",
".",
"split",
"(",
"'_'",
")",
"#for tmp_str in tmp_str_list:",
"saa",
"=",
"''",
".",
"join",
"(",
"c",
"for",
"c",
"in",
"tmp_str_list",
"[",
"0",
"]",
"if",
"not",
"c",
".",
"isalpha",
"(",
")",
")",
"sza",
"=",
"''",
".",
"join",
"(",
"c",
"for",
"c",
"in",
"tmp_str_list",
"[",
"1",
"]",
"if",
"not",
"c",
".",
"isalpha",
"(",
")",
")",
"p",
"=",
"''",
".",
"join",
"(",
"c",
"for",
"c",
"in",
"tmp_str_list",
"[",
"2",
"]",
"if",
"not",
"c",
".",
"isalpha",
"(",
")",
")",
"x",
"=",
"''",
".",
"join",
"(",
"c",
"for",
"c",
"in",
"tmp_str_list",
"[",
"3",
"]",
"if",
"not",
"c",
".",
"isalpha",
"(",
")",
")",
"y",
"=",
"''",
".",
"join",
"(",
"c",
"for",
"c",
"in",
"tmp_str_list",
"[",
"4",
"]",
"if",
"not",
"c",
".",
"isalpha",
"(",
")",
")",
"g",
"=",
"''",
".",
"join",
"(",
"c",
"for",
"c",
"in",
"tmp_str_list",
"[",
"5",
"]",
"if",
"not",
"c",
".",
"isalpha",
"(",
")",
")",
"s",
"=",
"''",
".",
"join",
"(",
"c",
"for",
"c",
"in",
"tmp_str_list",
"[",
"6",
"]",
"if",
"not",
"c",
".",
"isalpha",
"(",
")",
")",
"z",
"=",
"''",
".",
"join",
"(",
"c",
"for",
"c",
"in",
"tmp_str_list",
"[",
"7",
"]",
"if",
"not",
"c",
".",
"isalpha",
"(",
")",
")",
"#--------------------------------------------------#",
"# Write the report header and then the values above in the columns",
"#--------------------------------------------------#",
"try",
":",
"f",
".",
"write",
"(",
"saa",
"+",
"','",
"+",
"sza",
"+",
"','",
"+",
"p",
"+",
"','",
"+",
"x",
"+",
"','",
"+",
"y",
"+",
"','",
"+",
"g",
"+",
"','",
"+",
"s",
"+",
"','",
"+",
"z",
"+",
"','",
")",
"report",
"=",
"self",
".",
"read_pr_report",
"(",
"os",
".",
"path",
".",
"join",
"(",
"input_directory",
",",
"os",
".",
"path",
".",
"join",
"(",
"dir",
",",
"'report.txt'",
")",
")",
")",
"try",
":",
"# check to see if the parameter has the @ parameter. If it does pass to directional calculator",
"if",
"'parameter_dir'",
"in",
"locals",
"(",
")",
":",
"param_val",
"=",
"self",
".",
"calc_directional_aop",
"(",
"report",
",",
"parameter",
",",
"parameter_dir",
")",
"else",
":",
"param_val",
"=",
"report",
"[",
"parameter",
"]",
"param_str",
"=",
"str",
"(",
"param_val",
")",
"param_str",
"=",
"param_str",
".",
"strip",
"(",
"'['",
")",
".",
"strip",
"(",
"']'",
")",
".",
"replace",
"(",
"'\\''",
",",
"''",
")",
".",
"replace",
"(",
"'\\\\n'",
",",
"''",
")",
".",
"replace",
"(",
"' '",
",",
"''",
")",
"f",
".",
"write",
"(",
"param_str",
"+",
"'\\n'",
")",
"except",
":",
"lg",
".",
"exception",
"(",
"'Parameter :: '",
"+",
"str",
"(",
"parameter",
")",
"+",
"' :: Not in report'",
")",
"except",
":",
"lg",
".",
"warning",
"(",
"'Cannot find a report in directory :: '",
"+",
"dir",
")"
] |
5095d1cb98d4f67a7c3108c9282f2d59253e89a8
|
test
|
BatchFile.write_batch_to_file
|
This function creates a new file if he doesn't exist already, moves it to 'inputs/batch_file' folder
and writes data and comments associated to them.
Inputs: saa_values : <list> Sun Azimuth Angle (deg)
sza_values : <list> Sun Zenith Angle (deg)
batch_name : Name of the batch file.
p_values : <list> Phytoplankton linear scaling factor
x_value : <list> Scattering scaling factor
y_value : <list> Scattering slope factor
g_value : <list> CDOM absorption scaling factor
s_value : <list> CDOM absorption slope factor
s_value : <list> depth (m)
waveL_values : Wavelength values used to test.
verbose_value : Number concerning if the software explains a lot or not what it does.
phytoplankton_path : The path to the file containing phytoplankton data.
bottom_path : The path to the file containing bottom data.
nb_cpu : The number of CPU we want to allowed to the software.
executive_path : The path to the file where there is executive files using by PlanarRad.
report_parameter :
|
gui/gui_batch.py
|
def write_batch_to_file(self, filename='batch_test_default.txt'):
"""
This function creates a new file if he doesn't exist already, moves it to 'inputs/batch_file' folder
and writes data and comments associated to them.
Inputs: saa_values : <list> Sun Azimuth Angle (deg)
sza_values : <list> Sun Zenith Angle (deg)
batch_name : Name of the batch file.
p_values : <list> Phytoplankton linear scaling factor
x_value : <list> Scattering scaling factor
y_value : <list> Scattering slope factor
g_value : <list> CDOM absorption scaling factor
s_value : <list> CDOM absorption slope factor
s_value : <list> depth (m)
waveL_values : Wavelength values used to test.
verbose_value : Number concerning if the software explains a lot or not what it does.
phytoplankton_path : The path to the file containing phytoplankton data.
bottom_path : The path to the file containing bottom data.
nb_cpu : The number of CPU we want to allowed to the software.
executive_path : The path to the file where there is executive files using by PlanarRad.
report_parameter :
"""
#---------------------------------------------------------#
# The following is the file which is passed to planarradpy.
#---------------------------------------------------------#
self.batch_file = open(str(filename), 'w')
self.batch_file.write("""#----------------------------------------#
# Name of the batch run
#----------------------------------------#
batch_name = """)
self.batch_file.write(str(self.batch_name))
self.batch_file.write("""
#----------------------------------------#
# Bio-optical parameters list
#----------------------------------------#
saa_list = """)
self.batch_file.write(str(self.saa_values))
self.batch_file.write("""
sza_list = """)
self.batch_file.write(str(self.sza_values))
self.batch_file.write("""
p_list = """)
self.batch_file.write(str(self.p_values))
self.batch_file.write("""
x_list = """)
self.batch_file.write(str(self.x_value))
self.batch_file.write("""
y_list = """)
self.batch_file.write(str(self.y_value))
self.batch_file.write("""
g_list = """)
self.batch_file.write(str(self.g_value))
self.batch_file.write("""
s_list = """)
self.batch_file.write(str(self.s_value))
self.batch_file.write("""
z_list = """)
self.batch_file.write(str(self.z_value))
self.batch_file.write("""
#----------------------------------------#
# Wavelengths
# All IOPs are interpolated to these
# Wavelengths
#----------------------------------------#
wavelengths = """)
self.batch_file.write(str(self.wavelength_values))
self.batch_file.write("""
#----------------------------------------#
# Number of CPUs
# -1 means query the number of CPUs
#----------------------------------------#
num_cpus = """)
self.batch_file.write(str(self.nb_cpu))
self.batch_file.write("""
#----------------------------------------#
# Path of Planarrad
#----------------------------------------#
exec_path = """)
self.batch_file.write(self.executive_path)
self.batch_file.write("""
#----------------------------------------#
# Logging level
#----------------------------------------#
verbose = """)
self.batch_file.write(str(self.verbose_value))
self.batch_file.write("""
#----------------------------------------#
# File paths
# Using absolute paths
#----------------------------------------#
phytoplankton_absorption_file =""")
self.batch_file.write(self.phytoplankton_path)
self.batch_file.write("""
bottom_reflectance_file = """)
self.batch_file.write(self.bottom_path)
self.batch_file.write("""
#----------------------------------------#
# Set the parameter to report
#----------------------------------------#
report_parameter = """)
self.batch_file.write(str(self.report_parameter_value))
self.batch_file.write("""
""")
self.batch_file.close()
#-------------------------------------------------------------------#
# The following is the action to move the file to the good directory.
#-------------------------------------------------------------------#
src = './' + filename
dst = './inputs/batch_files'
os.system("mv" + " " + src + " " + dst)
|
def write_batch_to_file(self, filename='batch_test_default.txt'):
"""
This function creates a new file if he doesn't exist already, moves it to 'inputs/batch_file' folder
and writes data and comments associated to them.
Inputs: saa_values : <list> Sun Azimuth Angle (deg)
sza_values : <list> Sun Zenith Angle (deg)
batch_name : Name of the batch file.
p_values : <list> Phytoplankton linear scaling factor
x_value : <list> Scattering scaling factor
y_value : <list> Scattering slope factor
g_value : <list> CDOM absorption scaling factor
s_value : <list> CDOM absorption slope factor
s_value : <list> depth (m)
waveL_values : Wavelength values used to test.
verbose_value : Number concerning if the software explains a lot or not what it does.
phytoplankton_path : The path to the file containing phytoplankton data.
bottom_path : The path to the file containing bottom data.
nb_cpu : The number of CPU we want to allowed to the software.
executive_path : The path to the file where there is executive files using by PlanarRad.
report_parameter :
"""
#---------------------------------------------------------#
# The following is the file which is passed to planarradpy.
#---------------------------------------------------------#
self.batch_file = open(str(filename), 'w')
self.batch_file.write("""#----------------------------------------#
# Name of the batch run
#----------------------------------------#
batch_name = """)
self.batch_file.write(str(self.batch_name))
self.batch_file.write("""
#----------------------------------------#
# Bio-optical parameters list
#----------------------------------------#
saa_list = """)
self.batch_file.write(str(self.saa_values))
self.batch_file.write("""
sza_list = """)
self.batch_file.write(str(self.sza_values))
self.batch_file.write("""
p_list = """)
self.batch_file.write(str(self.p_values))
self.batch_file.write("""
x_list = """)
self.batch_file.write(str(self.x_value))
self.batch_file.write("""
y_list = """)
self.batch_file.write(str(self.y_value))
self.batch_file.write("""
g_list = """)
self.batch_file.write(str(self.g_value))
self.batch_file.write("""
s_list = """)
self.batch_file.write(str(self.s_value))
self.batch_file.write("""
z_list = """)
self.batch_file.write(str(self.z_value))
self.batch_file.write("""
#----------------------------------------#
# Wavelengths
# All IOPs are interpolated to these
# Wavelengths
#----------------------------------------#
wavelengths = """)
self.batch_file.write(str(self.wavelength_values))
self.batch_file.write("""
#----------------------------------------#
# Number of CPUs
# -1 means query the number of CPUs
#----------------------------------------#
num_cpus = """)
self.batch_file.write(str(self.nb_cpu))
self.batch_file.write("""
#----------------------------------------#
# Path of Planarrad
#----------------------------------------#
exec_path = """)
self.batch_file.write(self.executive_path)
self.batch_file.write("""
#----------------------------------------#
# Logging level
#----------------------------------------#
verbose = """)
self.batch_file.write(str(self.verbose_value))
self.batch_file.write("""
#----------------------------------------#
# File paths
# Using absolute paths
#----------------------------------------#
phytoplankton_absorption_file =""")
self.batch_file.write(self.phytoplankton_path)
self.batch_file.write("""
bottom_reflectance_file = """)
self.batch_file.write(self.bottom_path)
self.batch_file.write("""
#----------------------------------------#
# Set the parameter to report
#----------------------------------------#
report_parameter = """)
self.batch_file.write(str(self.report_parameter_value))
self.batch_file.write("""
""")
self.batch_file.close()
#-------------------------------------------------------------------#
# The following is the action to move the file to the good directory.
#-------------------------------------------------------------------#
src = './' + filename
dst = './inputs/batch_files'
os.system("mv" + " " + src + " " + dst)
|
[
"This",
"function",
"creates",
"a",
"new",
"file",
"if",
"he",
"doesn",
"t",
"exist",
"already",
"moves",
"it",
"to",
"inputs",
"/",
"batch_file",
"folder",
"and",
"writes",
"data",
"and",
"comments",
"associated",
"to",
"them",
".",
"Inputs",
":",
"saa_values",
":",
"<list",
">",
"Sun",
"Azimuth",
"Angle",
"(",
"deg",
")",
"sza_values",
":",
"<list",
">",
"Sun",
"Zenith",
"Angle",
"(",
"deg",
")",
"batch_name",
":",
"Name",
"of",
"the",
"batch",
"file",
".",
"p_values",
":",
"<list",
">",
"Phytoplankton",
"linear",
"scaling",
"factor",
"x_value",
":",
"<list",
">",
"Scattering",
"scaling",
"factor",
"y_value",
":",
"<list",
">",
"Scattering",
"slope",
"factor",
"g_value",
":",
"<list",
">",
"CDOM",
"absorption",
"scaling",
"factor",
"s_value",
":",
"<list",
">",
"CDOM",
"absorption",
"slope",
"factor",
"s_value",
":",
"<list",
">",
"depth",
"(",
"m",
")",
"waveL_values",
":",
"Wavelength",
"values",
"used",
"to",
"test",
".",
"verbose_value",
":",
"Number",
"concerning",
"if",
"the",
"software",
"explains",
"a",
"lot",
"or",
"not",
"what",
"it",
"does",
".",
"phytoplankton_path",
":",
"The",
"path",
"to",
"the",
"file",
"containing",
"phytoplankton",
"data",
".",
"bottom_path",
":",
"The",
"path",
"to",
"the",
"file",
"containing",
"bottom",
"data",
".",
"nb_cpu",
":",
"The",
"number",
"of",
"CPU",
"we",
"want",
"to",
"allowed",
"to",
"the",
"software",
".",
"executive_path",
":",
"The",
"path",
"to",
"the",
"file",
"where",
"there",
"is",
"executive",
"files",
"using",
"by",
"PlanarRad",
".",
"report_parameter",
":"
] |
marrabld/planarradpy
|
python
|
https://github.com/marrabld/planarradpy/blob/5095d1cb98d4f67a7c3108c9282f2d59253e89a8/gui/gui_batch.py#L31-L152
|
[
"def",
"write_batch_to_file",
"(",
"self",
",",
"filename",
"=",
"'batch_test_default.txt'",
")",
":",
"#---------------------------------------------------------#",
"# The following is the file which is passed to planarradpy.",
"#---------------------------------------------------------#",
"self",
".",
"batch_file",
"=",
"open",
"(",
"str",
"(",
"filename",
")",
",",
"'w'",
")",
"self",
".",
"batch_file",
".",
"write",
"(",
"\"\"\"#----------------------------------------#\n# Name of the batch run\n#----------------------------------------#\nbatch_name = \"\"\"",
")",
"self",
".",
"batch_file",
".",
"write",
"(",
"str",
"(",
"self",
".",
"batch_name",
")",
")",
"self",
".",
"batch_file",
".",
"write",
"(",
"\"\"\"\n\n#----------------------------------------#\n# Bio-optical parameters list\n#----------------------------------------#\nsaa_list = \"\"\"",
")",
"self",
".",
"batch_file",
".",
"write",
"(",
"str",
"(",
"self",
".",
"saa_values",
")",
")",
"self",
".",
"batch_file",
".",
"write",
"(",
"\"\"\"\nsza_list = \"\"\"",
")",
"self",
".",
"batch_file",
".",
"write",
"(",
"str",
"(",
"self",
".",
"sza_values",
")",
")",
"self",
".",
"batch_file",
".",
"write",
"(",
"\"\"\"\np_list = \"\"\"",
")",
"self",
".",
"batch_file",
".",
"write",
"(",
"str",
"(",
"self",
".",
"p_values",
")",
")",
"self",
".",
"batch_file",
".",
"write",
"(",
"\"\"\"\nx_list = \"\"\"",
")",
"self",
".",
"batch_file",
".",
"write",
"(",
"str",
"(",
"self",
".",
"x_value",
")",
")",
"self",
".",
"batch_file",
".",
"write",
"(",
"\"\"\"\ny_list = \"\"\"",
")",
"self",
".",
"batch_file",
".",
"write",
"(",
"str",
"(",
"self",
".",
"y_value",
")",
")",
"self",
".",
"batch_file",
".",
"write",
"(",
"\"\"\"\ng_list = \"\"\"",
")",
"self",
".",
"batch_file",
".",
"write",
"(",
"str",
"(",
"self",
".",
"g_value",
")",
")",
"self",
".",
"batch_file",
".",
"write",
"(",
"\"\"\"\ns_list = \"\"\"",
")",
"self",
".",
"batch_file",
".",
"write",
"(",
"str",
"(",
"self",
".",
"s_value",
")",
")",
"self",
".",
"batch_file",
".",
"write",
"(",
"\"\"\"\nz_list = \"\"\"",
")",
"self",
".",
"batch_file",
".",
"write",
"(",
"str",
"(",
"self",
".",
"z_value",
")",
")",
"self",
".",
"batch_file",
".",
"write",
"(",
"\"\"\"\n\n#----------------------------------------#\n# Wavelengths\n# All IOPs are interpolated to these \n# Wavelengths\n#----------------------------------------#\nwavelengths = \"\"\"",
")",
"self",
".",
"batch_file",
".",
"write",
"(",
"str",
"(",
"self",
".",
"wavelength_values",
")",
")",
"self",
".",
"batch_file",
".",
"write",
"(",
"\"\"\"\n\n#----------------------------------------#\n# Number of CPUs\n# -1 means query the number of CPUs\n#----------------------------------------#\nnum_cpus = \"\"\"",
")",
"self",
".",
"batch_file",
".",
"write",
"(",
"str",
"(",
"self",
".",
"nb_cpu",
")",
")",
"self",
".",
"batch_file",
".",
"write",
"(",
"\"\"\"\n\n#----------------------------------------#\n# Path of Planarrad\n#----------------------------------------#\nexec_path = \"\"\"",
")",
"self",
".",
"batch_file",
".",
"write",
"(",
"self",
".",
"executive_path",
")",
"self",
".",
"batch_file",
".",
"write",
"(",
"\"\"\"\n\n#----------------------------------------#\n# Logging level\n#----------------------------------------#\nverbose = \"\"\"",
")",
"self",
".",
"batch_file",
".",
"write",
"(",
"str",
"(",
"self",
".",
"verbose_value",
")",
")",
"self",
".",
"batch_file",
".",
"write",
"(",
"\"\"\"\n\n#----------------------------------------#\n# File paths\n# Using absolute paths\n#----------------------------------------#\nphytoplankton_absorption_file =\"\"\"",
")",
"self",
".",
"batch_file",
".",
"write",
"(",
"self",
".",
"phytoplankton_path",
")",
"self",
".",
"batch_file",
".",
"write",
"(",
"\"\"\"\nbottom_reflectance_file = \"\"\"",
")",
"self",
".",
"batch_file",
".",
"write",
"(",
"self",
".",
"bottom_path",
")",
"self",
".",
"batch_file",
".",
"write",
"(",
"\"\"\"\n\n#----------------------------------------#\n# Set the parameter to report\n#----------------------------------------#\nreport_parameter = \"\"\"",
")",
"self",
".",
"batch_file",
".",
"write",
"(",
"str",
"(",
"self",
".",
"report_parameter_value",
")",
")",
"self",
".",
"batch_file",
".",
"write",
"(",
"\"\"\"\n\n\"\"\"",
")",
"self",
".",
"batch_file",
".",
"close",
"(",
")",
"#-------------------------------------------------------------------#",
"# The following is the action to move the file to the good directory.",
"#-------------------------------------------------------------------#",
"src",
"=",
"'./'",
"+",
"filename",
"dst",
"=",
"'./inputs/batch_files'",
"os",
".",
"system",
"(",
"\"mv\"",
"+",
"\" \"",
"+",
"src",
"+",
"\" \"",
"+",
"dst",
")"
] |
5095d1cb98d4f67a7c3108c9282f2d59253e89a8
|
test
|
MplCanvas.update_fields
|
This function will update data that we need to display curves, from "data_processing" from "gui_mainLayout"
Inputs : x_data : An array with wavelengths.
y_data : An array with curve's data.
num_plot : The line, curve to plot.
|
gui/matplotlibwidgetFile.py
|
def update_fields(self, x_data, y_data, num_plot):
"""
This function will update data that we need to display curves, from "data_processing" from "gui_mainLayout"
Inputs : x_data : An array with wavelengths.
y_data : An array with curve's data.
num_plot : The line, curve to plot.
"""
self.x_data = x_data
self.y_data = y_data
self.num_plot = num_plot
|
def update_fields(self, x_data, y_data, num_plot):
"""
This function will update data that we need to display curves, from "data_processing" from "gui_mainLayout"
Inputs : x_data : An array with wavelengths.
y_data : An array with curve's data.
num_plot : The line, curve to plot.
"""
self.x_data = x_data
self.y_data = y_data
self.num_plot = num_plot
|
[
"This",
"function",
"will",
"update",
"data",
"that",
"we",
"need",
"to",
"display",
"curves",
"from",
"data_processing",
"from",
"gui_mainLayout",
"Inputs",
":",
"x_data",
":",
"An",
"array",
"with",
"wavelengths",
".",
"y_data",
":",
"An",
"array",
"with",
"curve",
"s",
"data",
".",
"num_plot",
":",
"The",
"line",
"curve",
"to",
"plot",
"."
] |
marrabld/planarradpy
|
python
|
https://github.com/marrabld/planarradpy/blob/5095d1cb98d4f67a7c3108c9282f2d59253e89a8/gui/matplotlibwidgetFile.py#L30-L39
|
[
"def",
"update_fields",
"(",
"self",
",",
"x_data",
",",
"y_data",
",",
"num_plot",
")",
":",
"self",
".",
"x_data",
"=",
"x_data",
"self",
".",
"y_data",
"=",
"y_data",
"self",
".",
"num_plot",
"=",
"num_plot"
] |
5095d1cb98d4f67a7c3108c9282f2d59253e89a8
|
test
|
MplCanvas.display_graphic
|
This function plots results of a file into the canvas.
Inputs : flag_curves : A boolean to know with we have to plot all curves or not.
ui : The main_Window.
|
gui/matplotlibwidgetFile.py
|
def display_graphic(self, flag_curves, ui):
"""
This function plots results of a file into the canvas.
Inputs : flag_curves : A boolean to know with we have to plot all curves or not.
ui : The main_Window.
"""
ui.graphic_widget.canvas.picture.clear()
x = scipy.linspace(self.x_data[0], self.x_data[-1], len(self.x_data)) #X-axis
curve_wanted = 0 #Iterator on lines of y_data
for curve in self.y_data:
if flag_curves:
if curve_wanted == self.num_plot: #If the iterator is equal of the slider's value, this curve is different
ui.graphic_widget.canvas.picture.plot(x, curve, '-r',
label='Case : {0}/{1}'.format(str(curve_wanted + 1),
str(len(self.y_data))),
linewidth=4)
else:
ui.graphic_widget.canvas.picture.plot(x, curve, '0.75')
else:
if curve_wanted == self.num_plot:
ui.graphic_widget.canvas.picture.plot(x, curve, '-r',
label='Case : {0}/{1}'.format(str(curve_wanted + 1),
str(len(self.y_data))))
curve_wanted += 1
ui.graphic_widget.canvas.picture.set_title('Rrs.csv')
ui.graphic_widget.canvas.picture.set_xlabel('Wavelength (${nm}$)')
ui.graphic_widget.canvas.picture.set_ylabel('Reflectance ($Sr^{-1}$)')
self.legend = ui.graphic_widget.canvas.picture.legend() #Display in a legend curves's labels.
ui.graphic_widget.canvas.picture.legend(bbox_to_anchor=(1.1, 1.05))
ui.graphic_widget.canvas.draw()
|
def display_graphic(self, flag_curves, ui):
"""
This function plots results of a file into the canvas.
Inputs : flag_curves : A boolean to know with we have to plot all curves or not.
ui : The main_Window.
"""
ui.graphic_widget.canvas.picture.clear()
x = scipy.linspace(self.x_data[0], self.x_data[-1], len(self.x_data)) #X-axis
curve_wanted = 0 #Iterator on lines of y_data
for curve in self.y_data:
if flag_curves:
if curve_wanted == self.num_plot: #If the iterator is equal of the slider's value, this curve is different
ui.graphic_widget.canvas.picture.plot(x, curve, '-r',
label='Case : {0}/{1}'.format(str(curve_wanted + 1),
str(len(self.y_data))),
linewidth=4)
else:
ui.graphic_widget.canvas.picture.plot(x, curve, '0.75')
else:
if curve_wanted == self.num_plot:
ui.graphic_widget.canvas.picture.plot(x, curve, '-r',
label='Case : {0}/{1}'.format(str(curve_wanted + 1),
str(len(self.y_data))))
curve_wanted += 1
ui.graphic_widget.canvas.picture.set_title('Rrs.csv')
ui.graphic_widget.canvas.picture.set_xlabel('Wavelength (${nm}$)')
ui.graphic_widget.canvas.picture.set_ylabel('Reflectance ($Sr^{-1}$)')
self.legend = ui.graphic_widget.canvas.picture.legend() #Display in a legend curves's labels.
ui.graphic_widget.canvas.picture.legend(bbox_to_anchor=(1.1, 1.05))
ui.graphic_widget.canvas.draw()
|
[
"This",
"function",
"plots",
"results",
"of",
"a",
"file",
"into",
"the",
"canvas",
".",
"Inputs",
":",
"flag_curves",
":",
"A",
"boolean",
"to",
"know",
"with",
"we",
"have",
"to",
"plot",
"all",
"curves",
"or",
"not",
".",
"ui",
":",
"The",
"main_Window",
"."
] |
marrabld/planarradpy
|
python
|
https://github.com/marrabld/planarradpy/blob/5095d1cb98d4f67a7c3108c9282f2d59253e89a8/gui/matplotlibwidgetFile.py#L41-L74
|
[
"def",
"display_graphic",
"(",
"self",
",",
"flag_curves",
",",
"ui",
")",
":",
"ui",
".",
"graphic_widget",
".",
"canvas",
".",
"picture",
".",
"clear",
"(",
")",
"x",
"=",
"scipy",
".",
"linspace",
"(",
"self",
".",
"x_data",
"[",
"0",
"]",
",",
"self",
".",
"x_data",
"[",
"-",
"1",
"]",
",",
"len",
"(",
"self",
".",
"x_data",
")",
")",
"#X-axis",
"curve_wanted",
"=",
"0",
"#Iterator on lines of y_data",
"for",
"curve",
"in",
"self",
".",
"y_data",
":",
"if",
"flag_curves",
":",
"if",
"curve_wanted",
"==",
"self",
".",
"num_plot",
":",
"#If the iterator is equal of the slider's value, this curve is different",
"ui",
".",
"graphic_widget",
".",
"canvas",
".",
"picture",
".",
"plot",
"(",
"x",
",",
"curve",
",",
"'-r'",
",",
"label",
"=",
"'Case : {0}/{1}'",
".",
"format",
"(",
"str",
"(",
"curve_wanted",
"+",
"1",
")",
",",
"str",
"(",
"len",
"(",
"self",
".",
"y_data",
")",
")",
")",
",",
"linewidth",
"=",
"4",
")",
"else",
":",
"ui",
".",
"graphic_widget",
".",
"canvas",
".",
"picture",
".",
"plot",
"(",
"x",
",",
"curve",
",",
"'0.75'",
")",
"else",
":",
"if",
"curve_wanted",
"==",
"self",
".",
"num_plot",
":",
"ui",
".",
"graphic_widget",
".",
"canvas",
".",
"picture",
".",
"plot",
"(",
"x",
",",
"curve",
",",
"'-r'",
",",
"label",
"=",
"'Case : {0}/{1}'",
".",
"format",
"(",
"str",
"(",
"curve_wanted",
"+",
"1",
")",
",",
"str",
"(",
"len",
"(",
"self",
".",
"y_data",
")",
")",
")",
")",
"curve_wanted",
"+=",
"1",
"ui",
".",
"graphic_widget",
".",
"canvas",
".",
"picture",
".",
"set_title",
"(",
"'Rrs.csv'",
")",
"ui",
".",
"graphic_widget",
".",
"canvas",
".",
"picture",
".",
"set_xlabel",
"(",
"'Wavelength (${nm}$)'",
")",
"ui",
".",
"graphic_widget",
".",
"canvas",
".",
"picture",
".",
"set_ylabel",
"(",
"'Reflectance ($Sr^{-1}$)'",
")",
"self",
".",
"legend",
"=",
"ui",
".",
"graphic_widget",
".",
"canvas",
".",
"picture",
".",
"legend",
"(",
")",
"#Display in a legend curves's labels.",
"ui",
".",
"graphic_widget",
".",
"canvas",
".",
"picture",
".",
"legend",
"(",
"bbox_to_anchor",
"=",
"(",
"1.1",
",",
"1.05",
")",
")",
"ui",
".",
"graphic_widget",
".",
"canvas",
".",
"draw",
"(",
")"
] |
5095d1cb98d4f67a7c3108c9282f2d59253e89a8
|
test
|
SignalHandler.set_handler
|
Takes a list of signals and sets a handler for them
|
listen/signal_handler.py
|
def set_handler(self, signals, handler=signal.SIG_DFL):
""" Takes a list of signals and sets a handler for them """
for sig in signals:
self.log.debug("Creating handler for signal: {0}".format(sig))
signal.signal(sig, handler)
|
def set_handler(self, signals, handler=signal.SIG_DFL):
""" Takes a list of signals and sets a handler for them """
for sig in signals:
self.log.debug("Creating handler for signal: {0}".format(sig))
signal.signal(sig, handler)
|
[
"Takes",
"a",
"list",
"of",
"signals",
"and",
"sets",
"a",
"handler",
"for",
"them"
] |
antevens/listen
|
python
|
https://github.com/antevens/listen/blob/d3ddff8e7fbfb672c5bd7f6f4febeb5e921d8c67/listen/signal_handler.py#L55-L59
|
[
"def",
"set_handler",
"(",
"self",
",",
"signals",
",",
"handler",
"=",
"signal",
".",
"SIG_DFL",
")",
":",
"for",
"sig",
"in",
"signals",
":",
"self",
".",
"log",
".",
"debug",
"(",
"\"Creating handler for signal: {0}\"",
".",
"format",
"(",
"sig",
")",
")",
"signal",
".",
"signal",
"(",
"sig",
",",
"handler",
")"
] |
d3ddff8e7fbfb672c5bd7f6f4febeb5e921d8c67
|
test
|
SignalHandler.pseudo_handler
|
Pseudo handler placeholder while signal is beind processed
|
listen/signal_handler.py
|
def pseudo_handler(self, signum, frame):
""" Pseudo handler placeholder while signal is beind processed """
self.log.warn("Received sigal {0} but system is already busy processing a previous signal, current frame: {1}".format(signum, str(frame)))
|
def pseudo_handler(self, signum, frame):
""" Pseudo handler placeholder while signal is beind processed """
self.log.warn("Received sigal {0} but system is already busy processing a previous signal, current frame: {1}".format(signum, str(frame)))
|
[
"Pseudo",
"handler",
"placeholder",
"while",
"signal",
"is",
"beind",
"processed"
] |
antevens/listen
|
python
|
https://github.com/antevens/listen/blob/d3ddff8e7fbfb672c5bd7f6f4febeb5e921d8c67/listen/signal_handler.py#L61-L63
|
[
"def",
"pseudo_handler",
"(",
"self",
",",
"signum",
",",
"frame",
")",
":",
"self",
".",
"log",
".",
"warn",
"(",
"\"Received sigal {0} but system is already busy processing a previous signal, current frame: {1}\"",
".",
"format",
"(",
"signum",
",",
"str",
"(",
"frame",
")",
")",
")"
] |
d3ddff8e7fbfb672c5bd7f6f4febeb5e921d8c67
|
test
|
SignalHandler.default_handler
|
Default handler, a generic callback method for signal processing
|
listen/signal_handler.py
|
def default_handler(self, signum, frame):
""" Default handler, a generic callback method for signal processing"""
self.log.debug("Signal handler called with signal: {0}".format(signum))
# 1. If signal is HUP restart the python process
# 2. If signal is TERM, INT or QUIT we try to cleanup then exit with -1
# 3. If signal is STOP or TSTP we pause
# 4. If signal is CONT or USR1 we continue
# 5. If signal is INFO we print status
# 6. If signal is USR2 we we abort and then exit with -1
if signum in self.restart_signals:
self.set_handler(self.handled_signals, self.pseudo_handler)
self._cleanup()
os.execl('python', 'python', * sys.argv)
elif signum in self.abort_signals:
self.abort(signum)
elif signum in self.pause_signals:
self.pause(signum)
elif signum in self.resume_signals:
self.resume(signum)
elif signum in self.status_signals:
self.status(signum)
elif signum in self.error_signals:
self.log.error('Signal handler received error signal from an external process, aborting')
self.abort(signum)
else:
self.log.error("Unhandled signal received: {0}".format(signum))
raise
|
def default_handler(self, signum, frame):
""" Default handler, a generic callback method for signal processing"""
self.log.debug("Signal handler called with signal: {0}".format(signum))
# 1. If signal is HUP restart the python process
# 2. If signal is TERM, INT or QUIT we try to cleanup then exit with -1
# 3. If signal is STOP or TSTP we pause
# 4. If signal is CONT or USR1 we continue
# 5. If signal is INFO we print status
# 6. If signal is USR2 we we abort and then exit with -1
if signum in self.restart_signals:
self.set_handler(self.handled_signals, self.pseudo_handler)
self._cleanup()
os.execl('python', 'python', * sys.argv)
elif signum in self.abort_signals:
self.abort(signum)
elif signum in self.pause_signals:
self.pause(signum)
elif signum in self.resume_signals:
self.resume(signum)
elif signum in self.status_signals:
self.status(signum)
elif signum in self.error_signals:
self.log.error('Signal handler received error signal from an external process, aborting')
self.abort(signum)
else:
self.log.error("Unhandled signal received: {0}".format(signum))
raise
|
[
"Default",
"handler",
"a",
"generic",
"callback",
"method",
"for",
"signal",
"processing"
] |
antevens/listen
|
python
|
https://github.com/antevens/listen/blob/d3ddff8e7fbfb672c5bd7f6f4febeb5e921d8c67/listen/signal_handler.py#L65-L92
|
[
"def",
"default_handler",
"(",
"self",
",",
"signum",
",",
"frame",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"\"Signal handler called with signal: {0}\"",
".",
"format",
"(",
"signum",
")",
")",
"# 1. If signal is HUP restart the python process",
"# 2. If signal is TERM, INT or QUIT we try to cleanup then exit with -1",
"# 3. If signal is STOP or TSTP we pause",
"# 4. If signal is CONT or USR1 we continue",
"# 5. If signal is INFO we print status",
"# 6. If signal is USR2 we we abort and then exit with -1",
"if",
"signum",
"in",
"self",
".",
"restart_signals",
":",
"self",
".",
"set_handler",
"(",
"self",
".",
"handled_signals",
",",
"self",
".",
"pseudo_handler",
")",
"self",
".",
"_cleanup",
"(",
")",
"os",
".",
"execl",
"(",
"'python'",
",",
"'python'",
",",
"*",
"sys",
".",
"argv",
")",
"elif",
"signum",
"in",
"self",
".",
"abort_signals",
":",
"self",
".",
"abort",
"(",
"signum",
")",
"elif",
"signum",
"in",
"self",
".",
"pause_signals",
":",
"self",
".",
"pause",
"(",
"signum",
")",
"elif",
"signum",
"in",
"self",
".",
"resume_signals",
":",
"self",
".",
"resume",
"(",
"signum",
")",
"elif",
"signum",
"in",
"self",
".",
"status_signals",
":",
"self",
".",
"status",
"(",
"signum",
")",
"elif",
"signum",
"in",
"self",
".",
"error_signals",
":",
"self",
".",
"log",
".",
"error",
"(",
"'Signal handler received error signal from an external process, aborting'",
")",
"self",
".",
"abort",
"(",
"signum",
")",
"else",
":",
"self",
".",
"log",
".",
"error",
"(",
"\"Unhandled signal received: {0}\"",
".",
"format",
"(",
"signum",
")",
")",
"raise"
] |
d3ddff8e7fbfb672c5bd7f6f4febeb5e921d8c67
|
test
|
SignalHandler.pause
|
Pause execution, execution will resume in X seconds or when the
appropriate resume signal is received. Execution will jump to the
callback_function, the default callback function is the handler
method which will run all tasks registered with the reg_on_resume
methodi.
Returns True if timer expired, otherwise returns False
|
listen/signal_handler.py
|
def pause(self, signum, seconds=0, callback_function=None):
"""
Pause execution, execution will resume in X seconds or when the
appropriate resume signal is received. Execution will jump to the
callback_function, the default callback function is the handler
method which will run all tasks registered with the reg_on_resume
methodi.
Returns True if timer expired, otherwise returns False
"""
if callback_function is None:
callback_function = self.default_handler
if seconds > 0:
self.log.info("Signal handler pausing for {0} seconds or until it receives SIGALRM or SIGCONT".format(seconds))
signal.signal(signal.SIGALRM, callback_function)
signal.alarm(seconds)
else:
self.log.info('Signal handler pausing until it receives SIGALRM or SIGCONT')
signal.signal(signal.SIGCONT, callback_function)
signal.pause()
self.log.info('Signal handler resuming from pause')
if signum == signal.SIGALRM:
return True
else:
return False
|
def pause(self, signum, seconds=0, callback_function=None):
"""
Pause execution, execution will resume in X seconds or when the
appropriate resume signal is received. Execution will jump to the
callback_function, the default callback function is the handler
method which will run all tasks registered with the reg_on_resume
methodi.
Returns True if timer expired, otherwise returns False
"""
if callback_function is None:
callback_function = self.default_handler
if seconds > 0:
self.log.info("Signal handler pausing for {0} seconds or until it receives SIGALRM or SIGCONT".format(seconds))
signal.signal(signal.SIGALRM, callback_function)
signal.alarm(seconds)
else:
self.log.info('Signal handler pausing until it receives SIGALRM or SIGCONT')
signal.signal(signal.SIGCONT, callback_function)
signal.pause()
self.log.info('Signal handler resuming from pause')
if signum == signal.SIGALRM:
return True
else:
return False
|
[
"Pause",
"execution",
"execution",
"will",
"resume",
"in",
"X",
"seconds",
"or",
"when",
"the",
"appropriate",
"resume",
"signal",
"is",
"received",
".",
"Execution",
"will",
"jump",
"to",
"the",
"callback_function",
"the",
"default",
"callback",
"function",
"is",
"the",
"handler",
"method",
"which",
"will",
"run",
"all",
"tasks",
"registered",
"with",
"the",
"reg_on_resume",
"methodi",
".",
"Returns",
"True",
"if",
"timer",
"expired",
"otherwise",
"returns",
"False"
] |
antevens/listen
|
python
|
https://github.com/antevens/listen/blob/d3ddff8e7fbfb672c5bd7f6f4febeb5e921d8c67/listen/signal_handler.py#L94-L117
|
[
"def",
"pause",
"(",
"self",
",",
"signum",
",",
"seconds",
"=",
"0",
",",
"callback_function",
"=",
"None",
")",
":",
"if",
"callback_function",
"is",
"None",
":",
"callback_function",
"=",
"self",
".",
"default_handler",
"if",
"seconds",
">",
"0",
":",
"self",
".",
"log",
".",
"info",
"(",
"\"Signal handler pausing for {0} seconds or until it receives SIGALRM or SIGCONT\"",
".",
"format",
"(",
"seconds",
")",
")",
"signal",
".",
"signal",
"(",
"signal",
".",
"SIGALRM",
",",
"callback_function",
")",
"signal",
".",
"alarm",
"(",
"seconds",
")",
"else",
":",
"self",
".",
"log",
".",
"info",
"(",
"'Signal handler pausing until it receives SIGALRM or SIGCONT'",
")",
"signal",
".",
"signal",
"(",
"signal",
".",
"SIGCONT",
",",
"callback_function",
")",
"signal",
".",
"pause",
"(",
")",
"self",
".",
"log",
".",
"info",
"(",
"'Signal handler resuming from pause'",
")",
"if",
"signum",
"==",
"signal",
".",
"SIGALRM",
":",
"return",
"True",
"else",
":",
"return",
"False"
] |
d3ddff8e7fbfb672c5bd7f6f4febeb5e921d8c67
|
test
|
SignalHandler.abort
|
Run all abort tasks, then all exit tasks, then exit with error
return status
|
listen/signal_handler.py
|
def abort(self, signum):
""" Run all abort tasks, then all exit tasks, then exit with error
return status"""
self.log.info('Signal handler received abort request')
self._abort(signum)
self._exit(signum)
os._exit(1)
|
def abort(self, signum):
""" Run all abort tasks, then all exit tasks, then exit with error
return status"""
self.log.info('Signal handler received abort request')
self._abort(signum)
self._exit(signum)
os._exit(1)
|
[
"Run",
"all",
"abort",
"tasks",
"then",
"all",
"exit",
"tasks",
"then",
"exit",
"with",
"error",
"return",
"status"
] |
antevens/listen
|
python
|
https://github.com/antevens/listen/blob/d3ddff8e7fbfb672c5bd7f6f4febeb5e921d8c67/listen/signal_handler.py#L119-L125
|
[
"def",
"abort",
"(",
"self",
",",
"signum",
")",
":",
"self",
".",
"log",
".",
"info",
"(",
"'Signal handler received abort request'",
")",
"self",
".",
"_abort",
"(",
"signum",
")",
"self",
".",
"_exit",
"(",
"signum",
")",
"os",
".",
"_exit",
"(",
"1",
")"
] |
d3ddff8e7fbfb672c5bd7f6f4febeb5e921d8c67
|
test
|
SignalHandler.status
|
Run all status tasks, then run all tasks in the resume queue
|
listen/signal_handler.py
|
def status(self, signum):
""" Run all status tasks, then run all tasks in the resume queue"""
self.log.debug('Signal handler got status signal')
new_status_callbacks = []
for status_call in self.status_callbacks:
# If callback is non persistent we remove it
try:
self.log.debug("Calling {0}({1},{2})".format(status_call['function'].__name__, status_call['args'], status_call['kwargs']))
except AttributeError:
self.log.debug("Calling unbound function/method {0}".format(str(status_call)))
apply(status_call['function'], status_call['args'], status_call['kwargs'])
if status_call['persistent']:
new_status_callbacks.append(status_call)
self.status_callbacks = new_status_callbacks
self._resume(signum)
|
def status(self, signum):
""" Run all status tasks, then run all tasks in the resume queue"""
self.log.debug('Signal handler got status signal')
new_status_callbacks = []
for status_call in self.status_callbacks:
# If callback is non persistent we remove it
try:
self.log.debug("Calling {0}({1},{2})".format(status_call['function'].__name__, status_call['args'], status_call['kwargs']))
except AttributeError:
self.log.debug("Calling unbound function/method {0}".format(str(status_call)))
apply(status_call['function'], status_call['args'], status_call['kwargs'])
if status_call['persistent']:
new_status_callbacks.append(status_call)
self.status_callbacks = new_status_callbacks
self._resume(signum)
|
[
"Run",
"all",
"status",
"tasks",
"then",
"run",
"all",
"tasks",
"in",
"the",
"resume",
"queue"
] |
antevens/listen
|
python
|
https://github.com/antevens/listen/blob/d3ddff8e7fbfb672c5bd7f6f4febeb5e921d8c67/listen/signal_handler.py#L127-L142
|
[
"def",
"status",
"(",
"self",
",",
"signum",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'Signal handler got status signal'",
")",
"new_status_callbacks",
"=",
"[",
"]",
"for",
"status_call",
"in",
"self",
".",
"status_callbacks",
":",
"# If callback is non persistent we remove it",
"try",
":",
"self",
".",
"log",
".",
"debug",
"(",
"\"Calling {0}({1},{2})\"",
".",
"format",
"(",
"status_call",
"[",
"'function'",
"]",
".",
"__name__",
",",
"status_call",
"[",
"'args'",
"]",
",",
"status_call",
"[",
"'kwargs'",
"]",
")",
")",
"except",
"AttributeError",
":",
"self",
".",
"log",
".",
"debug",
"(",
"\"Calling unbound function/method {0}\"",
".",
"format",
"(",
"str",
"(",
"status_call",
")",
")",
")",
"apply",
"(",
"status_call",
"[",
"'function'",
"]",
",",
"status_call",
"[",
"'args'",
"]",
",",
"status_call",
"[",
"'kwargs'",
"]",
")",
"if",
"status_call",
"[",
"'persistent'",
"]",
":",
"new_status_callbacks",
".",
"append",
"(",
"status_call",
")",
"self",
".",
"status_callbacks",
"=",
"new_status_callbacks",
"self",
".",
"_resume",
"(",
"signum",
")"
] |
d3ddff8e7fbfb672c5bd7f6f4febeb5e921d8c67
|
test
|
SignalHandler._unreg_event
|
Tries to remove a registered event without triggering it
|
listen/signal_handler.py
|
def _unreg_event(self, event_list, event):
""" Tries to remove a registered event without triggering it """
try:
self.log.debug("Removing event {0}({1},{2})".format(event['function'].__name__, event['args'], event['kwargs']))
except AttributeError:
self.log.debug("Removing event {0}".format(str(event)))
try:
event_list.remove(event)
except ValueError:
try:
self.log.warn("Unable to remove event {0}({1},{2}) , not found in list: {3}".format(event['function'].__name__, event['args'], event['kwargs'], event_list))
except AttributeError:
self.log.debug("Unable to remove event {0}".format(str(event)))
raise KeyError('Unable to unregister the specified event from the signals specified')
|
def _unreg_event(self, event_list, event):
""" Tries to remove a registered event without triggering it """
try:
self.log.debug("Removing event {0}({1},{2})".format(event['function'].__name__, event['args'], event['kwargs']))
except AttributeError:
self.log.debug("Removing event {0}".format(str(event)))
try:
event_list.remove(event)
except ValueError:
try:
self.log.warn("Unable to remove event {0}({1},{2}) , not found in list: {3}".format(event['function'].__name__, event['args'], event['kwargs'], event_list))
except AttributeError:
self.log.debug("Unable to remove event {0}".format(str(event)))
raise KeyError('Unable to unregister the specified event from the signals specified')
|
[
"Tries",
"to",
"remove",
"a",
"registered",
"event",
"without",
"triggering",
"it"
] |
antevens/listen
|
python
|
https://github.com/antevens/listen/blob/d3ddff8e7fbfb672c5bd7f6f4febeb5e921d8c67/listen/signal_handler.py#L190-L204
|
[
"def",
"_unreg_event",
"(",
"self",
",",
"event_list",
",",
"event",
")",
":",
"try",
":",
"self",
".",
"log",
".",
"debug",
"(",
"\"Removing event {0}({1},{2})\"",
".",
"format",
"(",
"event",
"[",
"'function'",
"]",
".",
"__name__",
",",
"event",
"[",
"'args'",
"]",
",",
"event",
"[",
"'kwargs'",
"]",
")",
")",
"except",
"AttributeError",
":",
"self",
".",
"log",
".",
"debug",
"(",
"\"Removing event {0}\"",
".",
"format",
"(",
"str",
"(",
"event",
")",
")",
")",
"try",
":",
"event_list",
".",
"remove",
"(",
"event",
")",
"except",
"ValueError",
":",
"try",
":",
"self",
".",
"log",
".",
"warn",
"(",
"\"Unable to remove event {0}({1},{2}) , not found in list: {3}\"",
".",
"format",
"(",
"event",
"[",
"'function'",
"]",
".",
"__name__",
",",
"event",
"[",
"'args'",
"]",
",",
"event",
"[",
"'kwargs'",
"]",
",",
"event_list",
")",
")",
"except",
"AttributeError",
":",
"self",
".",
"log",
".",
"debug",
"(",
"\"Unable to remove event {0}\"",
".",
"format",
"(",
"str",
"(",
"event",
")",
")",
")",
"raise",
"KeyError",
"(",
"'Unable to unregister the specified event from the signals specified'",
")"
] |
d3ddff8e7fbfb672c5bd7f6f4febeb5e921d8c67
|
test
|
SignalHandler.reg_on_exit
|
Register a function/method to be called on program exit,
will get executed regardless of successs/failure of the program running
|
listen/signal_handler.py
|
def reg_on_exit(self, callable_object, *args, **kwargs):
""" Register a function/method to be called on program exit,
will get executed regardless of successs/failure of the program running """
persistent = kwargs.pop('persistent', False)
event = self._create_event(callable_object, 'exit', persistent, *args, **kwargs)
self.exit_callbacks.append(event)
return event
|
def reg_on_exit(self, callable_object, *args, **kwargs):
""" Register a function/method to be called on program exit,
will get executed regardless of successs/failure of the program running """
persistent = kwargs.pop('persistent', False)
event = self._create_event(callable_object, 'exit', persistent, *args, **kwargs)
self.exit_callbacks.append(event)
return event
|
[
"Register",
"a",
"function",
"/",
"method",
"to",
"be",
"called",
"on",
"program",
"exit",
"will",
"get",
"executed",
"regardless",
"of",
"successs",
"/",
"failure",
"of",
"the",
"program",
"running"
] |
antevens/listen
|
python
|
https://github.com/antevens/listen/blob/d3ddff8e7fbfb672c5bd7f6f4febeb5e921d8c67/listen/signal_handler.py#L232-L238
|
[
"def",
"reg_on_exit",
"(",
"self",
",",
"callable_object",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"persistent",
"=",
"kwargs",
".",
"pop",
"(",
"'persistent'",
",",
"False",
")",
"event",
"=",
"self",
".",
"_create_event",
"(",
"callable_object",
",",
"'exit'",
",",
"persistent",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"self",
".",
"exit_callbacks",
".",
"append",
"(",
"event",
")",
"return",
"event"
] |
d3ddff8e7fbfb672c5bd7f6f4febeb5e921d8c67
|
test
|
SignalHandler.reg_on_abort
|
Register a function/method to be called when execution is aborted
|
listen/signal_handler.py
|
def reg_on_abort(self, callable_object, *args, **kwargs):
""" Register a function/method to be called when execution is aborted"""
persistent = kwargs.pop('persistent', False)
event = self._create_event(callable_object, 'abort', persistent, *args, **kwargs)
self.abort_callbacks.append(event)
return event
|
def reg_on_abort(self, callable_object, *args, **kwargs):
""" Register a function/method to be called when execution is aborted"""
persistent = kwargs.pop('persistent', False)
event = self._create_event(callable_object, 'abort', persistent, *args, **kwargs)
self.abort_callbacks.append(event)
return event
|
[
"Register",
"a",
"function",
"/",
"method",
"to",
"be",
"called",
"when",
"execution",
"is",
"aborted"
] |
antevens/listen
|
python
|
https://github.com/antevens/listen/blob/d3ddff8e7fbfb672c5bd7f6f4febeb5e921d8c67/listen/signal_handler.py#L240-L245
|
[
"def",
"reg_on_abort",
"(",
"self",
",",
"callable_object",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"persistent",
"=",
"kwargs",
".",
"pop",
"(",
"'persistent'",
",",
"False",
")",
"event",
"=",
"self",
".",
"_create_event",
"(",
"callable_object",
",",
"'abort'",
",",
"persistent",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"self",
".",
"abort_callbacks",
".",
"append",
"(",
"event",
")",
"return",
"event"
] |
d3ddff8e7fbfb672c5bd7f6f4febeb5e921d8c67
|
test
|
SignalHandler.reg_on_status
|
Register a function/method to be called when a user or another
program asks for an update, when status is done it will start running
any tasks registered with the reg_on_resume method
|
listen/signal_handler.py
|
def reg_on_status(self, callable_object, *args, **kwargs):
""" Register a function/method to be called when a user or another
program asks for an update, when status is done it will start running
any tasks registered with the reg_on_resume method"""
persistent = kwargs.pop('persistent', False)
event = self._create_event(callable_object, 'status', persistent, *args, **kwargs)
self.status_callbacks.append(event)
return event
|
def reg_on_status(self, callable_object, *args, **kwargs):
""" Register a function/method to be called when a user or another
program asks for an update, when status is done it will start running
any tasks registered with the reg_on_resume method"""
persistent = kwargs.pop('persistent', False)
event = self._create_event(callable_object, 'status', persistent, *args, **kwargs)
self.status_callbacks.append(event)
return event
|
[
"Register",
"a",
"function",
"/",
"method",
"to",
"be",
"called",
"when",
"a",
"user",
"or",
"another",
"program",
"asks",
"for",
"an",
"update",
"when",
"status",
"is",
"done",
"it",
"will",
"start",
"running",
"any",
"tasks",
"registered",
"with",
"the",
"reg_on_resume",
"method"
] |
antevens/listen
|
python
|
https://github.com/antevens/listen/blob/d3ddff8e7fbfb672c5bd7f6f4febeb5e921d8c67/listen/signal_handler.py#L247-L254
|
[
"def",
"reg_on_status",
"(",
"self",
",",
"callable_object",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"persistent",
"=",
"kwargs",
".",
"pop",
"(",
"'persistent'",
",",
"False",
")",
"event",
"=",
"self",
".",
"_create_event",
"(",
"callable_object",
",",
"'status'",
",",
"persistent",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"self",
".",
"status_callbacks",
".",
"append",
"(",
"event",
")",
"return",
"event"
] |
d3ddff8e7fbfb672c5bd7f6f4febeb5e921d8c67
|
test
|
SignalHandler.reg_on_resume
|
Register a function/method to be called if the system needs to
resume a previously halted or paused execution, including status
requests.
|
listen/signal_handler.py
|
def reg_on_resume(self, callable_object, *args, **kwargs):
""" Register a function/method to be called if the system needs to
resume a previously halted or paused execution, including status
requests."""
persistent = kwargs.pop('persistent', False)
event = self._create_event(callable_object, 'resume', persistent, *args, **kwargs)
self.resume_callbacks.append(event)
return event
|
def reg_on_resume(self, callable_object, *args, **kwargs):
""" Register a function/method to be called if the system needs to
resume a previously halted or paused execution, including status
requests."""
persistent = kwargs.pop('persistent', False)
event = self._create_event(callable_object, 'resume', persistent, *args, **kwargs)
self.resume_callbacks.append(event)
return event
|
[
"Register",
"a",
"function",
"/",
"method",
"to",
"be",
"called",
"if",
"the",
"system",
"needs",
"to",
"resume",
"a",
"previously",
"halted",
"or",
"paused",
"execution",
"including",
"status",
"requests",
"."
] |
antevens/listen
|
python
|
https://github.com/antevens/listen/blob/d3ddff8e7fbfb672c5bd7f6f4febeb5e921d8c67/listen/signal_handler.py#L256-L263
|
[
"def",
"reg_on_resume",
"(",
"self",
",",
"callable_object",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"persistent",
"=",
"kwargs",
".",
"pop",
"(",
"'persistent'",
",",
"False",
")",
"event",
"=",
"self",
".",
"_create_event",
"(",
"callable_object",
",",
"'resume'",
",",
"persistent",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"self",
".",
"resume_callbacks",
".",
"append",
"(",
"event",
")",
"return",
"event"
] |
d3ddff8e7fbfb672c5bd7f6f4febeb5e921d8c67
|
test
|
Connection.fetch_metric
|
Fetch time series data from OpenTSDB
Parameters:
metric:
A string representing a valid OpenTSDB metric.
tags:
A dict mapping tag names to tag values. Tag names and values are
always strings.
{ 'user_id': '44' }
start:
A datetime.datetime-like object representing the start of the
range to query over.
end:
A datetime.datetime-like object representing the end of the
range to query over.
aggregator:
The function for merging multiple time series together. For
example, if the "user_id" tag is not specified, this aggregator
function is used to combine all heart rate time series into one
time series. (Yes, this isn't very useful.)
For queries that return only one time series, this parameter is
not relevant.
Valid values: "sum", "min", "max", "avg", "dev"
See: http://opentsdb.net/docs/build/html/user_guide/query/aggregators.html
downsampling:
A relative time interval to "downsample". This isn't true
downsampling; rather, if you specify a downsampling of "5m"
(five minutes), OpenTSDB will split data into five minute
intervals, and return one data point in the middle of each
interval whose value is the average of all data points within
that interval.
Valid relative time values are strings of the following format:
"<amount><time_unit>"
Valid time units: "ms", "s", "m", "h", "d", "w", "n", "y"
Date and time format: http://opentsdb.net/docs/build/html/user_guide/query/dates.html
ms_resolution:
Whether or not to output data point timestamps in milliseconds
or seconds. If this flag is false and there are multiple
data points within a second, those data points will be down
sampled using the query's aggregation function.
Returns:
A dict mapping timestamps to data points
|
pytsdb.py
|
def fetch_metric(self, metric, start, end, tags={}, aggregator="sum",
downsample=None, ms_resolution=True):
"""Fetch time series data from OpenTSDB
Parameters:
metric:
A string representing a valid OpenTSDB metric.
tags:
A dict mapping tag names to tag values. Tag names and values are
always strings.
{ 'user_id': '44' }
start:
A datetime.datetime-like object representing the start of the
range to query over.
end:
A datetime.datetime-like object representing the end of the
range to query over.
aggregator:
The function for merging multiple time series together. For
example, if the "user_id" tag is not specified, this aggregator
function is used to combine all heart rate time series into one
time series. (Yes, this isn't very useful.)
For queries that return only one time series, this parameter is
not relevant.
Valid values: "sum", "min", "max", "avg", "dev"
See: http://opentsdb.net/docs/build/html/user_guide/query/aggregators.html
downsampling:
A relative time interval to "downsample". This isn't true
downsampling; rather, if you specify a downsampling of "5m"
(five minutes), OpenTSDB will split data into five minute
intervals, and return one data point in the middle of each
interval whose value is the average of all data points within
that interval.
Valid relative time values are strings of the following format:
"<amount><time_unit>"
Valid time units: "ms", "s", "m", "h", "d", "w", "n", "y"
Date and time format: http://opentsdb.net/docs/build/html/user_guide/query/dates.html
ms_resolution:
Whether or not to output data point timestamps in milliseconds
or seconds. If this flag is false and there are multiple
data points within a second, those data points will be down
sampled using the query's aggregation function.
Returns:
A dict mapping timestamps to data points
"""
query = "{aggregator}:{downsample}{metric}{{{tags}}}".format(
aggregator=aggregator,
downsample=downsample + "-avg:" if downsample else "",
metric=metric,
tags=','.join("%s=%s" % (k, v) for k, v in tags.items())
)
params = {
'ms': ms_resolution,
'start': '{0:.3f}'.format(start.timestamp()),
'end': '{0:.3f}'.format(end.timestamp()),
'm': query
}
response = self.__request("/query", params)
if response.status_code == 200:
try:
return response.json()[0]['dps']
except IndexError:
# empty data set
return {}
raise QueryError(response.json())
|
def fetch_metric(self, metric, start, end, tags={}, aggregator="sum",
downsample=None, ms_resolution=True):
"""Fetch time series data from OpenTSDB
Parameters:
metric:
A string representing a valid OpenTSDB metric.
tags:
A dict mapping tag names to tag values. Tag names and values are
always strings.
{ 'user_id': '44' }
start:
A datetime.datetime-like object representing the start of the
range to query over.
end:
A datetime.datetime-like object representing the end of the
range to query over.
aggregator:
The function for merging multiple time series together. For
example, if the "user_id" tag is not specified, this aggregator
function is used to combine all heart rate time series into one
time series. (Yes, this isn't very useful.)
For queries that return only one time series, this parameter is
not relevant.
Valid values: "sum", "min", "max", "avg", "dev"
See: http://opentsdb.net/docs/build/html/user_guide/query/aggregators.html
downsampling:
A relative time interval to "downsample". This isn't true
downsampling; rather, if you specify a downsampling of "5m"
(five minutes), OpenTSDB will split data into five minute
intervals, and return one data point in the middle of each
interval whose value is the average of all data points within
that interval.
Valid relative time values are strings of the following format:
"<amount><time_unit>"
Valid time units: "ms", "s", "m", "h", "d", "w", "n", "y"
Date and time format: http://opentsdb.net/docs/build/html/user_guide/query/dates.html
ms_resolution:
Whether or not to output data point timestamps in milliseconds
or seconds. If this flag is false and there are multiple
data points within a second, those data points will be down
sampled using the query's aggregation function.
Returns:
A dict mapping timestamps to data points
"""
query = "{aggregator}:{downsample}{metric}{{{tags}}}".format(
aggregator=aggregator,
downsample=downsample + "-avg:" if downsample else "",
metric=metric,
tags=','.join("%s=%s" % (k, v) for k, v in tags.items())
)
params = {
'ms': ms_resolution,
'start': '{0:.3f}'.format(start.timestamp()),
'end': '{0:.3f}'.format(end.timestamp()),
'm': query
}
response = self.__request("/query", params)
if response.status_code == 200:
try:
return response.json()[0]['dps']
except IndexError:
# empty data set
return {}
raise QueryError(response.json())
|
[
"Fetch",
"time",
"series",
"data",
"from",
"OpenTSDB"
] |
WhoopInc/pytsdb
|
python
|
https://github.com/WhoopInc/pytsdb/blob/f053c2effa7a9175c5521e690dbb23700c6c0cfe/pytsdb.py#L20-L101
|
[
"def",
"fetch_metric",
"(",
"self",
",",
"metric",
",",
"start",
",",
"end",
",",
"tags",
"=",
"{",
"}",
",",
"aggregator",
"=",
"\"sum\"",
",",
"downsample",
"=",
"None",
",",
"ms_resolution",
"=",
"True",
")",
":",
"query",
"=",
"\"{aggregator}:{downsample}{metric}{{{tags}}}\"",
".",
"format",
"(",
"aggregator",
"=",
"aggregator",
",",
"downsample",
"=",
"downsample",
"+",
"\"-avg:\"",
"if",
"downsample",
"else",
"\"\"",
",",
"metric",
"=",
"metric",
",",
"tags",
"=",
"','",
".",
"join",
"(",
"\"%s=%s\"",
"%",
"(",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"tags",
".",
"items",
"(",
")",
")",
")",
"params",
"=",
"{",
"'ms'",
":",
"ms_resolution",
",",
"'start'",
":",
"'{0:.3f}'",
".",
"format",
"(",
"start",
".",
"timestamp",
"(",
")",
")",
",",
"'end'",
":",
"'{0:.3f}'",
".",
"format",
"(",
"end",
".",
"timestamp",
"(",
")",
")",
",",
"'m'",
":",
"query",
"}",
"response",
"=",
"self",
".",
"__request",
"(",
"\"/query\"",
",",
"params",
")",
"if",
"response",
".",
"status_code",
"==",
"200",
":",
"try",
":",
"return",
"response",
".",
"json",
"(",
")",
"[",
"0",
"]",
"[",
"'dps'",
"]",
"except",
"IndexError",
":",
"# empty data set",
"return",
"{",
"}",
"raise",
"QueryError",
"(",
"response",
".",
"json",
"(",
")",
")"
] |
f053c2effa7a9175c5521e690dbb23700c6c0cfe
|
test
|
Connection.fetch_sorted_metric
|
Fetch and sort time series data from OpenTSDB
Takes the same parameters as `fetch_metric`, but returns a list of
(timestamp, value) tuples sorted by timestamp.
|
pytsdb.py
|
def fetch_sorted_metric(self, *args, **kwargs):
"""Fetch and sort time series data from OpenTSDB
Takes the same parameters as `fetch_metric`, but returns a list of
(timestamp, value) tuples sorted by timestamp.
"""
return sorted(self.fetch_metric(*args, **kwargs).items(),
key=lambda x: float(x[0]))
|
def fetch_sorted_metric(self, *args, **kwargs):
"""Fetch and sort time series data from OpenTSDB
Takes the same parameters as `fetch_metric`, but returns a list of
(timestamp, value) tuples sorted by timestamp.
"""
return sorted(self.fetch_metric(*args, **kwargs).items(),
key=lambda x: float(x[0]))
|
[
"Fetch",
"and",
"sort",
"time",
"series",
"data",
"from",
"OpenTSDB"
] |
WhoopInc/pytsdb
|
python
|
https://github.com/WhoopInc/pytsdb/blob/f053c2effa7a9175c5521e690dbb23700c6c0cfe/pytsdb.py#L103-L110
|
[
"def",
"fetch_sorted_metric",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"sorted",
"(",
"self",
".",
"fetch_metric",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
".",
"items",
"(",
")",
",",
"key",
"=",
"lambda",
"x",
":",
"float",
"(",
"x",
"[",
"0",
"]",
")",
")"
] |
f053c2effa7a9175c5521e690dbb23700c6c0cfe
|
test
|
pfreduce
|
A pointfree reduce / left fold function: Applies a function of two
arguments cumulatively to the items supplied by the given iterable, so
as to reduce the iterable to a single value. If an initial value is
supplied, it is placed before the items from the iterable in the
calculation, and serves as the default when the iterable is empty.
:param func: A function of two arguments
:param iterable: An iterable yielding input for the function
:param initial: An optional initial input for the function
:rtype: Single value
Example::
>>> from operator import add
>>> sum_of_squares = pfreduce(add, initial=0) * pfmap(lambda n: n**2)
>>> sum_of_squares([3, 4, 5, 6])
86
|
pointfree.py
|
def pfreduce(func, iterable, initial=None):
"""A pointfree reduce / left fold function: Applies a function of two
arguments cumulatively to the items supplied by the given iterable, so
as to reduce the iterable to a single value. If an initial value is
supplied, it is placed before the items from the iterable in the
calculation, and serves as the default when the iterable is empty.
:param func: A function of two arguments
:param iterable: An iterable yielding input for the function
:param initial: An optional initial input for the function
:rtype: Single value
Example::
>>> from operator import add
>>> sum_of_squares = pfreduce(add, initial=0) * pfmap(lambda n: n**2)
>>> sum_of_squares([3, 4, 5, 6])
86
"""
iterator = iter(iterable)
try:
first_item = next(iterator)
if initial:
value = func(initial, first_item)
else:
value = first_item
except StopIteration:
return initial
for item in iterator:
value = func(value, item)
return value
|
def pfreduce(func, iterable, initial=None):
"""A pointfree reduce / left fold function: Applies a function of two
arguments cumulatively to the items supplied by the given iterable, so
as to reduce the iterable to a single value. If an initial value is
supplied, it is placed before the items from the iterable in the
calculation, and serves as the default when the iterable is empty.
:param func: A function of two arguments
:param iterable: An iterable yielding input for the function
:param initial: An optional initial input for the function
:rtype: Single value
Example::
>>> from operator import add
>>> sum_of_squares = pfreduce(add, initial=0) * pfmap(lambda n: n**2)
>>> sum_of_squares([3, 4, 5, 6])
86
"""
iterator = iter(iterable)
try:
first_item = next(iterator)
if initial:
value = func(initial, first_item)
else:
value = first_item
except StopIteration:
return initial
for item in iterator:
value = func(value, item)
return value
|
[
"A",
"pointfree",
"reduce",
"/",
"left",
"fold",
"function",
":",
"Applies",
"a",
"function",
"of",
"two",
"arguments",
"cumulatively",
"to",
"the",
"items",
"supplied",
"by",
"the",
"given",
"iterable",
"so",
"as",
"to",
"reduce",
"the",
"iterable",
"to",
"a",
"single",
"value",
".",
"If",
"an",
"initial",
"value",
"is",
"supplied",
"it",
"is",
"placed",
"before",
"the",
"items",
"from",
"the",
"iterable",
"in",
"the",
"calculation",
"and",
"serves",
"as",
"the",
"default",
"when",
"the",
"iterable",
"is",
"empty",
"."
] |
mshroyer/pointfree
|
python
|
https://github.com/mshroyer/pointfree/blob/a25ecb3f0cd583e0730ecdde83018e5089711854/pointfree.py#L595-L629
|
[
"def",
"pfreduce",
"(",
"func",
",",
"iterable",
",",
"initial",
"=",
"None",
")",
":",
"iterator",
"=",
"iter",
"(",
"iterable",
")",
"try",
":",
"first_item",
"=",
"next",
"(",
"iterator",
")",
"if",
"initial",
":",
"value",
"=",
"func",
"(",
"initial",
",",
"first_item",
")",
"else",
":",
"value",
"=",
"first_item",
"except",
"StopIteration",
":",
"return",
"initial",
"for",
"item",
"in",
"iterator",
":",
"value",
"=",
"func",
"(",
"value",
",",
"item",
")",
"return",
"value"
] |
a25ecb3f0cd583e0730ecdde83018e5089711854
|
test
|
pfcollect
|
Collects and returns a list of values from the given iterable. If
the n parameter is not specified, collects all values from the
iterable.
:param iterable: An iterable yielding values for the list
:param n: An optional maximum number of items to collect
:rtype: List of values from the iterable
Example::
>>> @pointfree
... def fibonaccis():
... a, b = 0, 1
... while True:
... a, b = b, a+b
... yield a
>>> (pfcollect(n=10) * fibonaccis)()
[1, 1, 2, 3, 5, 8, 13, 21, 34, 55]
|
pointfree.py
|
def pfcollect(iterable, n=None):
"""Collects and returns a list of values from the given iterable. If
the n parameter is not specified, collects all values from the
iterable.
:param iterable: An iterable yielding values for the list
:param n: An optional maximum number of items to collect
:rtype: List of values from the iterable
Example::
>>> @pointfree
... def fibonaccis():
... a, b = 0, 1
... while True:
... a, b = b, a+b
... yield a
>>> (pfcollect(n=10) * fibonaccis)()
[1, 1, 2, 3, 5, 8, 13, 21, 34, 55]
"""
if n:
return list(itertools.islice(iterable, n))
else:
return list(iterable)
|
def pfcollect(iterable, n=None):
"""Collects and returns a list of values from the given iterable. If
the n parameter is not specified, collects all values from the
iterable.
:param iterable: An iterable yielding values for the list
:param n: An optional maximum number of items to collect
:rtype: List of values from the iterable
Example::
>>> @pointfree
... def fibonaccis():
... a, b = 0, 1
... while True:
... a, b = b, a+b
... yield a
>>> (pfcollect(n=10) * fibonaccis)()
[1, 1, 2, 3, 5, 8, 13, 21, 34, 55]
"""
if n:
return list(itertools.islice(iterable, n))
else:
return list(iterable)
|
[
"Collects",
"and",
"returns",
"a",
"list",
"of",
"values",
"from",
"the",
"given",
"iterable",
".",
"If",
"the",
"n",
"parameter",
"is",
"not",
"specified",
"collects",
"all",
"values",
"from",
"the",
"iterable",
"."
] |
mshroyer/pointfree
|
python
|
https://github.com/mshroyer/pointfree/blob/a25ecb3f0cd583e0730ecdde83018e5089711854/pointfree.py#L649-L675
|
[
"def",
"pfcollect",
"(",
"iterable",
",",
"n",
"=",
"None",
")",
":",
"if",
"n",
":",
"return",
"list",
"(",
"itertools",
".",
"islice",
"(",
"iterable",
",",
"n",
")",
")",
"else",
":",
"return",
"list",
"(",
"iterable",
")"
] |
a25ecb3f0cd583e0730ecdde83018e5089711854
|
test
|
pfprint
|
Prints an item.
:param item: The item to print
:param end: String to append to the end of printed output
:param file: File to which output is printed
:rtype: None
Example::
>>> from operator import add
>>> fn = pfreduce(add, initial=0) >> pfprint
>>> fn([1, 2, 3, 4])
10
|
pointfree.py
|
def pfprint(item, end='\n', file=None):
"""Prints an item.
:param item: The item to print
:param end: String to append to the end of printed output
:param file: File to which output is printed
:rtype: None
Example::
>>> from operator import add
>>> fn = pfreduce(add, initial=0) >> pfprint
>>> fn([1, 2, 3, 4])
10
"""
# Can't just make sys.stdout the file argument's default value, because
# then we would be capturing the stdout file descriptor, and then
# doctest -- which works by redefining sys.stdout -- would fail:
if file is None:
file = sys.stdout
print(item, end=end, file=file)
|
def pfprint(item, end='\n', file=None):
"""Prints an item.
:param item: The item to print
:param end: String to append to the end of printed output
:param file: File to which output is printed
:rtype: None
Example::
>>> from operator import add
>>> fn = pfreduce(add, initial=0) >> pfprint
>>> fn([1, 2, 3, 4])
10
"""
# Can't just make sys.stdout the file argument's default value, because
# then we would be capturing the stdout file descriptor, and then
# doctest -- which works by redefining sys.stdout -- would fail:
if file is None:
file = sys.stdout
print(item, end=end, file=file)
|
[
"Prints",
"an",
"item",
"."
] |
mshroyer/pointfree
|
python
|
https://github.com/mshroyer/pointfree/blob/a25ecb3f0cd583e0730ecdde83018e5089711854/pointfree.py#L678-L702
|
[
"def",
"pfprint",
"(",
"item",
",",
"end",
"=",
"'\\n'",
",",
"file",
"=",
"None",
")",
":",
"# Can't just make sys.stdout the file argument's default value, because",
"# then we would be capturing the stdout file descriptor, and then",
"# doctest -- which works by redefining sys.stdout -- would fail:",
"if",
"file",
"is",
"None",
":",
"file",
"=",
"sys",
".",
"stdout",
"print",
"(",
"item",
",",
"end",
"=",
"end",
",",
"file",
"=",
"file",
")"
] |
a25ecb3f0cd583e0730ecdde83018e5089711854
|
test
|
pfprint_all
|
Prints each item from an iterable.
:param iterable: An iterable yielding values to print
:param end: String to append to the end of printed output
:param file: File to which output is printed
:rtype: None
Example::
>>> @pointfree
... def prefix_all(prefix, iterable):
... for item in iterable:
... yield "%s%s" % (prefix, item)
>>> fn = prefix_all("An item: ") >> pfprint_all
>>> fn(["foo", "bar", "baz"])
An item: foo
An item: bar
An item: baz
|
pointfree.py
|
def pfprint_all(iterable, end='\n', file=None):
"""Prints each item from an iterable.
:param iterable: An iterable yielding values to print
:param end: String to append to the end of printed output
:param file: File to which output is printed
:rtype: None
Example::
>>> @pointfree
... def prefix_all(prefix, iterable):
... for item in iterable:
... yield "%s%s" % (prefix, item)
>>> fn = prefix_all("An item: ") >> pfprint_all
>>> fn(["foo", "bar", "baz"])
An item: foo
An item: bar
An item: baz
"""
for item in iterable:
pfprint(item, end=end, file=file)
|
def pfprint_all(iterable, end='\n', file=None):
"""Prints each item from an iterable.
:param iterable: An iterable yielding values to print
:param end: String to append to the end of printed output
:param file: File to which output is printed
:rtype: None
Example::
>>> @pointfree
... def prefix_all(prefix, iterable):
... for item in iterable:
... yield "%s%s" % (prefix, item)
>>> fn = prefix_all("An item: ") >> pfprint_all
>>> fn(["foo", "bar", "baz"])
An item: foo
An item: bar
An item: baz
"""
for item in iterable:
pfprint(item, end=end, file=file)
|
[
"Prints",
"each",
"item",
"from",
"an",
"iterable",
"."
] |
mshroyer/pointfree
|
python
|
https://github.com/mshroyer/pointfree/blob/a25ecb3f0cd583e0730ecdde83018e5089711854/pointfree.py#L705-L730
|
[
"def",
"pfprint_all",
"(",
"iterable",
",",
"end",
"=",
"'\\n'",
",",
"file",
"=",
"None",
")",
":",
"for",
"item",
"in",
"iterable",
":",
"pfprint",
"(",
"item",
",",
"end",
"=",
"end",
",",
"file",
"=",
"file",
")"
] |
a25ecb3f0cd583e0730ecdde83018e5089711854
|
test
|
partial.__sig_from_func
|
Extract function signature, default arguments, keyword-only
arguments, and whether or not variable positional or keyword
arguments are allowed. This also supports calling unbound instance
methods by passing an object instance as the first argument;
however, unbound classmethod and staticmethod objects are not
callable, so we do not attempt to support them here.
|
pointfree.py
|
def __sig_from_func(self, func):
"""Extract function signature, default arguments, keyword-only
arguments, and whether or not variable positional or keyword
arguments are allowed. This also supports calling unbound instance
methods by passing an object instance as the first argument;
however, unbound classmethod and staticmethod objects are not
callable, so we do not attempt to support them here."""
if isinstance(func, types.MethodType):
# A bound instance or class method.
argspec = getfullargspec(func.__func__)
self.pargl = argspec[0][1:]
else:
# A regular function, an unbound instance method, or a
# bound static method.
argspec = getfullargspec(func)
self.pargl = argspec[0][:]
if argspec[3] is not None:
def_offset = len(self.pargl) - len(argspec[3])
self.def_argv = dict((self.pargl[def_offset+i],argspec[3][i]) \
for i in range(len(argspec[3])))
else:
self.def_argv = {}
self.var_pargs = argspec[1] is not None
self.var_kargs = argspec[2] is not None
self.kargl = argspec[4]
# We need keyword-only arguments' default values too.
if argspec[5] is not None:
self.def_argv.update(argspec[5])
|
def __sig_from_func(self, func):
"""Extract function signature, default arguments, keyword-only
arguments, and whether or not variable positional or keyword
arguments are allowed. This also supports calling unbound instance
methods by passing an object instance as the first argument;
however, unbound classmethod and staticmethod objects are not
callable, so we do not attempt to support them here."""
if isinstance(func, types.MethodType):
# A bound instance or class method.
argspec = getfullargspec(func.__func__)
self.pargl = argspec[0][1:]
else:
# A regular function, an unbound instance method, or a
# bound static method.
argspec = getfullargspec(func)
self.pargl = argspec[0][:]
if argspec[3] is not None:
def_offset = len(self.pargl) - len(argspec[3])
self.def_argv = dict((self.pargl[def_offset+i],argspec[3][i]) \
for i in range(len(argspec[3])))
else:
self.def_argv = {}
self.var_pargs = argspec[1] is not None
self.var_kargs = argspec[2] is not None
self.kargl = argspec[4]
# We need keyword-only arguments' default values too.
if argspec[5] is not None:
self.def_argv.update(argspec[5])
|
[
"Extract",
"function",
"signature",
"default",
"arguments",
"keyword",
"-",
"only",
"arguments",
"and",
"whether",
"or",
"not",
"variable",
"positional",
"or",
"keyword",
"arguments",
"are",
"allowed",
".",
"This",
"also",
"supports",
"calling",
"unbound",
"instance",
"methods",
"by",
"passing",
"an",
"object",
"instance",
"as",
"the",
"first",
"argument",
";",
"however",
"unbound",
"classmethod",
"and",
"staticmethod",
"objects",
"are",
"not",
"callable",
"so",
"we",
"do",
"not",
"attempt",
"to",
"support",
"them",
"here",
"."
] |
mshroyer/pointfree
|
python
|
https://github.com/mshroyer/pointfree/blob/a25ecb3f0cd583e0730ecdde83018e5089711854/pointfree.py#L364-L395
|
[
"def",
"__sig_from_func",
"(",
"self",
",",
"func",
")",
":",
"if",
"isinstance",
"(",
"func",
",",
"types",
".",
"MethodType",
")",
":",
"# A bound instance or class method.",
"argspec",
"=",
"getfullargspec",
"(",
"func",
".",
"__func__",
")",
"self",
".",
"pargl",
"=",
"argspec",
"[",
"0",
"]",
"[",
"1",
":",
"]",
"else",
":",
"# A regular function, an unbound instance method, or a",
"# bound static method.",
"argspec",
"=",
"getfullargspec",
"(",
"func",
")",
"self",
".",
"pargl",
"=",
"argspec",
"[",
"0",
"]",
"[",
":",
"]",
"if",
"argspec",
"[",
"3",
"]",
"is",
"not",
"None",
":",
"def_offset",
"=",
"len",
"(",
"self",
".",
"pargl",
")",
"-",
"len",
"(",
"argspec",
"[",
"3",
"]",
")",
"self",
".",
"def_argv",
"=",
"dict",
"(",
"(",
"self",
".",
"pargl",
"[",
"def_offset",
"+",
"i",
"]",
",",
"argspec",
"[",
"3",
"]",
"[",
"i",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"argspec",
"[",
"3",
"]",
")",
")",
")",
"else",
":",
"self",
".",
"def_argv",
"=",
"{",
"}",
"self",
".",
"var_pargs",
"=",
"argspec",
"[",
"1",
"]",
"is",
"not",
"None",
"self",
".",
"var_kargs",
"=",
"argspec",
"[",
"2",
"]",
"is",
"not",
"None",
"self",
".",
"kargl",
"=",
"argspec",
"[",
"4",
"]",
"# We need keyword-only arguments' default values too.",
"if",
"argspec",
"[",
"5",
"]",
"is",
"not",
"None",
":",
"self",
".",
"def_argv",
".",
"update",
"(",
"argspec",
"[",
"5",
"]",
")"
] |
a25ecb3f0cd583e0730ecdde83018e5089711854
|
test
|
partial.__sig_from_partial
|
Extract function signature from an existing partial instance.
|
pointfree.py
|
def __sig_from_partial(self, inst):
"""Extract function signature from an existing partial instance."""
self.pargl = list(inst.pargl)
self.kargl = list(inst.kargl)
self.def_argv = inst.def_argv.copy()
self.var_pargs = inst.var_pargs
self.var_kargs = inst.var_kargs
|
def __sig_from_partial(self, inst):
"""Extract function signature from an existing partial instance."""
self.pargl = list(inst.pargl)
self.kargl = list(inst.kargl)
self.def_argv = inst.def_argv.copy()
self.var_pargs = inst.var_pargs
self.var_kargs = inst.var_kargs
|
[
"Extract",
"function",
"signature",
"from",
"an",
"existing",
"partial",
"instance",
"."
] |
mshroyer/pointfree
|
python
|
https://github.com/mshroyer/pointfree/blob/a25ecb3f0cd583e0730ecdde83018e5089711854/pointfree.py#L397-L404
|
[
"def",
"__sig_from_partial",
"(",
"self",
",",
"inst",
")",
":",
"self",
".",
"pargl",
"=",
"list",
"(",
"inst",
".",
"pargl",
")",
"self",
".",
"kargl",
"=",
"list",
"(",
"inst",
".",
"kargl",
")",
"self",
".",
"def_argv",
"=",
"inst",
".",
"def_argv",
".",
"copy",
"(",
")",
"self",
".",
"var_pargs",
"=",
"inst",
".",
"var_pargs",
"self",
".",
"var_kargs",
"=",
"inst",
".",
"var_kargs"
] |
a25ecb3f0cd583e0730ecdde83018e5089711854
|
test
|
partial.make_copy
|
Makes a new instance of the partial application wrapper based on
an existing instance, optionally overriding the original's wrapped
function and/or saved arguments.
:param inst: The partial instance we're copying
:param func: Override the original's wrapped function
:param argv: Override saved argument values
:param extra_argv: Override saved extra positional arguments
:param copy_sig: Copy original's signature?
:rtype: New partial wrapper instance
|
pointfree.py
|
def make_copy(klass, inst, func=None, argv=None, extra_argv=None, copy_sig=True):
"""Makes a new instance of the partial application wrapper based on
an existing instance, optionally overriding the original's wrapped
function and/or saved arguments.
:param inst: The partial instance we're copying
:param func: Override the original's wrapped function
:param argv: Override saved argument values
:param extra_argv: Override saved extra positional arguments
:param copy_sig: Copy original's signature?
:rtype: New partial wrapper instance
"""
dest = klass(func or inst.func)
dest.argv = (argv or inst.argv).copy()
dest.extra_argv = list(extra_argv if extra_argv else inst.extra_argv)
if copy_sig:
dest.__sig_from_partial(inst)
return dest
|
def make_copy(klass, inst, func=None, argv=None, extra_argv=None, copy_sig=True):
"""Makes a new instance of the partial application wrapper based on
an existing instance, optionally overriding the original's wrapped
function and/or saved arguments.
:param inst: The partial instance we're copying
:param func: Override the original's wrapped function
:param argv: Override saved argument values
:param extra_argv: Override saved extra positional arguments
:param copy_sig: Copy original's signature?
:rtype: New partial wrapper instance
"""
dest = klass(func or inst.func)
dest.argv = (argv or inst.argv).copy()
dest.extra_argv = list(extra_argv if extra_argv else inst.extra_argv)
if copy_sig:
dest.__sig_from_partial(inst)
return dest
|
[
"Makes",
"a",
"new",
"instance",
"of",
"the",
"partial",
"application",
"wrapper",
"based",
"on",
"an",
"existing",
"instance",
"optionally",
"overriding",
"the",
"original",
"s",
"wrapped",
"function",
"and",
"/",
"or",
"saved",
"arguments",
"."
] |
mshroyer/pointfree
|
python
|
https://github.com/mshroyer/pointfree/blob/a25ecb3f0cd583e0730ecdde83018e5089711854/pointfree.py#L407-L428
|
[
"def",
"make_copy",
"(",
"klass",
",",
"inst",
",",
"func",
"=",
"None",
",",
"argv",
"=",
"None",
",",
"extra_argv",
"=",
"None",
",",
"copy_sig",
"=",
"True",
")",
":",
"dest",
"=",
"klass",
"(",
"func",
"or",
"inst",
".",
"func",
")",
"dest",
".",
"argv",
"=",
"(",
"argv",
"or",
"inst",
".",
"argv",
")",
".",
"copy",
"(",
")",
"dest",
".",
"extra_argv",
"=",
"list",
"(",
"extra_argv",
"if",
"extra_argv",
"else",
"inst",
".",
"extra_argv",
")",
"if",
"copy_sig",
":",
"dest",
".",
"__sig_from_partial",
"(",
"inst",
")",
"return",
"dest"
] |
a25ecb3f0cd583e0730ecdde83018e5089711854
|
test
|
partial.__new_argv
|
Calculate new argv and extra_argv values resulting from adding
the specified positional and keyword arguments.
|
pointfree.py
|
def __new_argv(self, *new_pargs, **new_kargs):
"""Calculate new argv and extra_argv values resulting from adding
the specified positional and keyword arguments."""
new_argv = self.argv.copy()
new_extra_argv = list(self.extra_argv)
for v in new_pargs:
arg_name = None
for name in self.pargl:
if not name in new_argv:
arg_name = name
break
if arg_name:
new_argv[arg_name] = v
elif self.var_pargs:
new_extra_argv.append(v)
else:
num_prev_pargs = len([name for name in self.pargl if name in self.argv])
raise TypeError("%s() takes exactly %d positional arguments (%d given)" \
% (self.__name__,
len(self.pargl),
num_prev_pargs + len(new_pargs)))
for k,v in new_kargs.items():
if not (self.var_kargs or (k in self.pargl) or (k in self.kargl)):
raise TypeError("%s() got an unexpected keyword argument '%s'" \
% (self.__name__, k))
new_argv[k] = v
return (new_argv, new_extra_argv)
|
def __new_argv(self, *new_pargs, **new_kargs):
"""Calculate new argv and extra_argv values resulting from adding
the specified positional and keyword arguments."""
new_argv = self.argv.copy()
new_extra_argv = list(self.extra_argv)
for v in new_pargs:
arg_name = None
for name in self.pargl:
if not name in new_argv:
arg_name = name
break
if arg_name:
new_argv[arg_name] = v
elif self.var_pargs:
new_extra_argv.append(v)
else:
num_prev_pargs = len([name for name in self.pargl if name in self.argv])
raise TypeError("%s() takes exactly %d positional arguments (%d given)" \
% (self.__name__,
len(self.pargl),
num_prev_pargs + len(new_pargs)))
for k,v in new_kargs.items():
if not (self.var_kargs or (k in self.pargl) or (k in self.kargl)):
raise TypeError("%s() got an unexpected keyword argument '%s'" \
% (self.__name__, k))
new_argv[k] = v
return (new_argv, new_extra_argv)
|
[
"Calculate",
"new",
"argv",
"and",
"extra_argv",
"values",
"resulting",
"from",
"adding",
"the",
"specified",
"positional",
"and",
"keyword",
"arguments",
"."
] |
mshroyer/pointfree
|
python
|
https://github.com/mshroyer/pointfree/blob/a25ecb3f0cd583e0730ecdde83018e5089711854/pointfree.py#L433-L464
|
[
"def",
"__new_argv",
"(",
"self",
",",
"*",
"new_pargs",
",",
"*",
"*",
"new_kargs",
")",
":",
"new_argv",
"=",
"self",
".",
"argv",
".",
"copy",
"(",
")",
"new_extra_argv",
"=",
"list",
"(",
"self",
".",
"extra_argv",
")",
"for",
"v",
"in",
"new_pargs",
":",
"arg_name",
"=",
"None",
"for",
"name",
"in",
"self",
".",
"pargl",
":",
"if",
"not",
"name",
"in",
"new_argv",
":",
"arg_name",
"=",
"name",
"break",
"if",
"arg_name",
":",
"new_argv",
"[",
"arg_name",
"]",
"=",
"v",
"elif",
"self",
".",
"var_pargs",
":",
"new_extra_argv",
".",
"append",
"(",
"v",
")",
"else",
":",
"num_prev_pargs",
"=",
"len",
"(",
"[",
"name",
"for",
"name",
"in",
"self",
".",
"pargl",
"if",
"name",
"in",
"self",
".",
"argv",
"]",
")",
"raise",
"TypeError",
"(",
"\"%s() takes exactly %d positional arguments (%d given)\"",
"%",
"(",
"self",
".",
"__name__",
",",
"len",
"(",
"self",
".",
"pargl",
")",
",",
"num_prev_pargs",
"+",
"len",
"(",
"new_pargs",
")",
")",
")",
"for",
"k",
",",
"v",
"in",
"new_kargs",
".",
"items",
"(",
")",
":",
"if",
"not",
"(",
"self",
".",
"var_kargs",
"or",
"(",
"k",
"in",
"self",
".",
"pargl",
")",
"or",
"(",
"k",
"in",
"self",
".",
"kargl",
")",
")",
":",
"raise",
"TypeError",
"(",
"\"%s() got an unexpected keyword argument '%s'\"",
"%",
"(",
"self",
".",
"__name__",
",",
"k",
")",
")",
"new_argv",
"[",
"k",
"]",
"=",
"v",
"return",
"(",
"new_argv",
",",
"new_extra_argv",
")"
] |
a25ecb3f0cd583e0730ecdde83018e5089711854
|
test
|
ignore_certain_metainf_files
|
We do not support multiple signatures in XPI signing because the client
side code makes some pretty reasonable assumptions about a single signature
on any given JAR. This function returns True if the file name given is one
that we dispose of to prevent multiple signatures.
|
sign_xpi_lib/sign_xpi_lib.py
|
def ignore_certain_metainf_files(filename):
"""
We do not support multiple signatures in XPI signing because the client
side code makes some pretty reasonable assumptions about a single signature
on any given JAR. This function returns True if the file name given is one
that we dispose of to prevent multiple signatures.
"""
ignore = ("META-INF/manifest.mf",
"META-INF/*.sf",
"META-INF/*.rsa",
"META-INF/*.dsa",
"META-INF/ids.json")
for glob in ignore:
# Explicitly match against all upper case to prevent the kind of
# runtime errors that lead to https://bugzil.la/1169574
if fnmatch.fnmatchcase(filename.upper(), glob.upper()):
return True
return False
|
def ignore_certain_metainf_files(filename):
"""
We do not support multiple signatures in XPI signing because the client
side code makes some pretty reasonable assumptions about a single signature
on any given JAR. This function returns True if the file name given is one
that we dispose of to prevent multiple signatures.
"""
ignore = ("META-INF/manifest.mf",
"META-INF/*.sf",
"META-INF/*.rsa",
"META-INF/*.dsa",
"META-INF/ids.json")
for glob in ignore:
# Explicitly match against all upper case to prevent the kind of
# runtime errors that lead to https://bugzil.la/1169574
if fnmatch.fnmatchcase(filename.upper(), glob.upper()):
return True
return False
|
[
"We",
"do",
"not",
"support",
"multiple",
"signatures",
"in",
"XPI",
"signing",
"because",
"the",
"client",
"side",
"code",
"makes",
"some",
"pretty",
"reasonable",
"assumptions",
"about",
"a",
"single",
"signature",
"on",
"any",
"given",
"JAR",
".",
"This",
"function",
"returns",
"True",
"if",
"the",
"file",
"name",
"given",
"is",
"one",
"that",
"we",
"dispose",
"of",
"to",
"prevent",
"multiple",
"signatures",
"."
] |
mozilla-services/sign-xpi-lib
|
python
|
https://github.com/mozilla-services/sign-xpi-lib/blob/bc6860b555fd26de9204f8d17289903e4fb9f106/sign_xpi_lib/sign_xpi_lib.py#L25-L43
|
[
"def",
"ignore_certain_metainf_files",
"(",
"filename",
")",
":",
"ignore",
"=",
"(",
"\"META-INF/manifest.mf\"",
",",
"\"META-INF/*.sf\"",
",",
"\"META-INF/*.rsa\"",
",",
"\"META-INF/*.dsa\"",
",",
"\"META-INF/ids.json\"",
")",
"for",
"glob",
"in",
"ignore",
":",
"# Explicitly match against all upper case to prevent the kind of",
"# runtime errors that lead to https://bugzil.la/1169574",
"if",
"fnmatch",
".",
"fnmatchcase",
"(",
"filename",
".",
"upper",
"(",
")",
",",
"glob",
".",
"upper",
"(",
")",
")",
":",
"return",
"True",
"return",
"False"
] |
bc6860b555fd26de9204f8d17289903e4fb9f106
|
test
|
file_key
|
Sort keys for xpi files
The filenames in a manifest are ordered so that files not in a
directory come before files in any directory, ordered
alphabetically but ignoring case, with a few exceptions
(install.rdf, chrome.manifest, icon.png and icon64.png come at the
beginning; licenses come at the end).
This order does not appear to affect anything in any way, but it
looks nicer.
|
sign_xpi_lib/sign_xpi_lib.py
|
def file_key(filename):
'''Sort keys for xpi files
The filenames in a manifest are ordered so that files not in a
directory come before files in any directory, ordered
alphabetically but ignoring case, with a few exceptions
(install.rdf, chrome.manifest, icon.png and icon64.png come at the
beginning; licenses come at the end).
This order does not appear to affect anything in any way, but it
looks nicer.
'''
prio = 4
if filename == 'install.rdf':
prio = 1
elif filename in ["chrome.manifest", "icon.png", "icon64.png"]:
prio = 2
elif filename in ["MPL", "GPL", "LGPL", "COPYING",
"LICENSE", "license.txt"]:
prio = 5
return (prio, os.path.split(filename.lower()))
|
def file_key(filename):
'''Sort keys for xpi files
The filenames in a manifest are ordered so that files not in a
directory come before files in any directory, ordered
alphabetically but ignoring case, with a few exceptions
(install.rdf, chrome.manifest, icon.png and icon64.png come at the
beginning; licenses come at the end).
This order does not appear to affect anything in any way, but it
looks nicer.
'''
prio = 4
if filename == 'install.rdf':
prio = 1
elif filename in ["chrome.manifest", "icon.png", "icon64.png"]:
prio = 2
elif filename in ["MPL", "GPL", "LGPL", "COPYING",
"LICENSE", "license.txt"]:
prio = 5
return (prio, os.path.split(filename.lower()))
|
[
"Sort",
"keys",
"for",
"xpi",
"files"
] |
mozilla-services/sign-xpi-lib
|
python
|
https://github.com/mozilla-services/sign-xpi-lib/blob/bc6860b555fd26de9204f8d17289903e4fb9f106/sign_xpi_lib/sign_xpi_lib.py#L46-L66
|
[
"def",
"file_key",
"(",
"filename",
")",
":",
"prio",
"=",
"4",
"if",
"filename",
"==",
"'install.rdf'",
":",
"prio",
"=",
"1",
"elif",
"filename",
"in",
"[",
"\"chrome.manifest\"",
",",
"\"icon.png\"",
",",
"\"icon64.png\"",
"]",
":",
"prio",
"=",
"2",
"elif",
"filename",
"in",
"[",
"\"MPL\"",
",",
"\"GPL\"",
",",
"\"LGPL\"",
",",
"\"COPYING\"",
",",
"\"LICENSE\"",
",",
"\"license.txt\"",
"]",
":",
"prio",
"=",
"5",
"return",
"(",
"prio",
",",
"os",
".",
"path",
".",
"split",
"(",
"filename",
".",
"lower",
"(",
")",
")",
")"
] |
bc6860b555fd26de9204f8d17289903e4fb9f106
|
test
|
vlq2int
|
Read one VLQ-encoded integer value from an input data stream.
|
adjutant.py
|
def vlq2int(data):
"""Read one VLQ-encoded integer value from an input data stream."""
# The VLQ is little-endian.
byte = ord(data.read(1))
value = byte & 0x7F
shift = 1
while byte & 0x80 != 0:
byte = ord(data.read(1))
value = ((byte & 0x7F) << shift * 7) | value
shift += 1
return value
|
def vlq2int(data):
"""Read one VLQ-encoded integer value from an input data stream."""
# The VLQ is little-endian.
byte = ord(data.read(1))
value = byte & 0x7F
shift = 1
while byte & 0x80 != 0:
byte = ord(data.read(1))
value = ((byte & 0x7F) << shift * 7) | value
shift += 1
return value
|
[
"Read",
"one",
"VLQ",
"-",
"encoded",
"integer",
"value",
"from",
"an",
"input",
"data",
"stream",
"."
] |
eagleflo/adjutant
|
python
|
https://github.com/eagleflo/adjutant/blob/85d800d9979fa122e0888af48c2e6a697f9da458/adjutant.py#L109-L119
|
[
"def",
"vlq2int",
"(",
"data",
")",
":",
"# The VLQ is little-endian.",
"byte",
"=",
"ord",
"(",
"data",
".",
"read",
"(",
"1",
")",
")",
"value",
"=",
"byte",
"&",
"0x7F",
"shift",
"=",
"1",
"while",
"byte",
"&",
"0x80",
"!=",
"0",
":",
"byte",
"=",
"ord",
"(",
"data",
".",
"read",
"(",
"1",
")",
")",
"value",
"=",
"(",
"(",
"byte",
"&",
"0x7F",
")",
"<<",
"shift",
"*",
"7",
")",
"|",
"value",
"shift",
"+=",
"1",
"return",
"value"
] |
85d800d9979fa122e0888af48c2e6a697f9da458
|
test
|
read_table
|
Read a table structure.
These are used by Blizzard to collect pieces of data together. Each
value is prefixed by two bytes, first denoting (doubled) index and the
second denoting some sort of key -- so far it has always been '09'. The
actual value follows as a Variable-Length Quantity, also known as uintvar.
The actual value is also doubled.
In some tables the keys might jump from 0A 09 to 04 09 for example.
I have no idea why this happens, as the next logical key is 0C. Perhaps
it's a table in a table? Some sort of headers might exist for these
tables, I'd imagine at least denoting length. Further research required.
|
adjutant.py
|
def read_table(data, fields):
"""Read a table structure.
These are used by Blizzard to collect pieces of data together. Each
value is prefixed by two bytes, first denoting (doubled) index and the
second denoting some sort of key -- so far it has always been '09'. The
actual value follows as a Variable-Length Quantity, also known as uintvar.
The actual value is also doubled.
In some tables the keys might jump from 0A 09 to 04 09 for example.
I have no idea why this happens, as the next logical key is 0C. Perhaps
it's a table in a table? Some sort of headers might exist for these
tables, I'd imagine at least denoting length. Further research required.
"""
def read_field(field_name):
data.read(2)
table[field_name] = vlq2int(data) / 2
# Discard unknown fields.
if field_name == 'unknown':
del table[field_name]
table = {}
for field in fields:
read_field(field)
return table
|
def read_table(data, fields):
"""Read a table structure.
These are used by Blizzard to collect pieces of data together. Each
value is prefixed by two bytes, first denoting (doubled) index and the
second denoting some sort of key -- so far it has always been '09'. The
actual value follows as a Variable-Length Quantity, also known as uintvar.
The actual value is also doubled.
In some tables the keys might jump from 0A 09 to 04 09 for example.
I have no idea why this happens, as the next logical key is 0C. Perhaps
it's a table in a table? Some sort of headers might exist for these
tables, I'd imagine at least denoting length. Further research required.
"""
def read_field(field_name):
data.read(2)
table[field_name] = vlq2int(data) / 2
# Discard unknown fields.
if field_name == 'unknown':
del table[field_name]
table = {}
for field in fields:
read_field(field)
return table
|
[
"Read",
"a",
"table",
"structure",
"."
] |
eagleflo/adjutant
|
python
|
https://github.com/eagleflo/adjutant/blob/85d800d9979fa122e0888af48c2e6a697f9da458/adjutant.py#L122-L146
|
[
"def",
"read_table",
"(",
"data",
",",
"fields",
")",
":",
"def",
"read_field",
"(",
"field_name",
")",
":",
"data",
".",
"read",
"(",
"2",
")",
"table",
"[",
"field_name",
"]",
"=",
"vlq2int",
"(",
"data",
")",
"/",
"2",
"# Discard unknown fields.",
"if",
"field_name",
"==",
"'unknown'",
":",
"del",
"table",
"[",
"field_name",
"]",
"table",
"=",
"{",
"}",
"for",
"field",
"in",
"fields",
":",
"read_field",
"(",
"field",
")",
"return",
"table"
] |
85d800d9979fa122e0888af48c2e6a697f9da458
|
test
|
SC2Replay._parse_header
|
Parse the user data header portion of the replay.
|
adjutant.py
|
def _parse_header(self):
"""Parse the user data header portion of the replay."""
header = OrderedDict()
user_data_header = self.archive.header['user_data_header']['content']
if re.search(r'StarCraft II replay', user_data_header):
user_data_header = StringIO.StringIO(user_data_header)
user_data_header.seek(30) # Just skip the beginning.
header.update(read_table(user_data_header, ['release_flag',
'major_version',
'minor_version',
'maintenance_version',
'build_number',
'unknown',
'unknown',
'duration']))
# Some post processing is required.
header['version'] = '%s.%s.%s.%s' % (header['major_version'],
header['minor_version'],
header['maintenance_version'],
header['build_number'])
if not header['release_flag']:
header['version'] += ' (dev)'
# Duration is actually stored as 1/16th of a seconds. Go figure.
header['duration'] /= 16
else:
raise ValueError("The given file is not a StarCraft II replay.")
return header
|
def _parse_header(self):
"""Parse the user data header portion of the replay."""
header = OrderedDict()
user_data_header = self.archive.header['user_data_header']['content']
if re.search(r'StarCraft II replay', user_data_header):
user_data_header = StringIO.StringIO(user_data_header)
user_data_header.seek(30) # Just skip the beginning.
header.update(read_table(user_data_header, ['release_flag',
'major_version',
'minor_version',
'maintenance_version',
'build_number',
'unknown',
'unknown',
'duration']))
# Some post processing is required.
header['version'] = '%s.%s.%s.%s' % (header['major_version'],
header['minor_version'],
header['maintenance_version'],
header['build_number'])
if not header['release_flag']:
header['version'] += ' (dev)'
# Duration is actually stored as 1/16th of a seconds. Go figure.
header['duration'] /= 16
else:
raise ValueError("The given file is not a StarCraft II replay.")
return header
|
[
"Parse",
"the",
"user",
"data",
"header",
"portion",
"of",
"the",
"replay",
"."
] |
eagleflo/adjutant
|
python
|
https://github.com/eagleflo/adjutant/blob/85d800d9979fa122e0888af48c2e6a697f9da458/adjutant.py#L163-L191
|
[
"def",
"_parse_header",
"(",
"self",
")",
":",
"header",
"=",
"OrderedDict",
"(",
")",
"user_data_header",
"=",
"self",
".",
"archive",
".",
"header",
"[",
"'user_data_header'",
"]",
"[",
"'content'",
"]",
"if",
"re",
".",
"search",
"(",
"r'StarCraft II replay'",
",",
"user_data_header",
")",
":",
"user_data_header",
"=",
"StringIO",
".",
"StringIO",
"(",
"user_data_header",
")",
"user_data_header",
".",
"seek",
"(",
"30",
")",
"# Just skip the beginning.",
"header",
".",
"update",
"(",
"read_table",
"(",
"user_data_header",
",",
"[",
"'release_flag'",
",",
"'major_version'",
",",
"'minor_version'",
",",
"'maintenance_version'",
",",
"'build_number'",
",",
"'unknown'",
",",
"'unknown'",
",",
"'duration'",
"]",
")",
")",
"# Some post processing is required.",
"header",
"[",
"'version'",
"]",
"=",
"'%s.%s.%s.%s'",
"%",
"(",
"header",
"[",
"'major_version'",
"]",
",",
"header",
"[",
"'minor_version'",
"]",
",",
"header",
"[",
"'maintenance_version'",
"]",
",",
"header",
"[",
"'build_number'",
"]",
")",
"if",
"not",
"header",
"[",
"'release_flag'",
"]",
":",
"header",
"[",
"'version'",
"]",
"+=",
"' (dev)'",
"# Duration is actually stored as 1/16th of a seconds. Go figure.",
"header",
"[",
"'duration'",
"]",
"/=",
"16",
"else",
":",
"raise",
"ValueError",
"(",
"\"The given file is not a StarCraft II replay.\"",
")",
"return",
"header"
] |
85d800d9979fa122e0888af48c2e6a697f9da458
|
test
|
SC2Replay.get_duration
|
Transform duration into a human-readable form.
|
adjutant.py
|
def get_duration(self, seconds):
"""Transform duration into a human-readable form."""
duration = ""
minutes, seconds = divmod(seconds, 60)
if minutes >= 60:
hours, minutes = divmod(minutes, 60)
duration = "%sh " % hours
duration += "%sm %ss" % (minutes, seconds)
return duration
|
def get_duration(self, seconds):
"""Transform duration into a human-readable form."""
duration = ""
minutes, seconds = divmod(seconds, 60)
if minutes >= 60:
hours, minutes = divmod(minutes, 60)
duration = "%sh " % hours
duration += "%sm %ss" % (minutes, seconds)
return duration
|
[
"Transform",
"duration",
"into",
"a",
"human",
"-",
"readable",
"form",
"."
] |
eagleflo/adjutant
|
python
|
https://github.com/eagleflo/adjutant/blob/85d800d9979fa122e0888af48c2e6a697f9da458/adjutant.py#L241-L249
|
[
"def",
"get_duration",
"(",
"self",
",",
"seconds",
")",
":",
"duration",
"=",
"\"\"",
"minutes",
",",
"seconds",
"=",
"divmod",
"(",
"seconds",
",",
"60",
")",
"if",
"minutes",
">=",
"60",
":",
"hours",
",",
"minutes",
"=",
"divmod",
"(",
"minutes",
",",
"60",
")",
"duration",
"=",
"\"%sh \"",
"%",
"hours",
"duration",
"+=",
"\"%sm %ss\"",
"%",
"(",
"minutes",
",",
"seconds",
")",
"return",
"duration"
] |
85d800d9979fa122e0888af48c2e6a697f9da458
|
test
|
SC2Replay.print_details
|
Print a summary of the game details.
|
adjutant.py
|
def print_details(self):
"""Print a summary of the game details."""
print 'Map ', self.map
print 'Duration ', self.duration
print 'Version ', self.version
print 'Team Player Race Color'
print '-----------------------------------'
for player in self.players:
print '{team:<5} {name:12} {race:10} {color}'.format(**player)
|
def print_details(self):
"""Print a summary of the game details."""
print 'Map ', self.map
print 'Duration ', self.duration
print 'Version ', self.version
print 'Team Player Race Color'
print '-----------------------------------'
for player in self.players:
print '{team:<5} {name:12} {race:10} {color}'.format(**player)
|
[
"Print",
"a",
"summary",
"of",
"the",
"game",
"details",
"."
] |
eagleflo/adjutant
|
python
|
https://github.com/eagleflo/adjutant/blob/85d800d9979fa122e0888af48c2e6a697f9da458/adjutant.py#L251-L259
|
[
"def",
"print_details",
"(",
"self",
")",
":",
"print",
"'Map '",
",",
"self",
".",
"map",
"print",
"'Duration '",
",",
"self",
".",
"duration",
"print",
"'Version '",
",",
"self",
".",
"version",
"print",
"'Team Player Race Color'",
"print",
"'-----------------------------------'",
"for",
"player",
"in",
"self",
".",
"players",
":",
"print",
"'{team:<5} {name:12} {race:10} {color}'",
".",
"format",
"(",
"*",
"*",
"player",
")"
] |
85d800d9979fa122e0888af48c2e6a697f9da458
|
test
|
FormEvents.data
|
This function gets back data that the user typed.
|
gui/gui_mainLayout.py
|
def data(self):
"""
This function gets back data that the user typed.
"""
self.batch_name_value = self.ui.batch_name_value.text()
self.saa_values = self.ui.saa_values.text()
self.sza_values = self.ui.sza_values.text()
self.p_values = self.ui.p_values.text()
self.x_value = self.ui.x_value.text()
self.y_value = self.ui.y_value.text()
self.g_value = self.ui.g_value.text()
self.s_value = self.ui.s_value.text()
self.z_value = self.ui.z_value.text()
self.wavelength_values = self.ui.wavelength_values.text()
self.verbose_value = self.ui.verbose_value.text()
self.phytoplankton_path = self.ui.phyto_path.text()
self.bottom_path = self.ui.bottom_path.text()
self.executive_path = self.ui.exec_path.text()
self.nb_cpu = self.ui.nb_cpu.currentText()
self.report_parameter_value = str(self.ui.report_parameter_value.text())
|
def data(self):
"""
This function gets back data that the user typed.
"""
self.batch_name_value = self.ui.batch_name_value.text()
self.saa_values = self.ui.saa_values.text()
self.sza_values = self.ui.sza_values.text()
self.p_values = self.ui.p_values.text()
self.x_value = self.ui.x_value.text()
self.y_value = self.ui.y_value.text()
self.g_value = self.ui.g_value.text()
self.s_value = self.ui.s_value.text()
self.z_value = self.ui.z_value.text()
self.wavelength_values = self.ui.wavelength_values.text()
self.verbose_value = self.ui.verbose_value.text()
self.phytoplankton_path = self.ui.phyto_path.text()
self.bottom_path = self.ui.bottom_path.text()
self.executive_path = self.ui.exec_path.text()
self.nb_cpu = self.ui.nb_cpu.currentText()
self.report_parameter_value = str(self.ui.report_parameter_value.text())
|
[
"This",
"function",
"gets",
"back",
"data",
"that",
"the",
"user",
"typed",
"."
] |
marrabld/planarradpy
|
python
|
https://github.com/marrabld/planarradpy/blob/5095d1cb98d4f67a7c3108c9282f2d59253e89a8/gui/gui_mainLayout.py#L138-L157
|
[
"def",
"data",
"(",
"self",
")",
":",
"self",
".",
"batch_name_value",
"=",
"self",
".",
"ui",
".",
"batch_name_value",
".",
"text",
"(",
")",
"self",
".",
"saa_values",
"=",
"self",
".",
"ui",
".",
"saa_values",
".",
"text",
"(",
")",
"self",
".",
"sza_values",
"=",
"self",
".",
"ui",
".",
"sza_values",
".",
"text",
"(",
")",
"self",
".",
"p_values",
"=",
"self",
".",
"ui",
".",
"p_values",
".",
"text",
"(",
")",
"self",
".",
"x_value",
"=",
"self",
".",
"ui",
".",
"x_value",
".",
"text",
"(",
")",
"self",
".",
"y_value",
"=",
"self",
".",
"ui",
".",
"y_value",
".",
"text",
"(",
")",
"self",
".",
"g_value",
"=",
"self",
".",
"ui",
".",
"g_value",
".",
"text",
"(",
")",
"self",
".",
"s_value",
"=",
"self",
".",
"ui",
".",
"s_value",
".",
"text",
"(",
")",
"self",
".",
"z_value",
"=",
"self",
".",
"ui",
".",
"z_value",
".",
"text",
"(",
")",
"self",
".",
"wavelength_values",
"=",
"self",
".",
"ui",
".",
"wavelength_values",
".",
"text",
"(",
")",
"self",
".",
"verbose_value",
"=",
"self",
".",
"ui",
".",
"verbose_value",
".",
"text",
"(",
")",
"self",
".",
"phytoplankton_path",
"=",
"self",
".",
"ui",
".",
"phyto_path",
".",
"text",
"(",
")",
"self",
".",
"bottom_path",
"=",
"self",
".",
"ui",
".",
"bottom_path",
".",
"text",
"(",
")",
"self",
".",
"executive_path",
"=",
"self",
".",
"ui",
".",
"exec_path",
".",
"text",
"(",
")",
"self",
".",
"nb_cpu",
"=",
"self",
".",
"ui",
".",
"nb_cpu",
".",
"currentText",
"(",
")",
"self",
".",
"report_parameter_value",
"=",
"str",
"(",
"self",
".",
"ui",
".",
"report_parameter_value",
".",
"text",
"(",
")",
")"
] |
5095d1cb98d4f67a7c3108c9282f2d59253e89a8
|
test
|
FormEvents.search_file_result
|
This function once the file found, display data's file and the graphic associated.
|
gui/gui_mainLayout.py
|
def search_file_result(self):
"""
This function once the file found, display data's file and the graphic associated.
"""
if self.ui.tabWidget.currentIndex() == TabWidget.NORMAL_MODE:
self.result_file = self.file_dialog.getOpenFileName(caption=str("Open Report File"), directory="./outputs")
if not self.result_file == '':
self.ui.show_all_curves.setDisabled(False)
self.ui.show_grid.setDisabled(False)
self.data_processing()
self.display_the_graphic(self.num_line, self.wavelength, self.data_wanted, self.information)
self.authorized_display = True
|
def search_file_result(self):
"""
This function once the file found, display data's file and the graphic associated.
"""
if self.ui.tabWidget.currentIndex() == TabWidget.NORMAL_MODE:
self.result_file = self.file_dialog.getOpenFileName(caption=str("Open Report File"), directory="./outputs")
if not self.result_file == '':
self.ui.show_all_curves.setDisabled(False)
self.ui.show_grid.setDisabled(False)
self.data_processing()
self.display_the_graphic(self.num_line, self.wavelength, self.data_wanted, self.information)
self.authorized_display = True
|
[
"This",
"function",
"once",
"the",
"file",
"found",
"display",
"data",
"s",
"file",
"and",
"the",
"graphic",
"associated",
"."
] |
marrabld/planarradpy
|
python
|
https://github.com/marrabld/planarradpy/blob/5095d1cb98d4f67a7c3108c9282f2d59253e89a8/gui/gui_mainLayout.py#L181-L192
|
[
"def",
"search_file_result",
"(",
"self",
")",
":",
"if",
"self",
".",
"ui",
".",
"tabWidget",
".",
"currentIndex",
"(",
")",
"==",
"TabWidget",
".",
"NORMAL_MODE",
":",
"self",
".",
"result_file",
"=",
"self",
".",
"file_dialog",
".",
"getOpenFileName",
"(",
"caption",
"=",
"str",
"(",
"\"Open Report File\"",
")",
",",
"directory",
"=",
"\"./outputs\"",
")",
"if",
"not",
"self",
".",
"result_file",
"==",
"''",
":",
"self",
".",
"ui",
".",
"show_all_curves",
".",
"setDisabled",
"(",
"False",
")",
"self",
".",
"ui",
".",
"show_grid",
".",
"setDisabled",
"(",
"False",
")",
"self",
".",
"data_processing",
"(",
")",
"self",
".",
"display_the_graphic",
"(",
"self",
".",
"num_line",
",",
"self",
".",
"wavelength",
",",
"self",
".",
"data_wanted",
",",
"self",
".",
"information",
")",
"self",
".",
"authorized_display",
"=",
"True"
] |
5095d1cb98d4f67a7c3108c9282f2d59253e89a8
|
test
|
FormEvents.check_values
|
This function checks if there is no problem about values given.
If there is a problem with a or some values, their label's color is changed to red,
and call a function to display an error message.
If there is no problem, their label, if it is necessary, is changed to grey (default color).
|
gui/gui_mainLayout.py
|
def check_values(self):
"""
This function checks if there is no problem about values given.
If there is a problem with a or some values, their label's color is changed to red,
and call a function to display an error message.
If there is no problem, their label, if it is necessary, is changed to grey (default color).
"""
error_color = 'color: red'
no_error_color = 'color: 0.75' # light gray
self.error_batch_name = False
self.error_report_parameter = False
self.error_saa_result = False
self.error_sza_result = False
self.error_p_result = False
self.error_wavelength_result = False
self.error_x_result = False
self.error_y_result = False
self.error_g_result = False
self.error_s_result = False
self.error_z_result = False
self.error_verbose_result = False
self.error_phytoplankton_path_result = False
self.error_bottom_path_result = False
self.error_executive_path_result = False
"""
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
No particular checking for paths!
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
"""
if self.ui.tabWidget.currentIndex() == TabWidget.NORMAL_MODE:
check_num = '(.*)' # Regular expression to use
self.prog = re.compile(check_num) # Analysis object creation
batch_name_result = self.prog.search(
self.batch_name_value) # String retrieval thanks to the regular expression
report_parameter_result = self.prog.search(self.report_parameter_value)
try:
if (self.ui.batch_name_value.text().isEmpty()) | (
batch_name_result.group() != self.ui.batch_name_value.text()):
self.ui.batch_name_label.setStyleSheet(error_color)
self.error_batch_name = True
else:
self.ui.batch_name_label.setStyleSheet(no_error_color)
self.error_batch_name = False
except AttributeError:
self.ui.batch_name_label.setStyleSheet(error_color)
self.error_batch_name = True
try:
if (self.ui.report_parameter_value.text().isEmpty()) | (
report_parameter_result.group() != self.ui.report_parameter_value.text()):
self.ui.report_parameter_label.setStyleSheet(error_color)
self.error_report_parameter = True
else:
self.ui.report_parameter_label.setStyleSheet(no_error_color)
self.error_report_parameter = False
except AttributeError:
self.ui.report_parameter_label.setStyleSheet(error_color)
self.error_report_parameter = True
# -----------------------------------------------------------#
# The following checks values separate by comas without space.
# -----------------------------------------------------------#
"""
Problem : The user can write just one letter or starts with a dot or finishes the list with a dot.
"""
# check_num_1 = '(^([0-9]+[.]?[0-9]*[,]?){0,}[^,])|(^([0-9]*[,]){1,}[^,])' # Regular expression to use
# self.prog_1 = re.compile(check_num_1) # Analysis object creation
# self.wavelength_values = str(self.wavelength_values).translate(None, ' ').strip(' ')
# # print(self.wavelength_values)
# p_result = self.prog_1.search(self.p_values) # String retrieval thanks to the regular expression
# saa_result = self.prog_1.search(self.saa_values)
# sza_result = self.prog_1.search(self.sza_values)
# wavelength_result = self.prog_1.search(self.wavelength_values)
# # print(wavelength_result.group())
#
# try:
#
# if saa_result.group() != self.ui.saa_values.text():
# self.ui.saa_label.setStyleSheet(error_color)
# self.error_sza_result = True
# else:
# self.ui.saa_label.setStyleSheet(no_error_color)
# self.error_sza_result = False
# except AttributeError:
# self.ui.saa_label.setStyleSheet(error_color)
# self.error_sza_result = True
# try:
# if sza_result.group() != self.ui.sza_values.text():
# self.ui.sza_label.setStyleSheet(error_color)
# self.error_saa_result = True
# else:
# self.ui.sza_label.setStyleSheet(no_error_color)
# self.error_saa_result = False
# except AttributeError:
# self.ui.sza_label.setStyleSheet(error_color)
# self.error_saa_result = True
# try:
# if p_result.group() != self.ui.p_values.text():
# self.ui.p_label.setStyleSheet(error_color)
# self.error_p_result = True
# else:
# self.ui.p_label.setStyleSheet(no_error_color)
# self.error_p_result = False
# except AttributeError:
# self.ui.p_label.setStyleSheet(error_color)
# self.error_p_result = True
# try:
# if wavelength_result.group() != str(self.ui.wavelength_values.text()).translate(None, ' ').strip(' '):
# self.ui.waveL_label.setStyleSheet(error_color)
# self.error_wavelength_result = True
# else:
# self.ui.waveL_label.setStyleSheet(no_error_color)
# self.error_wavelength_result = False
# except AttributeError:
# self.ui.waveL_label.setStyleSheet(error_color)
# self.error_wavelength_result = True
# ---------------------------------------------------#
# The following checks values containing only numbers.
# ---------------------------------------------------#
check_num_2 = '(^([0-9]+[.]?[0-9]*[,]?){0,}[^,])|(^([0-9]+[,]){1,}[^,])'
self.prog_2 = re.compile(check_num_2)
x_result = self.prog_2.search(self.x_value)
y_result = self.prog_2.search(self.y_value)
g_result = self.prog_2.search(self.g_value)
s_result = self.prog_2.search(self.s_value)
z_result = self.prog_2.search(self.z_value)
try:
if x_result.group() != self.ui.x_value.text():
self.ui.particles_label.setStyleSheet(error_color)
self.ui.x_label.setStyleSheet(error_color)
self.error_x_result = True
else:
self.ui.particles_label.setStyleSheet(no_error_color)
self.ui.x_label.setStyleSheet(no_error_color)
self.error_x_result = False
except AttributeError:
self.ui.particles_label.setStyleSheet(no_error_color)
self.ui.x_label.setStyleSheet(error_color)
self.error_x_result = True
try:
if y_result.group() != self.ui.y_value.text():
self.ui.particles_label.setStyleSheet(error_color)
self.ui.y_label.setStyleSheet(error_color)
self.error_y_result = True
else:
self.ui.particles_label.setStyleSheet(no_error_color)
self.ui.y_label.setStyleSheet(no_error_color)
self.error_y_result = False
except AttributeError:
self.ui.particles_label.setStyleSheet(error_color)
self.ui.y_label.setStyleSheet(error_color)
self.error_y_result = True
try:
if g_result.group() != self.ui.g_value.text():
self.ui.organic_label.setStyleSheet(error_color)
self.ui.g_label.setStyleSheet(error_color)
self.error_g_result = True
else:
self.ui.organic_label.setStyleSheet(no_error_color)
self.ui.g_label.setStyleSheet(no_error_color)
self.error_g_result = False
except AttributeError:
self.ui.organic_label.setStyleSheet(error_color)
self.ui.g_label.setStyleSheet(error_color)
self.error_g_result = True
try:
if s_result.group() != self.ui.s_value.text():
self.ui.organic_label.setStyleSheet(error_color)
self.ui.s_label.setStyleSheet(error_color)
self.error_s_result = True
else:
self.ui.organic_label.setStyleSheet(no_error_color)
self.ui.s_label.setStyleSheet(no_error_color)
self.error_x_result = False
except AttributeError:
self.ui.organic_label.setStyleSheet(error_color)
self.ui.s_label.setStyleSheet(error_color)
self.error_x_result = True
try:
if z_result.group() != self.ui.z_value.text():
self.ui.z_label.setStyleSheet(error_color)
self.error_z_result = True
else:
self.ui.z_label.setStyleSheet(no_error_color)
self.error_z_result = False
except AttributeError:
self.ui.z_label.setStyleSheet(error_color)
self.error_z_result = True
check_num_3 = '[1-6]+'
self.prog_3 = re.compile(check_num_3)
verbose_result = self.prog_3.search(self.verbose_value)
try:
if verbose_result.group() != self.ui.verbose_value.text():
self.ui.verbose_label.setStyleSheet(error_color)
self.error_verbose_result = True
else:
self.ui.verbose_label.setStyleSheet(no_error_color)
self.error_verbose_result = False
except AttributeError:
self.ui.verbose_label.setStyleSheet(error_color)
self.error_verbose_result = True
# ------------------------------------------------#
# The following checks values containing only path.
# ------------------------------------------------#
"""
#!!!!!!!!!!!!!!!!!!!!!!!!!!
#Syntax test doesn't work ! -> #check_num4 = '[/]([A-Za-z]+[/]?)+[A-Za-z]$'
#!!!!!!!!!!!!!!!!!!!!!!!!!!
"""
check_num_4 = '(.*)' # Take all strings possible.
self.prog_4 = re.compile(check_num_4)
phytoplankton_path_result = self.prog_4.search(self.phytoplankton_path)
bottom_path_result = self.prog_4.search(self.bottom_path)
executive_path_result = self.prog_4.search(self.executive_path)
try:
if phytoplankton_path_result.group() != self.ui.phyto_path.text():
self.ui.phyto_label.setStyleSheet(error_color)
self.error_phytoplankton_path_result = True
else:
self.ui.phyto_label.setStyleSheet(no_error_color)
self.error_phytoplankton_path_result = False
except AttributeError:
self.ui.phyto_label.setStyleSheet(error_color)
self.error_phytoplankton_path_result = True
try:
if bottom_path_result.group() != self.ui.bottom_path.text():
self.ui.bottom_label.setStyleSheet(error_color)
self.error_bottom_path_result = True
else:
self.ui.bottom_label.setStyleSheet(no_error_color)
self.error_bottom_path_result = False
except AttributeError:
self.ui.bottom_label.setStyleSheet(error_color)
self.error_bottom_path_result = True
try:
if executive_path_result.group() != self.ui.exec_path.text():
self.ui.execPath_label.setStyleSheet(error_color)
self.error_executive_path_result = True
else:
self.ui.execPath_label.setStyleSheet(no_error_color)
self.error_executive_path_result = False
except AttributeError:
self.ui.execPath_label.setStyleSheet(error_color)
self.error_executive_path_result = True
if (self.error_batch_name == True) | (self.error_report_parameter == True) | (
self.error_saa_result == True) | (self.error_sza_result == True) | (
self.error_p_result == True) | (self.error_wavelength_result == True) | (
self.error_x_result == True) | (self.error_y_result == True) | (
self.error_g_result == True) | (self.error_s_result == True) | (
self.error_z_result == True) | (self.error_verbose_result == True) | (
self.error_phytoplankton_path_result == True) | (self.error_bottom_path_result == True) | (
self.error_executive_path_result == True):
self.without_error = False
else:
self.without_error = True
|
def check_values(self):
"""
This function checks if there is no problem about values given.
If there is a problem with a or some values, their label's color is changed to red,
and call a function to display an error message.
If there is no problem, their label, if it is necessary, is changed to grey (default color).
"""
error_color = 'color: red'
no_error_color = 'color: 0.75' # light gray
self.error_batch_name = False
self.error_report_parameter = False
self.error_saa_result = False
self.error_sza_result = False
self.error_p_result = False
self.error_wavelength_result = False
self.error_x_result = False
self.error_y_result = False
self.error_g_result = False
self.error_s_result = False
self.error_z_result = False
self.error_verbose_result = False
self.error_phytoplankton_path_result = False
self.error_bottom_path_result = False
self.error_executive_path_result = False
"""
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
No particular checking for paths!
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
"""
if self.ui.tabWidget.currentIndex() == TabWidget.NORMAL_MODE:
check_num = '(.*)' # Regular expression to use
self.prog = re.compile(check_num) # Analysis object creation
batch_name_result = self.prog.search(
self.batch_name_value) # String retrieval thanks to the regular expression
report_parameter_result = self.prog.search(self.report_parameter_value)
try:
if (self.ui.batch_name_value.text().isEmpty()) | (
batch_name_result.group() != self.ui.batch_name_value.text()):
self.ui.batch_name_label.setStyleSheet(error_color)
self.error_batch_name = True
else:
self.ui.batch_name_label.setStyleSheet(no_error_color)
self.error_batch_name = False
except AttributeError:
self.ui.batch_name_label.setStyleSheet(error_color)
self.error_batch_name = True
try:
if (self.ui.report_parameter_value.text().isEmpty()) | (
report_parameter_result.group() != self.ui.report_parameter_value.text()):
self.ui.report_parameter_label.setStyleSheet(error_color)
self.error_report_parameter = True
else:
self.ui.report_parameter_label.setStyleSheet(no_error_color)
self.error_report_parameter = False
except AttributeError:
self.ui.report_parameter_label.setStyleSheet(error_color)
self.error_report_parameter = True
# -----------------------------------------------------------#
# The following checks values separate by comas without space.
# -----------------------------------------------------------#
"""
Problem : The user can write just one letter or starts with a dot or finishes the list with a dot.
"""
# check_num_1 = '(^([0-9]+[.]?[0-9]*[,]?){0,}[^,])|(^([0-9]*[,]){1,}[^,])' # Regular expression to use
# self.prog_1 = re.compile(check_num_1) # Analysis object creation
# self.wavelength_values = str(self.wavelength_values).translate(None, ' ').strip(' ')
# # print(self.wavelength_values)
# p_result = self.prog_1.search(self.p_values) # String retrieval thanks to the regular expression
# saa_result = self.prog_1.search(self.saa_values)
# sza_result = self.prog_1.search(self.sza_values)
# wavelength_result = self.prog_1.search(self.wavelength_values)
# # print(wavelength_result.group())
#
# try:
#
# if saa_result.group() != self.ui.saa_values.text():
# self.ui.saa_label.setStyleSheet(error_color)
# self.error_sza_result = True
# else:
# self.ui.saa_label.setStyleSheet(no_error_color)
# self.error_sza_result = False
# except AttributeError:
# self.ui.saa_label.setStyleSheet(error_color)
# self.error_sza_result = True
# try:
# if sza_result.group() != self.ui.sza_values.text():
# self.ui.sza_label.setStyleSheet(error_color)
# self.error_saa_result = True
# else:
# self.ui.sza_label.setStyleSheet(no_error_color)
# self.error_saa_result = False
# except AttributeError:
# self.ui.sza_label.setStyleSheet(error_color)
# self.error_saa_result = True
# try:
# if p_result.group() != self.ui.p_values.text():
# self.ui.p_label.setStyleSheet(error_color)
# self.error_p_result = True
# else:
# self.ui.p_label.setStyleSheet(no_error_color)
# self.error_p_result = False
# except AttributeError:
# self.ui.p_label.setStyleSheet(error_color)
# self.error_p_result = True
# try:
# if wavelength_result.group() != str(self.ui.wavelength_values.text()).translate(None, ' ').strip(' '):
# self.ui.waveL_label.setStyleSheet(error_color)
# self.error_wavelength_result = True
# else:
# self.ui.waveL_label.setStyleSheet(no_error_color)
# self.error_wavelength_result = False
# except AttributeError:
# self.ui.waveL_label.setStyleSheet(error_color)
# self.error_wavelength_result = True
# ---------------------------------------------------#
# The following checks values containing only numbers.
# ---------------------------------------------------#
check_num_2 = '(^([0-9]+[.]?[0-9]*[,]?){0,}[^,])|(^([0-9]+[,]){1,}[^,])'
self.prog_2 = re.compile(check_num_2)
x_result = self.prog_2.search(self.x_value)
y_result = self.prog_2.search(self.y_value)
g_result = self.prog_2.search(self.g_value)
s_result = self.prog_2.search(self.s_value)
z_result = self.prog_2.search(self.z_value)
try:
if x_result.group() != self.ui.x_value.text():
self.ui.particles_label.setStyleSheet(error_color)
self.ui.x_label.setStyleSheet(error_color)
self.error_x_result = True
else:
self.ui.particles_label.setStyleSheet(no_error_color)
self.ui.x_label.setStyleSheet(no_error_color)
self.error_x_result = False
except AttributeError:
self.ui.particles_label.setStyleSheet(no_error_color)
self.ui.x_label.setStyleSheet(error_color)
self.error_x_result = True
try:
if y_result.group() != self.ui.y_value.text():
self.ui.particles_label.setStyleSheet(error_color)
self.ui.y_label.setStyleSheet(error_color)
self.error_y_result = True
else:
self.ui.particles_label.setStyleSheet(no_error_color)
self.ui.y_label.setStyleSheet(no_error_color)
self.error_y_result = False
except AttributeError:
self.ui.particles_label.setStyleSheet(error_color)
self.ui.y_label.setStyleSheet(error_color)
self.error_y_result = True
try:
if g_result.group() != self.ui.g_value.text():
self.ui.organic_label.setStyleSheet(error_color)
self.ui.g_label.setStyleSheet(error_color)
self.error_g_result = True
else:
self.ui.organic_label.setStyleSheet(no_error_color)
self.ui.g_label.setStyleSheet(no_error_color)
self.error_g_result = False
except AttributeError:
self.ui.organic_label.setStyleSheet(error_color)
self.ui.g_label.setStyleSheet(error_color)
self.error_g_result = True
try:
if s_result.group() != self.ui.s_value.text():
self.ui.organic_label.setStyleSheet(error_color)
self.ui.s_label.setStyleSheet(error_color)
self.error_s_result = True
else:
self.ui.organic_label.setStyleSheet(no_error_color)
self.ui.s_label.setStyleSheet(no_error_color)
self.error_x_result = False
except AttributeError:
self.ui.organic_label.setStyleSheet(error_color)
self.ui.s_label.setStyleSheet(error_color)
self.error_x_result = True
try:
if z_result.group() != self.ui.z_value.text():
self.ui.z_label.setStyleSheet(error_color)
self.error_z_result = True
else:
self.ui.z_label.setStyleSheet(no_error_color)
self.error_z_result = False
except AttributeError:
self.ui.z_label.setStyleSheet(error_color)
self.error_z_result = True
check_num_3 = '[1-6]+'
self.prog_3 = re.compile(check_num_3)
verbose_result = self.prog_3.search(self.verbose_value)
try:
if verbose_result.group() != self.ui.verbose_value.text():
self.ui.verbose_label.setStyleSheet(error_color)
self.error_verbose_result = True
else:
self.ui.verbose_label.setStyleSheet(no_error_color)
self.error_verbose_result = False
except AttributeError:
self.ui.verbose_label.setStyleSheet(error_color)
self.error_verbose_result = True
# ------------------------------------------------#
# The following checks values containing only path.
# ------------------------------------------------#
"""
#!!!!!!!!!!!!!!!!!!!!!!!!!!
#Syntax test doesn't work ! -> #check_num4 = '[/]([A-Za-z]+[/]?)+[A-Za-z]$'
#!!!!!!!!!!!!!!!!!!!!!!!!!!
"""
check_num_4 = '(.*)' # Take all strings possible.
self.prog_4 = re.compile(check_num_4)
phytoplankton_path_result = self.prog_4.search(self.phytoplankton_path)
bottom_path_result = self.prog_4.search(self.bottom_path)
executive_path_result = self.prog_4.search(self.executive_path)
try:
if phytoplankton_path_result.group() != self.ui.phyto_path.text():
self.ui.phyto_label.setStyleSheet(error_color)
self.error_phytoplankton_path_result = True
else:
self.ui.phyto_label.setStyleSheet(no_error_color)
self.error_phytoplankton_path_result = False
except AttributeError:
self.ui.phyto_label.setStyleSheet(error_color)
self.error_phytoplankton_path_result = True
try:
if bottom_path_result.group() != self.ui.bottom_path.text():
self.ui.bottom_label.setStyleSheet(error_color)
self.error_bottom_path_result = True
else:
self.ui.bottom_label.setStyleSheet(no_error_color)
self.error_bottom_path_result = False
except AttributeError:
self.ui.bottom_label.setStyleSheet(error_color)
self.error_bottom_path_result = True
try:
if executive_path_result.group() != self.ui.exec_path.text():
self.ui.execPath_label.setStyleSheet(error_color)
self.error_executive_path_result = True
else:
self.ui.execPath_label.setStyleSheet(no_error_color)
self.error_executive_path_result = False
except AttributeError:
self.ui.execPath_label.setStyleSheet(error_color)
self.error_executive_path_result = True
if (self.error_batch_name == True) | (self.error_report_parameter == True) | (
self.error_saa_result == True) | (self.error_sza_result == True) | (
self.error_p_result == True) | (self.error_wavelength_result == True) | (
self.error_x_result == True) | (self.error_y_result == True) | (
self.error_g_result == True) | (self.error_s_result == True) | (
self.error_z_result == True) | (self.error_verbose_result == True) | (
self.error_phytoplankton_path_result == True) | (self.error_bottom_path_result == True) | (
self.error_executive_path_result == True):
self.without_error = False
else:
self.without_error = True
|
[
"This",
"function",
"checks",
"if",
"there",
"is",
"no",
"problem",
"about",
"values",
"given",
".",
"If",
"there",
"is",
"a",
"problem",
"with",
"a",
"or",
"some",
"values",
"their",
"label",
"s",
"color",
"is",
"changed",
"to",
"red",
"and",
"call",
"a",
"function",
"to",
"display",
"an",
"error",
"message",
".",
"If",
"there",
"is",
"no",
"problem",
"their",
"label",
"if",
"it",
"is",
"necessary",
"is",
"changed",
"to",
"grey",
"(",
"default",
"color",
")",
"."
] |
marrabld/planarradpy
|
python
|
https://github.com/marrabld/planarradpy/blob/5095d1cb98d4f67a7c3108c9282f2d59253e89a8/gui/gui_mainLayout.py#L196-L464
|
[
"def",
"check_values",
"(",
"self",
")",
":",
"error_color",
"=",
"'color: red'",
"no_error_color",
"=",
"'color: 0.75'",
"# light gray",
"self",
".",
"error_batch_name",
"=",
"False",
"self",
".",
"error_report_parameter",
"=",
"False",
"self",
".",
"error_saa_result",
"=",
"False",
"self",
".",
"error_sza_result",
"=",
"False",
"self",
".",
"error_p_result",
"=",
"False",
"self",
".",
"error_wavelength_result",
"=",
"False",
"self",
".",
"error_x_result",
"=",
"False",
"self",
".",
"error_y_result",
"=",
"False",
"self",
".",
"error_g_result",
"=",
"False",
"self",
".",
"error_s_result",
"=",
"False",
"self",
".",
"error_z_result",
"=",
"False",
"self",
".",
"error_verbose_result",
"=",
"False",
"self",
".",
"error_phytoplankton_path_result",
"=",
"False",
"self",
".",
"error_bottom_path_result",
"=",
"False",
"self",
".",
"error_executive_path_result",
"=",
"False",
"\"\"\"\n !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n No particular checking for paths!\n !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n \"\"\"",
"if",
"self",
".",
"ui",
".",
"tabWidget",
".",
"currentIndex",
"(",
")",
"==",
"TabWidget",
".",
"NORMAL_MODE",
":",
"check_num",
"=",
"'(.*)'",
"# Regular expression to use",
"self",
".",
"prog",
"=",
"re",
".",
"compile",
"(",
"check_num",
")",
"# Analysis object creation",
"batch_name_result",
"=",
"self",
".",
"prog",
".",
"search",
"(",
"self",
".",
"batch_name_value",
")",
"# String retrieval thanks to the regular expression",
"report_parameter_result",
"=",
"self",
".",
"prog",
".",
"search",
"(",
"self",
".",
"report_parameter_value",
")",
"try",
":",
"if",
"(",
"self",
".",
"ui",
".",
"batch_name_value",
".",
"text",
"(",
")",
".",
"isEmpty",
"(",
")",
")",
"|",
"(",
"batch_name_result",
".",
"group",
"(",
")",
"!=",
"self",
".",
"ui",
".",
"batch_name_value",
".",
"text",
"(",
")",
")",
":",
"self",
".",
"ui",
".",
"batch_name_label",
".",
"setStyleSheet",
"(",
"error_color",
")",
"self",
".",
"error_batch_name",
"=",
"True",
"else",
":",
"self",
".",
"ui",
".",
"batch_name_label",
".",
"setStyleSheet",
"(",
"no_error_color",
")",
"self",
".",
"error_batch_name",
"=",
"False",
"except",
"AttributeError",
":",
"self",
".",
"ui",
".",
"batch_name_label",
".",
"setStyleSheet",
"(",
"error_color",
")",
"self",
".",
"error_batch_name",
"=",
"True",
"try",
":",
"if",
"(",
"self",
".",
"ui",
".",
"report_parameter_value",
".",
"text",
"(",
")",
".",
"isEmpty",
"(",
")",
")",
"|",
"(",
"report_parameter_result",
".",
"group",
"(",
")",
"!=",
"self",
".",
"ui",
".",
"report_parameter_value",
".",
"text",
"(",
")",
")",
":",
"self",
".",
"ui",
".",
"report_parameter_label",
".",
"setStyleSheet",
"(",
"error_color",
")",
"self",
".",
"error_report_parameter",
"=",
"True",
"else",
":",
"self",
".",
"ui",
".",
"report_parameter_label",
".",
"setStyleSheet",
"(",
"no_error_color",
")",
"self",
".",
"error_report_parameter",
"=",
"False",
"except",
"AttributeError",
":",
"self",
".",
"ui",
".",
"report_parameter_label",
".",
"setStyleSheet",
"(",
"error_color",
")",
"self",
".",
"error_report_parameter",
"=",
"True",
"# -----------------------------------------------------------#",
"# The following checks values separate by comas without space.",
"# -----------------------------------------------------------#",
"\"\"\"\n Problem : The user can write just one letter or starts with a dot or finishes the list with a dot.\n \"\"\"",
"# check_num_1 = '(^([0-9]+[.]?[0-9]*[,]?){0,}[^,])|(^([0-9]*[,]){1,}[^,])' # Regular expression to use",
"# self.prog_1 = re.compile(check_num_1) # Analysis object creation",
"# self.wavelength_values = str(self.wavelength_values).translate(None, ' ').strip(' ')",
"# # print(self.wavelength_values)",
"# p_result = self.prog_1.search(self.p_values) # String retrieval thanks to the regular expression",
"# saa_result = self.prog_1.search(self.saa_values)",
"# sza_result = self.prog_1.search(self.sza_values)",
"# wavelength_result = self.prog_1.search(self.wavelength_values)",
"# # print(wavelength_result.group())",
"#",
"# try:",
"#",
"# if saa_result.group() != self.ui.saa_values.text():",
"# self.ui.saa_label.setStyleSheet(error_color)",
"# self.error_sza_result = True",
"# else:",
"# self.ui.saa_label.setStyleSheet(no_error_color)",
"# self.error_sza_result = False",
"# except AttributeError:",
"# self.ui.saa_label.setStyleSheet(error_color)",
"# self.error_sza_result = True",
"# try:",
"# if sza_result.group() != self.ui.sza_values.text():",
"# self.ui.sza_label.setStyleSheet(error_color)",
"# self.error_saa_result = True",
"# else:",
"# self.ui.sza_label.setStyleSheet(no_error_color)",
"# self.error_saa_result = False",
"# except AttributeError:",
"# self.ui.sza_label.setStyleSheet(error_color)",
"# self.error_saa_result = True",
"# try:",
"# if p_result.group() != self.ui.p_values.text():",
"# self.ui.p_label.setStyleSheet(error_color)",
"# self.error_p_result = True",
"# else:",
"# self.ui.p_label.setStyleSheet(no_error_color)",
"# self.error_p_result = False",
"# except AttributeError:",
"# self.ui.p_label.setStyleSheet(error_color)",
"# self.error_p_result = True",
"# try:",
"# if wavelength_result.group() != str(self.ui.wavelength_values.text()).translate(None, ' ').strip(' '):",
"# self.ui.waveL_label.setStyleSheet(error_color)",
"# self.error_wavelength_result = True",
"# else:",
"# self.ui.waveL_label.setStyleSheet(no_error_color)",
"# self.error_wavelength_result = False",
"# except AttributeError:",
"# self.ui.waveL_label.setStyleSheet(error_color)",
"# self.error_wavelength_result = True",
"# ---------------------------------------------------#",
"# The following checks values containing only numbers.",
"# ---------------------------------------------------#",
"check_num_2",
"=",
"'(^([0-9]+[.]?[0-9]*[,]?){0,}[^,])|(^([0-9]+[,]){1,}[^,])'",
"self",
".",
"prog_2",
"=",
"re",
".",
"compile",
"(",
"check_num_2",
")",
"x_result",
"=",
"self",
".",
"prog_2",
".",
"search",
"(",
"self",
".",
"x_value",
")",
"y_result",
"=",
"self",
".",
"prog_2",
".",
"search",
"(",
"self",
".",
"y_value",
")",
"g_result",
"=",
"self",
".",
"prog_2",
".",
"search",
"(",
"self",
".",
"g_value",
")",
"s_result",
"=",
"self",
".",
"prog_2",
".",
"search",
"(",
"self",
".",
"s_value",
")",
"z_result",
"=",
"self",
".",
"prog_2",
".",
"search",
"(",
"self",
".",
"z_value",
")",
"try",
":",
"if",
"x_result",
".",
"group",
"(",
")",
"!=",
"self",
".",
"ui",
".",
"x_value",
".",
"text",
"(",
")",
":",
"self",
".",
"ui",
".",
"particles_label",
".",
"setStyleSheet",
"(",
"error_color",
")",
"self",
".",
"ui",
".",
"x_label",
".",
"setStyleSheet",
"(",
"error_color",
")",
"self",
".",
"error_x_result",
"=",
"True",
"else",
":",
"self",
".",
"ui",
".",
"particles_label",
".",
"setStyleSheet",
"(",
"no_error_color",
")",
"self",
".",
"ui",
".",
"x_label",
".",
"setStyleSheet",
"(",
"no_error_color",
")",
"self",
".",
"error_x_result",
"=",
"False",
"except",
"AttributeError",
":",
"self",
".",
"ui",
".",
"particles_label",
".",
"setStyleSheet",
"(",
"no_error_color",
")",
"self",
".",
"ui",
".",
"x_label",
".",
"setStyleSheet",
"(",
"error_color",
")",
"self",
".",
"error_x_result",
"=",
"True",
"try",
":",
"if",
"y_result",
".",
"group",
"(",
")",
"!=",
"self",
".",
"ui",
".",
"y_value",
".",
"text",
"(",
")",
":",
"self",
".",
"ui",
".",
"particles_label",
".",
"setStyleSheet",
"(",
"error_color",
")",
"self",
".",
"ui",
".",
"y_label",
".",
"setStyleSheet",
"(",
"error_color",
")",
"self",
".",
"error_y_result",
"=",
"True",
"else",
":",
"self",
".",
"ui",
".",
"particles_label",
".",
"setStyleSheet",
"(",
"no_error_color",
")",
"self",
".",
"ui",
".",
"y_label",
".",
"setStyleSheet",
"(",
"no_error_color",
")",
"self",
".",
"error_y_result",
"=",
"False",
"except",
"AttributeError",
":",
"self",
".",
"ui",
".",
"particles_label",
".",
"setStyleSheet",
"(",
"error_color",
")",
"self",
".",
"ui",
".",
"y_label",
".",
"setStyleSheet",
"(",
"error_color",
")",
"self",
".",
"error_y_result",
"=",
"True",
"try",
":",
"if",
"g_result",
".",
"group",
"(",
")",
"!=",
"self",
".",
"ui",
".",
"g_value",
".",
"text",
"(",
")",
":",
"self",
".",
"ui",
".",
"organic_label",
".",
"setStyleSheet",
"(",
"error_color",
")",
"self",
".",
"ui",
".",
"g_label",
".",
"setStyleSheet",
"(",
"error_color",
")",
"self",
".",
"error_g_result",
"=",
"True",
"else",
":",
"self",
".",
"ui",
".",
"organic_label",
".",
"setStyleSheet",
"(",
"no_error_color",
")",
"self",
".",
"ui",
".",
"g_label",
".",
"setStyleSheet",
"(",
"no_error_color",
")",
"self",
".",
"error_g_result",
"=",
"False",
"except",
"AttributeError",
":",
"self",
".",
"ui",
".",
"organic_label",
".",
"setStyleSheet",
"(",
"error_color",
")",
"self",
".",
"ui",
".",
"g_label",
".",
"setStyleSheet",
"(",
"error_color",
")",
"self",
".",
"error_g_result",
"=",
"True",
"try",
":",
"if",
"s_result",
".",
"group",
"(",
")",
"!=",
"self",
".",
"ui",
".",
"s_value",
".",
"text",
"(",
")",
":",
"self",
".",
"ui",
".",
"organic_label",
".",
"setStyleSheet",
"(",
"error_color",
")",
"self",
".",
"ui",
".",
"s_label",
".",
"setStyleSheet",
"(",
"error_color",
")",
"self",
".",
"error_s_result",
"=",
"True",
"else",
":",
"self",
".",
"ui",
".",
"organic_label",
".",
"setStyleSheet",
"(",
"no_error_color",
")",
"self",
".",
"ui",
".",
"s_label",
".",
"setStyleSheet",
"(",
"no_error_color",
")",
"self",
".",
"error_x_result",
"=",
"False",
"except",
"AttributeError",
":",
"self",
".",
"ui",
".",
"organic_label",
".",
"setStyleSheet",
"(",
"error_color",
")",
"self",
".",
"ui",
".",
"s_label",
".",
"setStyleSheet",
"(",
"error_color",
")",
"self",
".",
"error_x_result",
"=",
"True",
"try",
":",
"if",
"z_result",
".",
"group",
"(",
")",
"!=",
"self",
".",
"ui",
".",
"z_value",
".",
"text",
"(",
")",
":",
"self",
".",
"ui",
".",
"z_label",
".",
"setStyleSheet",
"(",
"error_color",
")",
"self",
".",
"error_z_result",
"=",
"True",
"else",
":",
"self",
".",
"ui",
".",
"z_label",
".",
"setStyleSheet",
"(",
"no_error_color",
")",
"self",
".",
"error_z_result",
"=",
"False",
"except",
"AttributeError",
":",
"self",
".",
"ui",
".",
"z_label",
".",
"setStyleSheet",
"(",
"error_color",
")",
"self",
".",
"error_z_result",
"=",
"True",
"check_num_3",
"=",
"'[1-6]+'",
"self",
".",
"prog_3",
"=",
"re",
".",
"compile",
"(",
"check_num_3",
")",
"verbose_result",
"=",
"self",
".",
"prog_3",
".",
"search",
"(",
"self",
".",
"verbose_value",
")",
"try",
":",
"if",
"verbose_result",
".",
"group",
"(",
")",
"!=",
"self",
".",
"ui",
".",
"verbose_value",
".",
"text",
"(",
")",
":",
"self",
".",
"ui",
".",
"verbose_label",
".",
"setStyleSheet",
"(",
"error_color",
")",
"self",
".",
"error_verbose_result",
"=",
"True",
"else",
":",
"self",
".",
"ui",
".",
"verbose_label",
".",
"setStyleSheet",
"(",
"no_error_color",
")",
"self",
".",
"error_verbose_result",
"=",
"False",
"except",
"AttributeError",
":",
"self",
".",
"ui",
".",
"verbose_label",
".",
"setStyleSheet",
"(",
"error_color",
")",
"self",
".",
"error_verbose_result",
"=",
"True",
"# ------------------------------------------------#",
"# The following checks values containing only path.",
"# ------------------------------------------------#",
"\"\"\"\n #!!!!!!!!!!!!!!!!!!!!!!!!!!\n #Syntax test doesn't work ! -> #check_num4 = '[/]([A-Za-z]+[/]?)+[A-Za-z]$'\n #!!!!!!!!!!!!!!!!!!!!!!!!!!\n \"\"\"",
"check_num_4",
"=",
"'(.*)'",
"# Take all strings possible.",
"self",
".",
"prog_4",
"=",
"re",
".",
"compile",
"(",
"check_num_4",
")",
"phytoplankton_path_result",
"=",
"self",
".",
"prog_4",
".",
"search",
"(",
"self",
".",
"phytoplankton_path",
")",
"bottom_path_result",
"=",
"self",
".",
"prog_4",
".",
"search",
"(",
"self",
".",
"bottom_path",
")",
"executive_path_result",
"=",
"self",
".",
"prog_4",
".",
"search",
"(",
"self",
".",
"executive_path",
")",
"try",
":",
"if",
"phytoplankton_path_result",
".",
"group",
"(",
")",
"!=",
"self",
".",
"ui",
".",
"phyto_path",
".",
"text",
"(",
")",
":",
"self",
".",
"ui",
".",
"phyto_label",
".",
"setStyleSheet",
"(",
"error_color",
")",
"self",
".",
"error_phytoplankton_path_result",
"=",
"True",
"else",
":",
"self",
".",
"ui",
".",
"phyto_label",
".",
"setStyleSheet",
"(",
"no_error_color",
")",
"self",
".",
"error_phytoplankton_path_result",
"=",
"False",
"except",
"AttributeError",
":",
"self",
".",
"ui",
".",
"phyto_label",
".",
"setStyleSheet",
"(",
"error_color",
")",
"self",
".",
"error_phytoplankton_path_result",
"=",
"True",
"try",
":",
"if",
"bottom_path_result",
".",
"group",
"(",
")",
"!=",
"self",
".",
"ui",
".",
"bottom_path",
".",
"text",
"(",
")",
":",
"self",
".",
"ui",
".",
"bottom_label",
".",
"setStyleSheet",
"(",
"error_color",
")",
"self",
".",
"error_bottom_path_result",
"=",
"True",
"else",
":",
"self",
".",
"ui",
".",
"bottom_label",
".",
"setStyleSheet",
"(",
"no_error_color",
")",
"self",
".",
"error_bottom_path_result",
"=",
"False",
"except",
"AttributeError",
":",
"self",
".",
"ui",
".",
"bottom_label",
".",
"setStyleSheet",
"(",
"error_color",
")",
"self",
".",
"error_bottom_path_result",
"=",
"True",
"try",
":",
"if",
"executive_path_result",
".",
"group",
"(",
")",
"!=",
"self",
".",
"ui",
".",
"exec_path",
".",
"text",
"(",
")",
":",
"self",
".",
"ui",
".",
"execPath_label",
".",
"setStyleSheet",
"(",
"error_color",
")",
"self",
".",
"error_executive_path_result",
"=",
"True",
"else",
":",
"self",
".",
"ui",
".",
"execPath_label",
".",
"setStyleSheet",
"(",
"no_error_color",
")",
"self",
".",
"error_executive_path_result",
"=",
"False",
"except",
"AttributeError",
":",
"self",
".",
"ui",
".",
"execPath_label",
".",
"setStyleSheet",
"(",
"error_color",
")",
"self",
".",
"error_executive_path_result",
"=",
"True",
"if",
"(",
"self",
".",
"error_batch_name",
"==",
"True",
")",
"|",
"(",
"self",
".",
"error_report_parameter",
"==",
"True",
")",
"|",
"(",
"self",
".",
"error_saa_result",
"==",
"True",
")",
"|",
"(",
"self",
".",
"error_sza_result",
"==",
"True",
")",
"|",
"(",
"self",
".",
"error_p_result",
"==",
"True",
")",
"|",
"(",
"self",
".",
"error_wavelength_result",
"==",
"True",
")",
"|",
"(",
"self",
".",
"error_x_result",
"==",
"True",
")",
"|",
"(",
"self",
".",
"error_y_result",
"==",
"True",
")",
"|",
"(",
"self",
".",
"error_g_result",
"==",
"True",
")",
"|",
"(",
"self",
".",
"error_s_result",
"==",
"True",
")",
"|",
"(",
"self",
".",
"error_z_result",
"==",
"True",
")",
"|",
"(",
"self",
".",
"error_verbose_result",
"==",
"True",
")",
"|",
"(",
"self",
".",
"error_phytoplankton_path_result",
"==",
"True",
")",
"|",
"(",
"self",
".",
"error_bottom_path_result",
"==",
"True",
")",
"|",
"(",
"self",
".",
"error_executive_path_result",
"==",
"True",
")",
":",
"self",
".",
"without_error",
"=",
"False",
"else",
":",
"self",
".",
"without_error",
"=",
"True"
] |
5095d1cb98d4f67a7c3108c9282f2d59253e89a8
|
test
|
FormEvents.write_to_file
|
This function calls "gui_batch.py" with inputs values to write the batch file.
|
gui/gui_mainLayout.py
|
def write_to_file(self):
"""
This function calls "gui_batch.py" with inputs values to write the batch file.
"""
bt = BatchFile(self.batch_name_value, self.p_values, self.x_value, self.y_value, self.g_value, self.s_value,
self.z_value, self.wavelength_values, self.verbose_value, self.phytoplankton_path,
self.bottom_path, self.nb_cpu, self.executive_path, self.saa_values,
self.sza_values, self.report_parameter_value)
# bt.write_batch_to_file(str(self.batch_name_value + "_batch.txt"))
bt.write_batch_to_file(str(self.batch_name_value + "_batch.txt"))
|
def write_to_file(self):
"""
This function calls "gui_batch.py" with inputs values to write the batch file.
"""
bt = BatchFile(self.batch_name_value, self.p_values, self.x_value, self.y_value, self.g_value, self.s_value,
self.z_value, self.wavelength_values, self.verbose_value, self.phytoplankton_path,
self.bottom_path, self.nb_cpu, self.executive_path, self.saa_values,
self.sza_values, self.report_parameter_value)
# bt.write_batch_to_file(str(self.batch_name_value + "_batch.txt"))
bt.write_batch_to_file(str(self.batch_name_value + "_batch.txt"))
|
[
"This",
"function",
"calls",
"gui_batch",
".",
"py",
"with",
"inputs",
"values",
"to",
"write",
"the",
"batch",
"file",
"."
] |
marrabld/planarradpy
|
python
|
https://github.com/marrabld/planarradpy/blob/5095d1cb98d4f67a7c3108c9282f2d59253e89a8/gui/gui_mainLayout.py#L466-L475
|
[
"def",
"write_to_file",
"(",
"self",
")",
":",
"bt",
"=",
"BatchFile",
"(",
"self",
".",
"batch_name_value",
",",
"self",
".",
"p_values",
",",
"self",
".",
"x_value",
",",
"self",
".",
"y_value",
",",
"self",
".",
"g_value",
",",
"self",
".",
"s_value",
",",
"self",
".",
"z_value",
",",
"self",
".",
"wavelength_values",
",",
"self",
".",
"verbose_value",
",",
"self",
".",
"phytoplankton_path",
",",
"self",
".",
"bottom_path",
",",
"self",
".",
"nb_cpu",
",",
"self",
".",
"executive_path",
",",
"self",
".",
"saa_values",
",",
"self",
".",
"sza_values",
",",
"self",
".",
"report_parameter_value",
")",
"# bt.write_batch_to_file(str(self.batch_name_value + \"_batch.txt\"))",
"bt",
".",
"write_batch_to_file",
"(",
"str",
"(",
"self",
".",
"batch_name_value",
"+",
"\"_batch.txt\"",
")",
")"
] |
5095d1cb98d4f67a7c3108c9282f2d59253e89a8
|
test
|
FormEvents.data_processing
|
This function separates data, from the file to display curves, and will put them in the good arrays.
|
gui/gui_mainLayout.py
|
def data_processing(self):
"""
This function separates data, from the file to display curves, and will put them in the good arrays.
"""
the_file_name = str(self.result_file)
the_file = open(the_file_name, 'r')
lines = the_file.readlines()
# We put all lines in an array and we put each cell of the line in a column.
lines_array = []
for line in lines:
line = line.split(',') # Each time there is a tabulation, there is a new cell
lines_array.append(line)
labels_line = lines_array[0]
cell_labels_line = 0 # Iterator on each cell of the line labels_line.
flag = True # Become FALSE when we find the word which separate data from wavelength values.
try:
while flag: # While it is TRUE, so if the word doesn't match, it's an infinite loop,
if "wave length (nm)" in labels_line[cell_labels_line]:
index = labels_line.index(labels_line[cell_labels_line]) # Find the index of the string searched.
flag = False
else:
cell_labels_line += 1
except IndexError: # In case of an infinite loop.
raise sys.exit("Warning : There is no value named 'wavelength' in the file used to plot curves. "
"So, I can't separate data to plot curves and data about tests linking with these curves.")
self.information = [] # This array will contain the data displayed under the curves.
data_wavelength = [] # This array will contain the data to plot curves.
self.num_line = 0 # Iterator on each line of lines_array,
# The array containing data about information and wavelength.
for line in lines_array:
cell_line = 0 # Iterator on each cell of the line.
self.information.append([])
data_wavelength.append([])
while cell_line < len(line):
if cell_line < index:
self.information[self.num_line].append(line[cell_line])
elif cell_line > index:
data_wavelength[self.num_line].append(line[cell_line])
cell_line += 1
self.num_line += 1
# We transform wavelengths from strings to floats.
line_wavelength = 0 # Iterator on each line of data_wavelength
for row_data_wavelength in data_wavelength:
row_data_wavelength = [float(item.strip('\n').strip('\"')) for item in row_data_wavelength]
data_wavelength[line_wavelength] = row_data_wavelength
line_wavelength += 1
self.wavelength = data_wavelength[0] # The first line contains wavelength
self.data_wanted = data_wavelength[1:] # The others contain data useful to plot curves.
the_file.close()
|
def data_processing(self):
"""
This function separates data, from the file to display curves, and will put them in the good arrays.
"""
the_file_name = str(self.result_file)
the_file = open(the_file_name, 'r')
lines = the_file.readlines()
# We put all lines in an array and we put each cell of the line in a column.
lines_array = []
for line in lines:
line = line.split(',') # Each time there is a tabulation, there is a new cell
lines_array.append(line)
labels_line = lines_array[0]
cell_labels_line = 0 # Iterator on each cell of the line labels_line.
flag = True # Become FALSE when we find the word which separate data from wavelength values.
try:
while flag: # While it is TRUE, so if the word doesn't match, it's an infinite loop,
if "wave length (nm)" in labels_line[cell_labels_line]:
index = labels_line.index(labels_line[cell_labels_line]) # Find the index of the string searched.
flag = False
else:
cell_labels_line += 1
except IndexError: # In case of an infinite loop.
raise sys.exit("Warning : There is no value named 'wavelength' in the file used to plot curves. "
"So, I can't separate data to plot curves and data about tests linking with these curves.")
self.information = [] # This array will contain the data displayed under the curves.
data_wavelength = [] # This array will contain the data to plot curves.
self.num_line = 0 # Iterator on each line of lines_array,
# The array containing data about information and wavelength.
for line in lines_array:
cell_line = 0 # Iterator on each cell of the line.
self.information.append([])
data_wavelength.append([])
while cell_line < len(line):
if cell_line < index:
self.information[self.num_line].append(line[cell_line])
elif cell_line > index:
data_wavelength[self.num_line].append(line[cell_line])
cell_line += 1
self.num_line += 1
# We transform wavelengths from strings to floats.
line_wavelength = 0 # Iterator on each line of data_wavelength
for row_data_wavelength in data_wavelength:
row_data_wavelength = [float(item.strip('\n').strip('\"')) for item in row_data_wavelength]
data_wavelength[line_wavelength] = row_data_wavelength
line_wavelength += 1
self.wavelength = data_wavelength[0] # The first line contains wavelength
self.data_wanted = data_wavelength[1:] # The others contain data useful to plot curves.
the_file.close()
|
[
"This",
"function",
"separates",
"data",
"from",
"the",
"file",
"to",
"display",
"curves",
"and",
"will",
"put",
"them",
"in",
"the",
"good",
"arrays",
"."
] |
marrabld/planarradpy
|
python
|
https://github.com/marrabld/planarradpy/blob/5095d1cb98d4f67a7c3108c9282f2d59253e89a8/gui/gui_mainLayout.py#L477-L533
|
[
"def",
"data_processing",
"(",
"self",
")",
":",
"the_file_name",
"=",
"str",
"(",
"self",
".",
"result_file",
")",
"the_file",
"=",
"open",
"(",
"the_file_name",
",",
"'r'",
")",
"lines",
"=",
"the_file",
".",
"readlines",
"(",
")",
"# We put all lines in an array and we put each cell of the line in a column.",
"lines_array",
"=",
"[",
"]",
"for",
"line",
"in",
"lines",
":",
"line",
"=",
"line",
".",
"split",
"(",
"','",
")",
"# Each time there is a tabulation, there is a new cell",
"lines_array",
".",
"append",
"(",
"line",
")",
"labels_line",
"=",
"lines_array",
"[",
"0",
"]",
"cell_labels_line",
"=",
"0",
"# Iterator on each cell of the line labels_line.",
"flag",
"=",
"True",
"# Become FALSE when we find the word which separate data from wavelength values.",
"try",
":",
"while",
"flag",
":",
"# While it is TRUE, so if the word doesn't match, it's an infinite loop,",
"if",
"\"wave length (nm)\"",
"in",
"labels_line",
"[",
"cell_labels_line",
"]",
":",
"index",
"=",
"labels_line",
".",
"index",
"(",
"labels_line",
"[",
"cell_labels_line",
"]",
")",
"# Find the index of the string searched.",
"flag",
"=",
"False",
"else",
":",
"cell_labels_line",
"+=",
"1",
"except",
"IndexError",
":",
"# In case of an infinite loop.",
"raise",
"sys",
".",
"exit",
"(",
"\"Warning : There is no value named 'wavelength' in the file used to plot curves. \"",
"\"So, I can't separate data to plot curves and data about tests linking with these curves.\"",
")",
"self",
".",
"information",
"=",
"[",
"]",
"# This array will contain the data displayed under the curves.",
"data_wavelength",
"=",
"[",
"]",
"# This array will contain the data to plot curves.",
"self",
".",
"num_line",
"=",
"0",
"# Iterator on each line of lines_array,",
"# The array containing data about information and wavelength.",
"for",
"line",
"in",
"lines_array",
":",
"cell_line",
"=",
"0",
"# Iterator on each cell of the line.",
"self",
".",
"information",
".",
"append",
"(",
"[",
"]",
")",
"data_wavelength",
".",
"append",
"(",
"[",
"]",
")",
"while",
"cell_line",
"<",
"len",
"(",
"line",
")",
":",
"if",
"cell_line",
"<",
"index",
":",
"self",
".",
"information",
"[",
"self",
".",
"num_line",
"]",
".",
"append",
"(",
"line",
"[",
"cell_line",
"]",
")",
"elif",
"cell_line",
">",
"index",
":",
"data_wavelength",
"[",
"self",
".",
"num_line",
"]",
".",
"append",
"(",
"line",
"[",
"cell_line",
"]",
")",
"cell_line",
"+=",
"1",
"self",
".",
"num_line",
"+=",
"1",
"# We transform wavelengths from strings to floats.",
"line_wavelength",
"=",
"0",
"# Iterator on each line of data_wavelength",
"for",
"row_data_wavelength",
"in",
"data_wavelength",
":",
"row_data_wavelength",
"=",
"[",
"float",
"(",
"item",
".",
"strip",
"(",
"'\\n'",
")",
".",
"strip",
"(",
"'\\\"'",
")",
")",
"for",
"item",
"in",
"row_data_wavelength",
"]",
"data_wavelength",
"[",
"line_wavelength",
"]",
"=",
"row_data_wavelength",
"line_wavelength",
"+=",
"1",
"self",
".",
"wavelength",
"=",
"data_wavelength",
"[",
"0",
"]",
"# The first line contains wavelength",
"self",
".",
"data_wanted",
"=",
"data_wavelength",
"[",
"1",
":",
"]",
"# The others contain data useful to plot curves.",
"the_file",
".",
"close",
"(",
")"
] |
5095d1cb98d4f67a7c3108c9282f2d59253e89a8
|
test
|
FormEvents.display_the_graphic
|
This function calls the class "MplCanvas" of "gui_matplotlibwidgetFile.py" to plot results.
Inputs : num_line : The number of cases.
wavelength : The wavelengths.
data_wanted : The data for wavelengths.
information : The array which contains the information, of all curves to display.
|
gui/gui_mainLayout.py
|
def display_the_graphic(self, num_line, wavelength, data_wanted, information):
"""
This function calls the class "MplCanvas" of "gui_matplotlibwidgetFile.py" to plot results.
Inputs : num_line : The number of cases.
wavelength : The wavelengths.
data_wanted : The data for wavelengths.
information : The array which contains the information, of all curves to display.
"""
self.nb_case = num_line - 1 # This is the number of line, the number of test.
self.graphic_slider(self.nb_case)
self.mpl_canvas.update_fields(wavelength, data_wanted, self.slider_value)
# Following if the checkbox is checked "All curves" or not.
if self.ui.show_grid.checkState() == 2:
grid = True
else:
grid = False
if self.ui.show_all_curves.checkState() == 2:
self.flag_curves = True
self.mpl_canvas.display_graphic(self.flag_curves, self.ui, grid)
self.print_graphic_information(self.slider_value, information)
else:
self.flag_curves = False
self.mpl_canvas.display_graphic(self.flag_curves, self.ui, grid)
self.print_graphic_information(self.slider_value, information)
|
def display_the_graphic(self, num_line, wavelength, data_wanted, information):
"""
This function calls the class "MplCanvas" of "gui_matplotlibwidgetFile.py" to plot results.
Inputs : num_line : The number of cases.
wavelength : The wavelengths.
data_wanted : The data for wavelengths.
information : The array which contains the information, of all curves to display.
"""
self.nb_case = num_line - 1 # This is the number of line, the number of test.
self.graphic_slider(self.nb_case)
self.mpl_canvas.update_fields(wavelength, data_wanted, self.slider_value)
# Following if the checkbox is checked "All curves" or not.
if self.ui.show_grid.checkState() == 2:
grid = True
else:
grid = False
if self.ui.show_all_curves.checkState() == 2:
self.flag_curves = True
self.mpl_canvas.display_graphic(self.flag_curves, self.ui, grid)
self.print_graphic_information(self.slider_value, information)
else:
self.flag_curves = False
self.mpl_canvas.display_graphic(self.flag_curves, self.ui, grid)
self.print_graphic_information(self.slider_value, information)
|
[
"This",
"function",
"calls",
"the",
"class",
"MplCanvas",
"of",
"gui_matplotlibwidgetFile",
".",
"py",
"to",
"plot",
"results",
".",
"Inputs",
":",
"num_line",
":",
"The",
"number",
"of",
"cases",
".",
"wavelength",
":",
"The",
"wavelengths",
".",
"data_wanted",
":",
"The",
"data",
"for",
"wavelengths",
".",
"information",
":",
"The",
"array",
"which",
"contains",
"the",
"information",
"of",
"all",
"curves",
"to",
"display",
"."
] |
marrabld/planarradpy
|
python
|
https://github.com/marrabld/planarradpy/blob/5095d1cb98d4f67a7c3108c9282f2d59253e89a8/gui/gui_mainLayout.py#L535-L562
|
[
"def",
"display_the_graphic",
"(",
"self",
",",
"num_line",
",",
"wavelength",
",",
"data_wanted",
",",
"information",
")",
":",
"self",
".",
"nb_case",
"=",
"num_line",
"-",
"1",
"# This is the number of line, the number of test.",
"self",
".",
"graphic_slider",
"(",
"self",
".",
"nb_case",
")",
"self",
".",
"mpl_canvas",
".",
"update_fields",
"(",
"wavelength",
",",
"data_wanted",
",",
"self",
".",
"slider_value",
")",
"# Following if the checkbox is checked \"All curves\" or not.",
"if",
"self",
".",
"ui",
".",
"show_grid",
".",
"checkState",
"(",
")",
"==",
"2",
":",
"grid",
"=",
"True",
"else",
":",
"grid",
"=",
"False",
"if",
"self",
".",
"ui",
".",
"show_all_curves",
".",
"checkState",
"(",
")",
"==",
"2",
":",
"self",
".",
"flag_curves",
"=",
"True",
"self",
".",
"mpl_canvas",
".",
"display_graphic",
"(",
"self",
".",
"flag_curves",
",",
"self",
".",
"ui",
",",
"grid",
")",
"self",
".",
"print_graphic_information",
"(",
"self",
".",
"slider_value",
",",
"information",
")",
"else",
":",
"self",
".",
"flag_curves",
"=",
"False",
"self",
".",
"mpl_canvas",
".",
"display_graphic",
"(",
"self",
".",
"flag_curves",
",",
"self",
".",
"ui",
",",
"grid",
")",
"self",
".",
"print_graphic_information",
"(",
"self",
".",
"slider_value",
",",
"information",
")"
] |
5095d1cb98d4f67a7c3108c9282f2d59253e89a8
|
test
|
FormEvents.display_the_graphic_connection
|
The following permits to attribute the function "display_the_graphic" to the slider.
Because, to make a connection, we can not have parameters for the function, but "display_the_graphic" has some.
|
gui/gui_mainLayout.py
|
def display_the_graphic_connection(self):
"""
The following permits to attribute the function "display_the_graphic" to the slider.
Because, to make a connection, we can not have parameters for the function, but "display_the_graphic" has some.
"""
self.display_the_graphic(self.num_line, self.wavelength, self.data_wanted, self.information)
|
def display_the_graphic_connection(self):
"""
The following permits to attribute the function "display_the_graphic" to the slider.
Because, to make a connection, we can not have parameters for the function, but "display_the_graphic" has some.
"""
self.display_the_graphic(self.num_line, self.wavelength, self.data_wanted, self.information)
|
[
"The",
"following",
"permits",
"to",
"attribute",
"the",
"function",
"display_the_graphic",
"to",
"the",
"slider",
".",
"Because",
"to",
"make",
"a",
"connection",
"we",
"can",
"not",
"have",
"parameters",
"for",
"the",
"function",
"but",
"display_the_graphic",
"has",
"some",
"."
] |
marrabld/planarradpy
|
python
|
https://github.com/marrabld/planarradpy/blob/5095d1cb98d4f67a7c3108c9282f2d59253e89a8/gui/gui_mainLayout.py#L564-L569
|
[
"def",
"display_the_graphic_connection",
"(",
"self",
")",
":",
"self",
".",
"display_the_graphic",
"(",
"self",
".",
"num_line",
",",
"self",
".",
"wavelength",
",",
"self",
".",
"data_wanted",
",",
"self",
".",
"information",
")"
] |
5095d1cb98d4f67a7c3108c9282f2d59253e89a8
|
test
|
FormEvents.print_graphic_information
|
This function displays information about curves.
Inputs ; num_curve ; The index of the curve's line that we have to display.
information ; The array which contains the information, of all curves to display.
|
gui/gui_mainLayout.py
|
def print_graphic_information(self, num_curve, information):
"""
This function displays information about curves.
Inputs ; num_curve ; The index of the curve's line that we have to display.
information ; The array which contains the information, of all curves to display.
"""
"""In this function, the best would to create labels each time we need to create one,
following the number of labels in label_information.
#self.essai = QtGui.QLabel(self.ui.tab)
#self.essai.setGeometry(PyQt4.QtCore.QRect(870,650,111,16))
#self.essai.setText("ESSAI")
"""
label_information = information[0]
data_information = information[1:]
count_nb_label = 0 # Iterator on all labels of label_information
nb_label = len(label_information)
while count_nb_label <= nb_label:
self.ui.column1_label.setText(label_information[0].strip('\"'))
self.ui.column2_label.setText(label_information[1].strip('\"'))
self.ui.column3_label.setText(label_information[2].strip('\"'))
self.ui.column4_label.setText(label_information[3].strip('\"'))
self.ui.column5_label.setText(label_information[4].strip('\"'))
self.ui.column6_label.setText(label_information[5].strip('\"'))
self.ui.column7_label.setText(label_information[6].strip('\"'))
self.ui.column8_label.setText(label_information[7].strip('\"'))
count_nb_label += 1
line_of_data = 0 # Iterator on each line of data_information.
while line_of_data < len(data_information):
if line_of_data == num_curve:
self.ui.column1_result.setText(data_information[line_of_data][0])
self.ui.column2_result.setText(data_information[line_of_data][1])
self.ui.column3_result.setText(data_information[line_of_data][2])
self.ui.column4_result.setText(data_information[line_of_data][3])
self.ui.column5_result.setText(data_information[line_of_data][4])
self.ui.column6_result.setText(data_information[line_of_data][5])
self.ui.column7_result.setText(data_information[line_of_data][6])
self.ui.column8_result.setText(data_information[line_of_data][7])
line_of_data += 1
|
def print_graphic_information(self, num_curve, information):
"""
This function displays information about curves.
Inputs ; num_curve ; The index of the curve's line that we have to display.
information ; The array which contains the information, of all curves to display.
"""
"""In this function, the best would to create labels each time we need to create one,
following the number of labels in label_information.
#self.essai = QtGui.QLabel(self.ui.tab)
#self.essai.setGeometry(PyQt4.QtCore.QRect(870,650,111,16))
#self.essai.setText("ESSAI")
"""
label_information = information[0]
data_information = information[1:]
count_nb_label = 0 # Iterator on all labels of label_information
nb_label = len(label_information)
while count_nb_label <= nb_label:
self.ui.column1_label.setText(label_information[0].strip('\"'))
self.ui.column2_label.setText(label_information[1].strip('\"'))
self.ui.column3_label.setText(label_information[2].strip('\"'))
self.ui.column4_label.setText(label_information[3].strip('\"'))
self.ui.column5_label.setText(label_information[4].strip('\"'))
self.ui.column6_label.setText(label_information[5].strip('\"'))
self.ui.column7_label.setText(label_information[6].strip('\"'))
self.ui.column8_label.setText(label_information[7].strip('\"'))
count_nb_label += 1
line_of_data = 0 # Iterator on each line of data_information.
while line_of_data < len(data_information):
if line_of_data == num_curve:
self.ui.column1_result.setText(data_information[line_of_data][0])
self.ui.column2_result.setText(data_information[line_of_data][1])
self.ui.column3_result.setText(data_information[line_of_data][2])
self.ui.column4_result.setText(data_information[line_of_data][3])
self.ui.column5_result.setText(data_information[line_of_data][4])
self.ui.column6_result.setText(data_information[line_of_data][5])
self.ui.column7_result.setText(data_information[line_of_data][6])
self.ui.column8_result.setText(data_information[line_of_data][7])
line_of_data += 1
|
[
"This",
"function",
"displays",
"information",
"about",
"curves",
".",
"Inputs",
";",
"num_curve",
";",
"The",
"index",
"of",
"the",
"curve",
"s",
"line",
"that",
"we",
"have",
"to",
"display",
".",
"information",
";",
"The",
"array",
"which",
"contains",
"the",
"information",
"of",
"all",
"curves",
"to",
"display",
"."
] |
marrabld/planarradpy
|
python
|
https://github.com/marrabld/planarradpy/blob/5095d1cb98d4f67a7c3108c9282f2d59253e89a8/gui/gui_mainLayout.py#L571-L611
|
[
"def",
"print_graphic_information",
"(",
"self",
",",
"num_curve",
",",
"information",
")",
":",
"\"\"\"In this function, the best would to create labels each time we need to create one,\n following the number of labels in label_information.\n #self.essai = QtGui.QLabel(self.ui.tab)\n #self.essai.setGeometry(PyQt4.QtCore.QRect(870,650,111,16))\n #self.essai.setText(\"ESSAI\")\n \"\"\"",
"label_information",
"=",
"information",
"[",
"0",
"]",
"data_information",
"=",
"information",
"[",
"1",
":",
"]",
"count_nb_label",
"=",
"0",
"# Iterator on all labels of label_information",
"nb_label",
"=",
"len",
"(",
"label_information",
")",
"while",
"count_nb_label",
"<=",
"nb_label",
":",
"self",
".",
"ui",
".",
"column1_label",
".",
"setText",
"(",
"label_information",
"[",
"0",
"]",
".",
"strip",
"(",
"'\\\"'",
")",
")",
"self",
".",
"ui",
".",
"column2_label",
".",
"setText",
"(",
"label_information",
"[",
"1",
"]",
".",
"strip",
"(",
"'\\\"'",
")",
")",
"self",
".",
"ui",
".",
"column3_label",
".",
"setText",
"(",
"label_information",
"[",
"2",
"]",
".",
"strip",
"(",
"'\\\"'",
")",
")",
"self",
".",
"ui",
".",
"column4_label",
".",
"setText",
"(",
"label_information",
"[",
"3",
"]",
".",
"strip",
"(",
"'\\\"'",
")",
")",
"self",
".",
"ui",
".",
"column5_label",
".",
"setText",
"(",
"label_information",
"[",
"4",
"]",
".",
"strip",
"(",
"'\\\"'",
")",
")",
"self",
".",
"ui",
".",
"column6_label",
".",
"setText",
"(",
"label_information",
"[",
"5",
"]",
".",
"strip",
"(",
"'\\\"'",
")",
")",
"self",
".",
"ui",
".",
"column7_label",
".",
"setText",
"(",
"label_information",
"[",
"6",
"]",
".",
"strip",
"(",
"'\\\"'",
")",
")",
"self",
".",
"ui",
".",
"column8_label",
".",
"setText",
"(",
"label_information",
"[",
"7",
"]",
".",
"strip",
"(",
"'\\\"'",
")",
")",
"count_nb_label",
"+=",
"1",
"line_of_data",
"=",
"0",
"# Iterator on each line of data_information.",
"while",
"line_of_data",
"<",
"len",
"(",
"data_information",
")",
":",
"if",
"line_of_data",
"==",
"num_curve",
":",
"self",
".",
"ui",
".",
"column1_result",
".",
"setText",
"(",
"data_information",
"[",
"line_of_data",
"]",
"[",
"0",
"]",
")",
"self",
".",
"ui",
".",
"column2_result",
".",
"setText",
"(",
"data_information",
"[",
"line_of_data",
"]",
"[",
"1",
"]",
")",
"self",
".",
"ui",
".",
"column3_result",
".",
"setText",
"(",
"data_information",
"[",
"line_of_data",
"]",
"[",
"2",
"]",
")",
"self",
".",
"ui",
".",
"column4_result",
".",
"setText",
"(",
"data_information",
"[",
"line_of_data",
"]",
"[",
"3",
"]",
")",
"self",
".",
"ui",
".",
"column5_result",
".",
"setText",
"(",
"data_information",
"[",
"line_of_data",
"]",
"[",
"4",
"]",
")",
"self",
".",
"ui",
".",
"column6_result",
".",
"setText",
"(",
"data_information",
"[",
"line_of_data",
"]",
"[",
"5",
"]",
")",
"self",
".",
"ui",
".",
"column7_result",
".",
"setText",
"(",
"data_information",
"[",
"line_of_data",
"]",
"[",
"6",
"]",
")",
"self",
".",
"ui",
".",
"column8_result",
".",
"setText",
"(",
"data_information",
"[",
"line_of_data",
"]",
"[",
"7",
"]",
")",
"line_of_data",
"+=",
"1"
] |
5095d1cb98d4f67a7c3108c9282f2d59253e89a8
|
test
|
FormEvents.graphic_slider
|
This function scales the slider for curves displayed.
Input : The number of cases (curves).
Return ; The slider value.
|
gui/gui_mainLayout.py
|
def graphic_slider(self, nb_case):
"""
This function scales the slider for curves displayed.
Input : The number of cases (curves).
Return ; The slider value.
"""
"""
The slider range is created each time we call this function. Search to set its range just when it is necessary.
"""
self.ui.sens.setDisabled(False)
self.ui.sens.setRange(0, int(nb_case - 1))
self.slider_value = self.ui.sens.value()
return self.slider_value
|
def graphic_slider(self, nb_case):
"""
This function scales the slider for curves displayed.
Input : The number of cases (curves).
Return ; The slider value.
"""
"""
The slider range is created each time we call this function. Search to set its range just when it is necessary.
"""
self.ui.sens.setDisabled(False)
self.ui.sens.setRange(0, int(nb_case - 1))
self.slider_value = self.ui.sens.value()
return self.slider_value
|
[
"This",
"function",
"scales",
"the",
"slider",
"for",
"curves",
"displayed",
".",
"Input",
":",
"The",
"number",
"of",
"cases",
"(",
"curves",
")",
".",
"Return",
";",
"The",
"slider",
"value",
"."
] |
marrabld/planarradpy
|
python
|
https://github.com/marrabld/planarradpy/blob/5095d1cb98d4f67a7c3108c9282f2d59253e89a8/gui/gui_mainLayout.py#L613-L628
|
[
"def",
"graphic_slider",
"(",
"self",
",",
"nb_case",
")",
":",
"\"\"\"\n The slider range is created each time we call this function. Search to set its range just when it is necessary.\n \"\"\"",
"self",
".",
"ui",
".",
"sens",
".",
"setDisabled",
"(",
"False",
")",
"self",
".",
"ui",
".",
"sens",
".",
"setRange",
"(",
"0",
",",
"int",
"(",
"nb_case",
"-",
"1",
")",
")",
"self",
".",
"slider_value",
"=",
"self",
".",
"ui",
".",
"sens",
".",
"value",
"(",
")",
"return",
"self",
".",
"slider_value"
] |
5095d1cb98d4f67a7c3108c9282f2d59253e89a8
|
test
|
FormEvents.display_error_message
|
This function displays an error message when a wrong value is typed.
|
gui/gui_mainLayout.py
|
def display_error_message(self):
"""
This function displays an error message when a wrong value is typed.
"""
self.ui.error_label.setScaledContents(True) # Warning image shown.
self.ui.error_text_label.show() # Warning message shown.
self.ui.error_text_label.setStyleSheet('color: red')
|
def display_error_message(self):
"""
This function displays an error message when a wrong value is typed.
"""
self.ui.error_label.setScaledContents(True) # Warning image shown.
self.ui.error_text_label.show() # Warning message shown.
self.ui.error_text_label.setStyleSheet('color: red')
|
[
"This",
"function",
"displays",
"an",
"error",
"message",
"when",
"a",
"wrong",
"value",
"is",
"typed",
"."
] |
marrabld/planarradpy
|
python
|
https://github.com/marrabld/planarradpy/blob/5095d1cb98d4f67a7c3108c9282f2d59253e89a8/gui/gui_mainLayout.py#L630-L636
|
[
"def",
"display_error_message",
"(",
"self",
")",
":",
"self",
".",
"ui",
".",
"error_label",
".",
"setScaledContents",
"(",
"True",
")",
"# Warning image shown.",
"self",
".",
"ui",
".",
"error_text_label",
".",
"show",
"(",
")",
"# Warning message shown.",
"self",
".",
"ui",
".",
"error_text_label",
".",
"setStyleSheet",
"(",
"'color: red'",
")"
] |
5095d1cb98d4f67a7c3108c9282f2d59253e89a8
|
test
|
FormEvents.hide_error_message
|
This function hides the error message when all values are correct.
|
gui/gui_mainLayout.py
|
def hide_error_message(self):
"""
This function hides the error message when all values are correct.
"""
self.ui.error_label.setScaledContents(False) # Warning image hiden.
self.ui.error_text_label.hide()
|
def hide_error_message(self):
"""
This function hides the error message when all values are correct.
"""
self.ui.error_label.setScaledContents(False) # Warning image hiden.
self.ui.error_text_label.hide()
|
[
"This",
"function",
"hides",
"the",
"error",
"message",
"when",
"all",
"values",
"are",
"correct",
"."
] |
marrabld/planarradpy
|
python
|
https://github.com/marrabld/planarradpy/blob/5095d1cb98d4f67a7c3108c9282f2d59253e89a8/gui/gui_mainLayout.py#L638-L643
|
[
"def",
"hide_error_message",
"(",
"self",
")",
":",
"self",
".",
"ui",
".",
"error_label",
".",
"setScaledContents",
"(",
"False",
")",
"# Warning image hiden.",
"self",
".",
"ui",
".",
"error_text_label",
".",
"hide",
"(",
")"
] |
5095d1cb98d4f67a7c3108c9282f2d59253e89a8
|
test
|
FormEvents.run
|
This function executes planarRad using the batch file.
|
gui/gui_mainLayout.py
|
def run(self):
"""
This function executes planarRad using the batch file.
"""
"""
Error when planarRad start : /bin/sh: 1: ../planarrad.py: not found
"""
print('Executing planarrad')
# If we are not in the reverse_mode :
if self.ui.tabWidget.currentIndex() == TabWidget.NORMAL_MODE:
self.data()
self.check_values()
if self.without_error == False:
self.display_error_message()
elif self.without_error == True:
self.is_running = True
self.hide_error_message()
self.write_to_file()
os.chdir('./')
self.progress_bar()
this_dir = os.path.dirname(os.path.realpath(__file__)).rstrip('gui/')
batch_file = os.path.join(this_dir, "inputs/batch_files/" + str(self.batch_name_value) + "_batch.txt")
print(batch_file)
self.p = subprocess.Popen(
["./planarrad.py -i " + batch_file],
shell=True)
if self.ui.progressBar.value() == 100:
self.display_the_graphic(self.num_line, self.wavelength, self.data_wanted, self.information)
|
def run(self):
"""
This function executes planarRad using the batch file.
"""
"""
Error when planarRad start : /bin/sh: 1: ../planarrad.py: not found
"""
print('Executing planarrad')
# If we are not in the reverse_mode :
if self.ui.tabWidget.currentIndex() == TabWidget.NORMAL_MODE:
self.data()
self.check_values()
if self.without_error == False:
self.display_error_message()
elif self.without_error == True:
self.is_running = True
self.hide_error_message()
self.write_to_file()
os.chdir('./')
self.progress_bar()
this_dir = os.path.dirname(os.path.realpath(__file__)).rstrip('gui/')
batch_file = os.path.join(this_dir, "inputs/batch_files/" + str(self.batch_name_value) + "_batch.txt")
print(batch_file)
self.p = subprocess.Popen(
["./planarrad.py -i " + batch_file],
shell=True)
if self.ui.progressBar.value() == 100:
self.display_the_graphic(self.num_line, self.wavelength, self.data_wanted, self.information)
|
[
"This",
"function",
"executes",
"planarRad",
"using",
"the",
"batch",
"file",
"."
] |
marrabld/planarradpy
|
python
|
https://github.com/marrabld/planarradpy/blob/5095d1cb98d4f67a7c3108c9282f2d59253e89a8/gui/gui_mainLayout.py#L645-L674
|
[
"def",
"run",
"(",
"self",
")",
":",
"\"\"\"\n Error when planarRad start : /bin/sh: 1: ../planarrad.py: not found\n \"\"\"",
"print",
"(",
"'Executing planarrad'",
")",
"# If we are not in the reverse_mode :",
"if",
"self",
".",
"ui",
".",
"tabWidget",
".",
"currentIndex",
"(",
")",
"==",
"TabWidget",
".",
"NORMAL_MODE",
":",
"self",
".",
"data",
"(",
")",
"self",
".",
"check_values",
"(",
")",
"if",
"self",
".",
"without_error",
"==",
"False",
":",
"self",
".",
"display_error_message",
"(",
")",
"elif",
"self",
".",
"without_error",
"==",
"True",
":",
"self",
".",
"is_running",
"=",
"True",
"self",
".",
"hide_error_message",
"(",
")",
"self",
".",
"write_to_file",
"(",
")",
"os",
".",
"chdir",
"(",
"'./'",
")",
"self",
".",
"progress_bar",
"(",
")",
"this_dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"realpath",
"(",
"__file__",
")",
")",
".",
"rstrip",
"(",
"'gui/'",
")",
"batch_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"this_dir",
",",
"\"inputs/batch_files/\"",
"+",
"str",
"(",
"self",
".",
"batch_name_value",
")",
"+",
"\"_batch.txt\"",
")",
"print",
"(",
"batch_file",
")",
"self",
".",
"p",
"=",
"subprocess",
".",
"Popen",
"(",
"[",
"\"./planarrad.py -i \"",
"+",
"batch_file",
"]",
",",
"shell",
"=",
"True",
")",
"if",
"self",
".",
"ui",
".",
"progressBar",
".",
"value",
"(",
")",
"==",
"100",
":",
"self",
".",
"display_the_graphic",
"(",
"self",
".",
"num_line",
",",
"self",
".",
"wavelength",
",",
"self",
".",
"data_wanted",
",",
"self",
".",
"information",
")"
] |
5095d1cb98d4f67a7c3108c9282f2d59253e89a8
|
test
|
FormEvents.cancel_planarrad
|
This function cancels PlanarRad.
|
gui/gui_mainLayout.py
|
def cancel_planarrad(self):
"""
This function cancels PlanarRad.
"""
"""
This function needs to be tested. We don't know if she works.
"""
if (self.is_running == True) & (self.ui.tabWidget.currentIndex() == TabWidget.NORMAL_MODE):
cancel = QtGui.QMessageBox.question(self.ui.cancel, 'Cancel PlanarRad', "Are you sure to cancel ?",
QtGui.QMessageBox.Yes,
QtGui.QMessageBox.No)
if cancel == QtGui.QMessageBox.Yes:
self.is_running = False
os.kill(self.p.pid, signal.SIGTERM)
print("Necessary to check if cancel_planarrad works well !")
self.ui.progressBar.reset()
else:
pass
|
def cancel_planarrad(self):
"""
This function cancels PlanarRad.
"""
"""
This function needs to be tested. We don't know if she works.
"""
if (self.is_running == True) & (self.ui.tabWidget.currentIndex() == TabWidget.NORMAL_MODE):
cancel = QtGui.QMessageBox.question(self.ui.cancel, 'Cancel PlanarRad', "Are you sure to cancel ?",
QtGui.QMessageBox.Yes,
QtGui.QMessageBox.No)
if cancel == QtGui.QMessageBox.Yes:
self.is_running = False
os.kill(self.p.pid, signal.SIGTERM)
print("Necessary to check if cancel_planarrad works well !")
self.ui.progressBar.reset()
else:
pass
|
[
"This",
"function",
"cancels",
"PlanarRad",
"."
] |
marrabld/planarradpy
|
python
|
https://github.com/marrabld/planarradpy/blob/5095d1cb98d4f67a7c3108c9282f2d59253e89a8/gui/gui_mainLayout.py#L715-L736
|
[
"def",
"cancel_planarrad",
"(",
"self",
")",
":",
"\"\"\"\n This function needs to be tested. We don't know if she works.\n \"\"\"",
"if",
"(",
"self",
".",
"is_running",
"==",
"True",
")",
"&",
"(",
"self",
".",
"ui",
".",
"tabWidget",
".",
"currentIndex",
"(",
")",
"==",
"TabWidget",
".",
"NORMAL_MODE",
")",
":",
"cancel",
"=",
"QtGui",
".",
"QMessageBox",
".",
"question",
"(",
"self",
".",
"ui",
".",
"cancel",
",",
"'Cancel PlanarRad'",
",",
"\"Are you sure to cancel ?\"",
",",
"QtGui",
".",
"QMessageBox",
".",
"Yes",
",",
"QtGui",
".",
"QMessageBox",
".",
"No",
")",
"if",
"cancel",
"==",
"QtGui",
".",
"QMessageBox",
".",
"Yes",
":",
"self",
".",
"is_running",
"=",
"False",
"os",
".",
"kill",
"(",
"self",
".",
"p",
".",
"pid",
",",
"signal",
".",
"SIGTERM",
")",
"print",
"(",
"\"Necessary to check if cancel_planarrad works well !\"",
")",
"self",
".",
"ui",
".",
"progressBar",
".",
"reset",
"(",
")",
"else",
":",
"pass"
] |
5095d1cb98d4f67a7c3108c9282f2d59253e89a8
|
test
|
FormEvents.quit
|
This function quits PlanarRad, checking if PlanarRad is running before.
|
gui/gui_mainLayout.py
|
def quit(self):
"""
This function quits PlanarRad, checking if PlanarRad is running before.
"""
"""
Nothing programmed for displaying a message box when the user clicks on the window cross in order to quit.
"""
if self.is_running == True:
warning_planarrad_running = QtGui.QMessageBox.warning(self.ui.quit, 'Warning !',
"PlanarRad is running. Stop it before quit !",
QtGui.QMessageBox.Ok)
else:
quit = QtGui.QMessageBox.question(self.ui.quit, 'Quit PlanarRad', "Are you sure to quit ?",
QtGui.QMessageBox.Yes,
QtGui.QMessageBox.No)
if quit == QtGui.QMessageBox.Yes:
QtGui.qApp.quit()
|
def quit(self):
"""
This function quits PlanarRad, checking if PlanarRad is running before.
"""
"""
Nothing programmed for displaying a message box when the user clicks on the window cross in order to quit.
"""
if self.is_running == True:
warning_planarrad_running = QtGui.QMessageBox.warning(self.ui.quit, 'Warning !',
"PlanarRad is running. Stop it before quit !",
QtGui.QMessageBox.Ok)
else:
quit = QtGui.QMessageBox.question(self.ui.quit, 'Quit PlanarRad', "Are you sure to quit ?",
QtGui.QMessageBox.Yes,
QtGui.QMessageBox.No)
if quit == QtGui.QMessageBox.Yes:
QtGui.qApp.quit()
|
[
"This",
"function",
"quits",
"PlanarRad",
"checking",
"if",
"PlanarRad",
"is",
"running",
"before",
"."
] |
marrabld/planarradpy
|
python
|
https://github.com/marrabld/planarradpy/blob/5095d1cb98d4f67a7c3108c9282f2d59253e89a8/gui/gui_mainLayout.py#L738-L757
|
[
"def",
"quit",
"(",
"self",
")",
":",
"\"\"\"\n Nothing programmed for displaying a message box when the user clicks on the window cross in order to quit.\n \"\"\"",
"if",
"self",
".",
"is_running",
"==",
"True",
":",
"warning_planarrad_running",
"=",
"QtGui",
".",
"QMessageBox",
".",
"warning",
"(",
"self",
".",
"ui",
".",
"quit",
",",
"'Warning !'",
",",
"\"PlanarRad is running. Stop it before quit !\"",
",",
"QtGui",
".",
"QMessageBox",
".",
"Ok",
")",
"else",
":",
"quit",
"=",
"QtGui",
".",
"QMessageBox",
".",
"question",
"(",
"self",
".",
"ui",
".",
"quit",
",",
"'Quit PlanarRad'",
",",
"\"Are you sure to quit ?\"",
",",
"QtGui",
".",
"QMessageBox",
".",
"Yes",
",",
"QtGui",
".",
"QMessageBox",
".",
"No",
")",
"if",
"quit",
"==",
"QtGui",
".",
"QMessageBox",
".",
"Yes",
":",
"QtGui",
".",
"qApp",
".",
"quit",
"(",
")"
] |
5095d1cb98d4f67a7c3108c9282f2d59253e89a8
|
test
|
FormEvents.save_figure
|
This function programs the button to save the figure displayed
and save it in a png file in the current repository.
|
gui/gui_mainLayout.py
|
def save_figure(self):
"""
This function programs the button to save the figure displayed
and save it in a png file in the current repository.
"""
"""
Increment the name of the figure in order to not erase the previous figure if the user use always this method.
The png file is put in the "Artists_saved" file localized in the "planarradpy" folder.
"""
default_name = 'Default_figure.png'
self.ui.graphic_widget.canvas.print_figure(default_name)
src = './' + default_name
dst = './Artists_saved'
os.system("mv" + " " + src + " " + dst)
|
def save_figure(self):
"""
This function programs the button to save the figure displayed
and save it in a png file in the current repository.
"""
"""
Increment the name of the figure in order to not erase the previous figure if the user use always this method.
The png file is put in the "Artists_saved" file localized in the "planarradpy" folder.
"""
default_name = 'Default_figure.png'
self.ui.graphic_widget.canvas.print_figure(default_name)
src = './' + default_name
dst = './Artists_saved'
os.system("mv" + " " + src + " " + dst)
|
[
"This",
"function",
"programs",
"the",
"button",
"to",
"save",
"the",
"figure",
"displayed",
"and",
"save",
"it",
"in",
"a",
"png",
"file",
"in",
"the",
"current",
"repository",
"."
] |
marrabld/planarradpy
|
python
|
https://github.com/marrabld/planarradpy/blob/5095d1cb98d4f67a7c3108c9282f2d59253e89a8/gui/gui_mainLayout.py#L759-L775
|
[
"def",
"save_figure",
"(",
"self",
")",
":",
"\"\"\"\n Increment the name of the figure in order to not erase the previous figure if the user use always this method.\n The png file is put in the \"Artists_saved\" file localized in the \"planarradpy\" folder.\n \"\"\"",
"default_name",
"=",
"'Default_figure.png'",
"self",
".",
"ui",
".",
"graphic_widget",
".",
"canvas",
".",
"print_figure",
"(",
"default_name",
")",
"src",
"=",
"'./'",
"+",
"default_name",
"dst",
"=",
"'./Artists_saved'",
"os",
".",
"system",
"(",
"\"mv\"",
"+",
"\" \"",
"+",
"src",
"+",
"\" \"",
"+",
"dst",
")"
] |
5095d1cb98d4f67a7c3108c9282f2d59253e89a8
|
test
|
FormEvents.save_figure_as
|
This function programs the button to save the figure displayed
and save it in a png file where you want / with the name you want thanks to a file dialog.
|
gui/gui_mainLayout.py
|
def save_figure_as(self):
"""
This function programs the button to save the figure displayed
and save it in a png file where you want / with the name you want thanks to a file dialog.
"""
self.file_name = QtGui.QFileDialog.getSaveFileName()
self.file_name = self.file_name + ".png"
self.ui.graphic_widget.canvas.print_figure(str(self.file_name))
|
def save_figure_as(self):
"""
This function programs the button to save the figure displayed
and save it in a png file where you want / with the name you want thanks to a file dialog.
"""
self.file_name = QtGui.QFileDialog.getSaveFileName()
self.file_name = self.file_name + ".png"
self.ui.graphic_widget.canvas.print_figure(str(self.file_name))
|
[
"This",
"function",
"programs",
"the",
"button",
"to",
"save",
"the",
"figure",
"displayed",
"and",
"save",
"it",
"in",
"a",
"png",
"file",
"where",
"you",
"want",
"/",
"with",
"the",
"name",
"you",
"want",
"thanks",
"to",
"a",
"file",
"dialog",
"."
] |
marrabld/planarradpy
|
python
|
https://github.com/marrabld/planarradpy/blob/5095d1cb98d4f67a7c3108c9282f2d59253e89a8/gui/gui_mainLayout.py#L777-L784
|
[
"def",
"save_figure_as",
"(",
"self",
")",
":",
"self",
".",
"file_name",
"=",
"QtGui",
".",
"QFileDialog",
".",
"getSaveFileName",
"(",
")",
"self",
".",
"file_name",
"=",
"self",
".",
"file_name",
"+",
"\".png\"",
"self",
".",
"ui",
".",
"graphic_widget",
".",
"canvas",
".",
"print_figure",
"(",
"str",
"(",
"self",
".",
"file_name",
")",
")"
] |
5095d1cb98d4f67a7c3108c9282f2d59253e89a8
|
test
|
FormEvents.open_log_file
|
The following opens the log file of PlanarRad.
|
gui/gui_mainLayout.py
|
def open_log_file(self):
"""
The following opens the log file of PlanarRad.
"""
"""
TO DO.
"""
# webbrowser.open('https://marrabld.github.io/planarradpy/')
f = open(os.path.expanduser('~/.planarradpy/log/libplanarradpy.log'))
# self.uiLog.textEdit.setText(str(f.readlines()))
self.uiLog.textEdit.setPlainText(str(f.read()))
self.log_window.show()
|
def open_log_file(self):
"""
The following opens the log file of PlanarRad.
"""
"""
TO DO.
"""
# webbrowser.open('https://marrabld.github.io/planarradpy/')
f = open(os.path.expanduser('~/.planarradpy/log/libplanarradpy.log'))
# self.uiLog.textEdit.setText(str(f.readlines()))
self.uiLog.textEdit.setPlainText(str(f.read()))
self.log_window.show()
|
[
"The",
"following",
"opens",
"the",
"log",
"file",
"of",
"PlanarRad",
"."
] |
marrabld/planarradpy
|
python
|
https://github.com/marrabld/planarradpy/blob/5095d1cb98d4f67a7c3108c9282f2d59253e89a8/gui/gui_mainLayout.py#L792-L803
|
[
"def",
"open_log_file",
"(",
"self",
")",
":",
"\"\"\"\n TO DO.\n \"\"\"",
"# webbrowser.open('https://marrabld.github.io/planarradpy/')",
"f",
"=",
"open",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"'~/.planarradpy/log/libplanarradpy.log'",
")",
")",
"# self.uiLog.textEdit.setText(str(f.readlines()))",
"self",
".",
"uiLog",
".",
"textEdit",
".",
"setPlainText",
"(",
"str",
"(",
"f",
".",
"read",
"(",
")",
")",
")",
"self",
".",
"log_window",
".",
"show",
"(",
")"
] |
5095d1cb98d4f67a7c3108c9282f2d59253e89a8
|
test
|
FormEvents.open_documentation
|
The following opens the documentation file.
|
gui/gui_mainLayout.py
|
def open_documentation(self):
"""
The following opens the documentation file.
"""
"""
TO DO.
"""
# webbrowser.open('https://marrabld.github.io/planarradpy/')
window = Window()
html = QtCore.QUrl.fromLocalFile(os.path.join(os.getcwd(), './docs/_build/html/index.html')) #open('./docs/_build/html/index.html').read()
#window.show()
window.view.load(html)
window.show()
window.exec_()
|
def open_documentation(self):
"""
The following opens the documentation file.
"""
"""
TO DO.
"""
# webbrowser.open('https://marrabld.github.io/planarradpy/')
window = Window()
html = QtCore.QUrl.fromLocalFile(os.path.join(os.getcwd(), './docs/_build/html/index.html')) #open('./docs/_build/html/index.html').read()
#window.show()
window.view.load(html)
window.show()
window.exec_()
|
[
"The",
"following",
"opens",
"the",
"documentation",
"file",
"."
] |
marrabld/planarradpy
|
python
|
https://github.com/marrabld/planarradpy/blob/5095d1cb98d4f67a7c3108c9282f2d59253e89a8/gui/gui_mainLayout.py#L805-L819
|
[
"def",
"open_documentation",
"(",
"self",
")",
":",
"\"\"\"\n TO DO.\n \"\"\"",
"# webbrowser.open('https://marrabld.github.io/planarradpy/')",
"window",
"=",
"Window",
"(",
")",
"html",
"=",
"QtCore",
".",
"QUrl",
".",
"fromLocalFile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"getcwd",
"(",
")",
",",
"'./docs/_build/html/index.html'",
")",
")",
"#open('./docs/_build/html/index.html').read()",
"#window.show()",
"window",
".",
"view",
".",
"load",
"(",
"html",
")",
"window",
".",
"show",
"(",
")",
"window",
".",
"exec_",
"(",
")"
] |
5095d1cb98d4f67a7c3108c9282f2d59253e89a8
|
test
|
FormEvents.prerequisite_actions
|
This function does all required actions at the beginning when we run the GUI.
|
gui/gui_mainLayout.py
|
def prerequisite_actions(self):
"""
This function does all required actions at the beginning when we run the GUI.
"""
self.hide_error_message()
self.ui.show_all_curves.setDisabled(True)
self.ui.sens.setDisabled(True)
self.ui.show_grid.setDisabled(True)
pathname = os.path.dirname(sys.argv[0])
path = os.path.abspath(pathname)
# self.phytoplankton_path = self.ui.phyto_path.setText(path.replace('gui', 'inputs/iop_files'))
# self.bottom_path = self.ui.bottom_path.setText(path.replace('gui', 'inputs/bottom_files'))
# self.executive_path = self.ui.exec_path.setText("Decide where will be 'jude2_install/bin'")
self.verbose_value = self.ui.verbose_value.setText("6")
self.report_parameter_value = self.ui.report_parameter_value.setText("Rrs")
self.ui.progressBar.reset()
|
def prerequisite_actions(self):
"""
This function does all required actions at the beginning when we run the GUI.
"""
self.hide_error_message()
self.ui.show_all_curves.setDisabled(True)
self.ui.sens.setDisabled(True)
self.ui.show_grid.setDisabled(True)
pathname = os.path.dirname(sys.argv[0])
path = os.path.abspath(pathname)
# self.phytoplankton_path = self.ui.phyto_path.setText(path.replace('gui', 'inputs/iop_files'))
# self.bottom_path = self.ui.bottom_path.setText(path.replace('gui', 'inputs/bottom_files'))
# self.executive_path = self.ui.exec_path.setText("Decide where will be 'jude2_install/bin'")
self.verbose_value = self.ui.verbose_value.setText("6")
self.report_parameter_value = self.ui.report_parameter_value.setText("Rrs")
self.ui.progressBar.reset()
|
[
"This",
"function",
"does",
"all",
"required",
"actions",
"at",
"the",
"beginning",
"when",
"we",
"run",
"the",
"GUI",
"."
] |
marrabld/planarradpy
|
python
|
https://github.com/marrabld/planarradpy/blob/5095d1cb98d4f67a7c3108c9282f2d59253e89a8/gui/gui_mainLayout.py#L822-L840
|
[
"def",
"prerequisite_actions",
"(",
"self",
")",
":",
"self",
".",
"hide_error_message",
"(",
")",
"self",
".",
"ui",
".",
"show_all_curves",
".",
"setDisabled",
"(",
"True",
")",
"self",
".",
"ui",
".",
"sens",
".",
"setDisabled",
"(",
"True",
")",
"self",
".",
"ui",
".",
"show_grid",
".",
"setDisabled",
"(",
"True",
")",
"pathname",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"sys",
".",
"argv",
"[",
"0",
"]",
")",
"path",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"pathname",
")",
"# self.phytoplankton_path = self.ui.phyto_path.setText(path.replace('gui', 'inputs/iop_files'))",
"# self.bottom_path = self.ui.bottom_path.setText(path.replace('gui', 'inputs/bottom_files'))",
"# self.executive_path = self.ui.exec_path.setText(\"Decide where will be 'jude2_install/bin'\")",
"self",
".",
"verbose_value",
"=",
"self",
".",
"ui",
".",
"verbose_value",
".",
"setText",
"(",
"\"6\"",
")",
"self",
".",
"report_parameter_value",
"=",
"self",
".",
"ui",
".",
"report_parameter_value",
".",
"setText",
"(",
"\"Rrs\"",
")",
"self",
".",
"ui",
".",
"progressBar",
".",
"reset",
"(",
")"
] |
5095d1cb98d4f67a7c3108c9282f2d59253e89a8
|
test
|
FormEvents.click
|
This function intercepts the mouse's right click and its position.
|
gui/gui_mainLayout.py
|
def click(self, event):
"""
This function intercepts the mouse's right click and its position.
"""
if event.button == 3:
if self.ui.tabWidget.currentIndex() == TabWidget.NORMAL_MODE:
self.pos = QtGui.QCursor().pos()
self.graphic_context_menu(self.pos)
|
def click(self, event):
"""
This function intercepts the mouse's right click and its position.
"""
if event.button == 3:
if self.ui.tabWidget.currentIndex() == TabWidget.NORMAL_MODE:
self.pos = QtGui.QCursor().pos()
self.graphic_context_menu(self.pos)
|
[
"This",
"function",
"intercepts",
"the",
"mouse",
"s",
"right",
"click",
"and",
"its",
"position",
"."
] |
marrabld/planarradpy
|
python
|
https://github.com/marrabld/planarradpy/blob/5095d1cb98d4f67a7c3108c9282f2d59253e89a8/gui/gui_mainLayout.py#L854-L861
|
[
"def",
"click",
"(",
"self",
",",
"event",
")",
":",
"if",
"event",
".",
"button",
"==",
"3",
":",
"if",
"self",
".",
"ui",
".",
"tabWidget",
".",
"currentIndex",
"(",
")",
"==",
"TabWidget",
".",
"NORMAL_MODE",
":",
"self",
".",
"pos",
"=",
"QtGui",
".",
"QCursor",
"(",
")",
".",
"pos",
"(",
")",
"self",
".",
"graphic_context_menu",
"(",
"self",
".",
"pos",
")"
] |
5095d1cb98d4f67a7c3108c9282f2d59253e89a8
|
test
|
FormEvents.mouse_move
|
The following gets back coordinates of the mouse on the canvas.
|
gui/gui_mainLayout.py
|
def mouse_move(self, event):
"""
The following gets back coordinates of the mouse on the canvas.
"""
if (self.ui.tabWidget.currentIndex() == TabWidget.NORMAL_MODE):
self.posX = event.xdata
self.posY = event.ydata
self.graphic_target(self.posX, self.posY)
|
def mouse_move(self, event):
"""
The following gets back coordinates of the mouse on the canvas.
"""
if (self.ui.tabWidget.currentIndex() == TabWidget.NORMAL_MODE):
self.posX = event.xdata
self.posY = event.ydata
self.graphic_target(self.posX, self.posY)
|
[
"The",
"following",
"gets",
"back",
"coordinates",
"of",
"the",
"mouse",
"on",
"the",
"canvas",
"."
] |
marrabld/planarradpy
|
python
|
https://github.com/marrabld/planarradpy/blob/5095d1cb98d4f67a7c3108c9282f2d59253e89a8/gui/gui_mainLayout.py#L863-L871
|
[
"def",
"mouse_move",
"(",
"self",
",",
"event",
")",
":",
"if",
"(",
"self",
".",
"ui",
".",
"tabWidget",
".",
"currentIndex",
"(",
")",
"==",
"TabWidget",
".",
"NORMAL_MODE",
")",
":",
"self",
".",
"posX",
"=",
"event",
".",
"xdata",
"self",
".",
"posY",
"=",
"event",
".",
"ydata",
"self",
".",
"graphic_target",
"(",
"self",
".",
"posX",
",",
"self",
".",
"posY",
")"
] |
5095d1cb98d4f67a7c3108c9282f2d59253e89a8
|
test
|
FormEvents.graphic_context_menu
|
This function will open a context menu on the graphic to save it.
Inputs : pos : The position of the mouse cursor.
|
gui/gui_mainLayout.py
|
def graphic_context_menu(self, pos):
"""
This function will open a context menu on the graphic to save it.
Inputs : pos : The position of the mouse cursor.
"""
menu = QtGui.QMenu()
self.actionSave_bis = menu.addAction("Save Figure")
self.actionSave_as_bis = menu.addAction("Save Figure As ...")
action = menu.exec_(self.table_widget.mapFromGlobal(pos))
if action == self.actionSave_bis:
self.save_figure()
elif action == self.actionSave_as_bis:
self.save_figure_as()
|
def graphic_context_menu(self, pos):
"""
This function will open a context menu on the graphic to save it.
Inputs : pos : The position of the mouse cursor.
"""
menu = QtGui.QMenu()
self.actionSave_bis = menu.addAction("Save Figure")
self.actionSave_as_bis = menu.addAction("Save Figure As ...")
action = menu.exec_(self.table_widget.mapFromGlobal(pos))
if action == self.actionSave_bis:
self.save_figure()
elif action == self.actionSave_as_bis:
self.save_figure_as()
|
[
"This",
"function",
"will",
"open",
"a",
"context",
"menu",
"on",
"the",
"graphic",
"to",
"save",
"it",
".",
"Inputs",
":",
"pos",
":",
"The",
"position",
"of",
"the",
"mouse",
"cursor",
"."
] |
marrabld/planarradpy
|
python
|
https://github.com/marrabld/planarradpy/blob/5095d1cb98d4f67a7c3108c9282f2d59253e89a8/gui/gui_mainLayout.py#L873-L886
|
[
"def",
"graphic_context_menu",
"(",
"self",
",",
"pos",
")",
":",
"menu",
"=",
"QtGui",
".",
"QMenu",
"(",
")",
"self",
".",
"actionSave_bis",
"=",
"menu",
".",
"addAction",
"(",
"\"Save Figure\"",
")",
"self",
".",
"actionSave_as_bis",
"=",
"menu",
".",
"addAction",
"(",
"\"Save Figure As ...\"",
")",
"action",
"=",
"menu",
".",
"exec_",
"(",
"self",
".",
"table_widget",
".",
"mapFromGlobal",
"(",
"pos",
")",
")",
"if",
"action",
"==",
"self",
".",
"actionSave_bis",
":",
"self",
".",
"save_figure",
"(",
")",
"elif",
"action",
"==",
"self",
".",
"actionSave_as_bis",
":",
"self",
".",
"save_figure_as",
"(",
")"
] |
5095d1cb98d4f67a7c3108c9282f2d59253e89a8
|
test
|
FormEvents.graphic_target
|
The following update labels about mouse coordinates.
|
gui/gui_mainLayout.py
|
def graphic_target(self, x, y):
"""
The following update labels about mouse coordinates.
"""
if self.authorized_display == True:
try:
self.display_the_graphic(self.num_line, self.wavelength, self.data_wanted, self.information)
self.ui.mouse_coordinate.setText("(%0.3f, %0.3f)" % (x, y))
except:
pass
|
def graphic_target(self, x, y):
"""
The following update labels about mouse coordinates.
"""
if self.authorized_display == True:
try:
self.display_the_graphic(self.num_line, self.wavelength, self.data_wanted, self.information)
self.ui.mouse_coordinate.setText("(%0.3f, %0.3f)" % (x, y))
except:
pass
|
[
"The",
"following",
"update",
"labels",
"about",
"mouse",
"coordinates",
"."
] |
marrabld/planarradpy
|
python
|
https://github.com/marrabld/planarradpy/blob/5095d1cb98d4f67a7c3108c9282f2d59253e89a8/gui/gui_mainLayout.py#L888-L898
|
[
"def",
"graphic_target",
"(",
"self",
",",
"x",
",",
"y",
")",
":",
"if",
"self",
".",
"authorized_display",
"==",
"True",
":",
"try",
":",
"self",
".",
"display_the_graphic",
"(",
"self",
".",
"num_line",
",",
"self",
".",
"wavelength",
",",
"self",
".",
"data_wanted",
",",
"self",
".",
"information",
")",
"self",
".",
"ui",
".",
"mouse_coordinate",
".",
"setText",
"(",
"\"(%0.3f, %0.3f)\"",
"%",
"(",
"x",
",",
"y",
")",
")",
"except",
":",
"pass"
] |
5095d1cb98d4f67a7c3108c9282f2d59253e89a8
|
test
|
genesis_signing_lockset
|
in order to avoid a complicated bootstrapping, we define
the genesis_signing_lockset as a lockset with one vote by any validator.
|
hydrachain/consensus/base.py
|
def genesis_signing_lockset(genesis, privkey):
"""
in order to avoid a complicated bootstrapping, we define
the genesis_signing_lockset as a lockset with one vote by any validator.
"""
v = VoteBlock(0, 0, genesis.hash)
v.sign(privkey)
ls = LockSet(num_eligible_votes=1)
ls.add(v)
assert ls.has_quorum
return ls
|
def genesis_signing_lockset(genesis, privkey):
"""
in order to avoid a complicated bootstrapping, we define
the genesis_signing_lockset as a lockset with one vote by any validator.
"""
v = VoteBlock(0, 0, genesis.hash)
v.sign(privkey)
ls = LockSet(num_eligible_votes=1)
ls.add(v)
assert ls.has_quorum
return ls
|
[
"in",
"order",
"to",
"avoid",
"a",
"complicated",
"bootstrapping",
"we",
"define",
"the",
"genesis_signing_lockset",
"as",
"a",
"lockset",
"with",
"one",
"vote",
"by",
"any",
"validator",
"."
] |
HydraChain/hydrachain
|
python
|
https://github.com/HydraChain/hydrachain/blob/6c0919b0575dc8aa481f3a8c703e1a7f0575ecc3/hydrachain/consensus/base.py#L332-L342
|
[
"def",
"genesis_signing_lockset",
"(",
"genesis",
",",
"privkey",
")",
":",
"v",
"=",
"VoteBlock",
"(",
"0",
",",
"0",
",",
"genesis",
".",
"hash",
")",
"v",
".",
"sign",
"(",
"privkey",
")",
"ls",
"=",
"LockSet",
"(",
"num_eligible_votes",
"=",
"1",
")",
"ls",
".",
"add",
"(",
"v",
")",
"assert",
"ls",
".",
"has_quorum",
"return",
"ls"
] |
6c0919b0575dc8aa481f3a8c703e1a7f0575ecc3
|
test
|
Signed.sign
|
Sign this with a private key
|
hydrachain/consensus/base.py
|
def sign(self, privkey):
"""Sign this with a private key"""
if self.v:
raise InvalidSignature("already signed")
if privkey in (0, '', '\x00' * 32):
raise InvalidSignature("Zero privkey cannot sign")
rawhash = sha3(rlp.encode(self, self.__class__.exclude(['v', 'r', 's'])))
if len(privkey) == 64:
privkey = encode_privkey(privkey, 'bin')
pk = PrivateKey(privkey, raw=True)
signature = pk.ecdsa_recoverable_serialize(pk.ecdsa_sign_recoverable(rawhash, raw=True))
signature = signature[0] + chr(signature[1])
self.v = ord(signature[64]) + 27
self.r = big_endian_to_int(signature[0:32])
self.s = big_endian_to_int(signature[32:64])
self._sender = None
return self
|
def sign(self, privkey):
"""Sign this with a private key"""
if self.v:
raise InvalidSignature("already signed")
if privkey in (0, '', '\x00' * 32):
raise InvalidSignature("Zero privkey cannot sign")
rawhash = sha3(rlp.encode(self, self.__class__.exclude(['v', 'r', 's'])))
if len(privkey) == 64:
privkey = encode_privkey(privkey, 'bin')
pk = PrivateKey(privkey, raw=True)
signature = pk.ecdsa_recoverable_serialize(pk.ecdsa_sign_recoverable(rawhash, raw=True))
signature = signature[0] + chr(signature[1])
self.v = ord(signature[64]) + 27
self.r = big_endian_to_int(signature[0:32])
self.s = big_endian_to_int(signature[32:64])
self._sender = None
return self
|
[
"Sign",
"this",
"with",
"a",
"private",
"key"
] |
HydraChain/hydrachain
|
python
|
https://github.com/HydraChain/hydrachain/blob/6c0919b0575dc8aa481f3a8c703e1a7f0575ecc3/hydrachain/consensus/base.py#L71-L93
|
[
"def",
"sign",
"(",
"self",
",",
"privkey",
")",
":",
"if",
"self",
".",
"v",
":",
"raise",
"InvalidSignature",
"(",
"\"already signed\"",
")",
"if",
"privkey",
"in",
"(",
"0",
",",
"''",
",",
"'\\x00'",
"*",
"32",
")",
":",
"raise",
"InvalidSignature",
"(",
"\"Zero privkey cannot sign\"",
")",
"rawhash",
"=",
"sha3",
"(",
"rlp",
".",
"encode",
"(",
"self",
",",
"self",
".",
"__class__",
".",
"exclude",
"(",
"[",
"'v'",
",",
"'r'",
",",
"'s'",
"]",
")",
")",
")",
"if",
"len",
"(",
"privkey",
")",
"==",
"64",
":",
"privkey",
"=",
"encode_privkey",
"(",
"privkey",
",",
"'bin'",
")",
"pk",
"=",
"PrivateKey",
"(",
"privkey",
",",
"raw",
"=",
"True",
")",
"signature",
"=",
"pk",
".",
"ecdsa_recoverable_serialize",
"(",
"pk",
".",
"ecdsa_sign_recoverable",
"(",
"rawhash",
",",
"raw",
"=",
"True",
")",
")",
"signature",
"=",
"signature",
"[",
"0",
"]",
"+",
"chr",
"(",
"signature",
"[",
"1",
"]",
")",
"self",
".",
"v",
"=",
"ord",
"(",
"signature",
"[",
"64",
"]",
")",
"+",
"27",
"self",
".",
"r",
"=",
"big_endian_to_int",
"(",
"signature",
"[",
"0",
":",
"32",
"]",
")",
"self",
".",
"s",
"=",
"big_endian_to_int",
"(",
"signature",
"[",
"32",
":",
"64",
"]",
")",
"self",
".",
"_sender",
"=",
"None",
"return",
"self"
] |
6c0919b0575dc8aa481f3a8c703e1a7f0575ecc3
|
test
|
Signed.hash
|
signatures are non deterministic
|
hydrachain/consensus/base.py
|
def hash(self):
"signatures are non deterministic"
if self.sender is None:
raise MissingSignatureError()
class HashSerializable(rlp.Serializable):
fields = [(field, sedes) for field, sedes in self.fields
if field not in ('v', 'r', 's')] + [('_sender', binary)]
_sedes = None
return sha3(rlp.encode(self, HashSerializable))
|
def hash(self):
"signatures are non deterministic"
if self.sender is None:
raise MissingSignatureError()
class HashSerializable(rlp.Serializable):
fields = [(field, sedes) for field, sedes in self.fields
if field not in ('v', 'r', 's')] + [('_sender', binary)]
_sedes = None
return sha3(rlp.encode(self, HashSerializable))
|
[
"signatures",
"are",
"non",
"deterministic"
] |
HydraChain/hydrachain
|
python
|
https://github.com/HydraChain/hydrachain/blob/6c0919b0575dc8aa481f3a8c703e1a7f0575ecc3/hydrachain/consensus/base.py#L133-L142
|
[
"def",
"hash",
"(",
"self",
")",
":",
"if",
"self",
".",
"sender",
"is",
"None",
":",
"raise",
"MissingSignatureError",
"(",
")",
"class",
"HashSerializable",
"(",
"rlp",
".",
"Serializable",
")",
":",
"fields",
"=",
"[",
"(",
"field",
",",
"sedes",
")",
"for",
"field",
",",
"sedes",
"in",
"self",
".",
"fields",
"if",
"field",
"not",
"in",
"(",
"'v'",
",",
"'r'",
",",
"'s'",
")",
"]",
"+",
"[",
"(",
"'_sender'",
",",
"binary",
")",
"]",
"_sedes",
"=",
"None",
"return",
"sha3",
"(",
"rlp",
".",
"encode",
"(",
"self",
",",
"HashSerializable",
")",
")"
] |
6c0919b0575dc8aa481f3a8c703e1a7f0575ecc3
|
test
|
LockSet.hr
|
compute (height,round)
We might have multiple rounds before we see consensus for a certain height.
If everything is good, round should always be 0.
|
hydrachain/consensus/base.py
|
def hr(self):
"""compute (height,round)
We might have multiple rounds before we see consensus for a certain height.
If everything is good, round should always be 0.
"""
assert len(self), 'no votes, can not determine height'
h = set([(v.height, v.round) for v in self.votes])
assert len(h) == 1, len(h)
return h.pop()
|
def hr(self):
"""compute (height,round)
We might have multiple rounds before we see consensus for a certain height.
If everything is good, round should always be 0.
"""
assert len(self), 'no votes, can not determine height'
h = set([(v.height, v.round) for v in self.votes])
assert len(h) == 1, len(h)
return h.pop()
|
[
"compute",
"(",
"height",
"round",
")",
"We",
"might",
"have",
"multiple",
"rounds",
"before",
"we",
"see",
"consensus",
"for",
"a",
"certain",
"height",
".",
"If",
"everything",
"is",
"good",
"round",
"should",
"always",
"be",
"0",
"."
] |
HydraChain/hydrachain
|
python
|
https://github.com/HydraChain/hydrachain/blob/6c0919b0575dc8aa481f3a8c703e1a7f0575ecc3/hydrachain/consensus/base.py#L268-L276
|
[
"def",
"hr",
"(",
"self",
")",
":",
"assert",
"len",
"(",
"self",
")",
",",
"'no votes, can not determine height'",
"h",
"=",
"set",
"(",
"[",
"(",
"v",
".",
"height",
",",
"v",
".",
"round",
")",
"for",
"v",
"in",
"self",
".",
"votes",
"]",
")",
"assert",
"len",
"(",
"h",
")",
"==",
"1",
",",
"len",
"(",
"h",
")",
"return",
"h",
".",
"pop",
"(",
")"
] |
6c0919b0575dc8aa481f3a8c703e1a7f0575ecc3
|
test
|
LockSet.has_quorum
|
we've seen +2/3 of all eligible votes voting for one block.
there is a quorum.
|
hydrachain/consensus/base.py
|
def has_quorum(self):
"""
we've seen +2/3 of all eligible votes voting for one block.
there is a quorum.
"""
assert self.is_valid
bhs = self.blockhashes()
if bhs and bhs[0][1] > 2 / 3. * self.num_eligible_votes:
return bhs[0][0]
|
def has_quorum(self):
"""
we've seen +2/3 of all eligible votes voting for one block.
there is a quorum.
"""
assert self.is_valid
bhs = self.blockhashes()
if bhs and bhs[0][1] > 2 / 3. * self.num_eligible_votes:
return bhs[0][0]
|
[
"we",
"ve",
"seen",
"+",
"2",
"/",
"3",
"of",
"all",
"eligible",
"votes",
"voting",
"for",
"one",
"block",
".",
"there",
"is",
"a",
"quorum",
"."
] |
HydraChain/hydrachain
|
python
|
https://github.com/HydraChain/hydrachain/blob/6c0919b0575dc8aa481f3a8c703e1a7f0575ecc3/hydrachain/consensus/base.py#L286-L294
|
[
"def",
"has_quorum",
"(",
"self",
")",
":",
"assert",
"self",
".",
"is_valid",
"bhs",
"=",
"self",
".",
"blockhashes",
"(",
")",
"if",
"bhs",
"and",
"bhs",
"[",
"0",
"]",
"[",
"1",
"]",
">",
"2",
"/",
"3.",
"*",
"self",
".",
"num_eligible_votes",
":",
"return",
"bhs",
"[",
"0",
"]",
"[",
"0",
"]"
] |
6c0919b0575dc8aa481f3a8c703e1a7f0575ecc3
|
test
|
LockSet.has_noquorum
|
less than 1/3 of the known votes are on the same block
|
hydrachain/consensus/base.py
|
def has_noquorum(self):
"""
less than 1/3 of the known votes are on the same block
"""
assert self.is_valid
bhs = self.blockhashes()
if not bhs or bhs[0][1] <= 1 / 3. * self.num_eligible_votes:
assert not self.has_quorum_possible
return True
|
def has_noquorum(self):
"""
less than 1/3 of the known votes are on the same block
"""
assert self.is_valid
bhs = self.blockhashes()
if not bhs or bhs[0][1] <= 1 / 3. * self.num_eligible_votes:
assert not self.has_quorum_possible
return True
|
[
"less",
"than",
"1",
"/",
"3",
"of",
"the",
"known",
"votes",
"are",
"on",
"the",
"same",
"block"
] |
HydraChain/hydrachain
|
python
|
https://github.com/HydraChain/hydrachain/blob/6c0919b0575dc8aa481f3a8c703e1a7f0575ecc3/hydrachain/consensus/base.py#L297-L305
|
[
"def",
"has_noquorum",
"(",
"self",
")",
":",
"assert",
"self",
".",
"is_valid",
"bhs",
"=",
"self",
".",
"blockhashes",
"(",
")",
"if",
"not",
"bhs",
"or",
"bhs",
"[",
"0",
"]",
"[",
"1",
"]",
"<=",
"1",
"/",
"3.",
"*",
"self",
".",
"num_eligible_votes",
":",
"assert",
"not",
"self",
".",
"has_quorum_possible",
"return",
"True"
] |
6c0919b0575dc8aa481f3a8c703e1a7f0575ecc3
|
test
|
LockSet.check
|
either invalid or one of quorum, noquorum, quorumpossible
|
hydrachain/consensus/base.py
|
def check(self):
"either invalid or one of quorum, noquorum, quorumpossible"
if not self.is_valid:
return True
test = (self.has_quorum, self.has_quorum_possible, self.has_noquorum)
assert 1 == len([x for x in test if x is not None])
return True
|
def check(self):
"either invalid or one of quorum, noquorum, quorumpossible"
if not self.is_valid:
return True
test = (self.has_quorum, self.has_quorum_possible, self.has_noquorum)
assert 1 == len([x for x in test if x is not None])
return True
|
[
"either",
"invalid",
"or",
"one",
"of",
"quorum",
"noquorum",
"quorumpossible"
] |
HydraChain/hydrachain
|
python
|
https://github.com/HydraChain/hydrachain/blob/6c0919b0575dc8aa481f3a8c703e1a7f0575ecc3/hydrachain/consensus/base.py#L321-L327
|
[
"def",
"check",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"is_valid",
":",
"return",
"True",
"test",
"=",
"(",
"self",
".",
"has_quorum",
",",
"self",
".",
"has_quorum_possible",
",",
"self",
".",
"has_noquorum",
")",
"assert",
"1",
"==",
"len",
"(",
"[",
"x",
"for",
"x",
"in",
"test",
"if",
"x",
"is",
"not",
"None",
"]",
")",
"return",
"True"
] |
6c0919b0575dc8aa481f3a8c703e1a7f0575ecc3
|
test
|
TransientBlock.to_block
|
Convert the transient block to a :class:`ethereum.blocks.Block`
|
hydrachain/consensus/base.py
|
def to_block(self, env, parent=None):
"""Convert the transient block to a :class:`ethereum.blocks.Block`"""
return Block(self.header, self.transaction_list, self.uncles, env=env, parent=parent)
|
def to_block(self, env, parent=None):
"""Convert the transient block to a :class:`ethereum.blocks.Block`"""
return Block(self.header, self.transaction_list, self.uncles, env=env, parent=parent)
|
[
"Convert",
"the",
"transient",
"block",
"to",
"a",
":",
"class",
":",
"ethereum",
".",
"blocks",
".",
"Block"
] |
HydraChain/hydrachain
|
python
|
https://github.com/HydraChain/hydrachain/blob/6c0919b0575dc8aa481f3a8c703e1a7f0575ecc3/hydrachain/consensus/base.py#L393-L395
|
[
"def",
"to_block",
"(",
"self",
",",
"env",
",",
"parent",
"=",
"None",
")",
":",
"return",
"Block",
"(",
"self",
".",
"header",
",",
"self",
".",
"transaction_list",
",",
"self",
".",
"uncles",
",",
"env",
"=",
"env",
",",
"parent",
"=",
"parent",
")"
] |
6c0919b0575dc8aa481f3a8c703e1a7f0575ecc3
|
test
|
BlockProposal.validate_votes
|
set of validators may change between heights
|
hydrachain/consensus/base.py
|
def validate_votes(self, validators_H, validators_prevH):
"set of validators may change between heights"
assert self.sender
def check(lockset, validators):
if not lockset.num_eligible_votes == len(validators):
raise InvalidProposalError('lockset num_eligible_votes mismatch')
for v in lockset:
if v.sender not in validators:
raise InvalidProposalError('invalid signer')
if self.round_lockset:
check(self.round_lockset, validators_H)
check(self.signing_lockset, validators_prevH)
return True
|
def validate_votes(self, validators_H, validators_prevH):
"set of validators may change between heights"
assert self.sender
def check(lockset, validators):
if not lockset.num_eligible_votes == len(validators):
raise InvalidProposalError('lockset num_eligible_votes mismatch')
for v in lockset:
if v.sender not in validators:
raise InvalidProposalError('invalid signer')
if self.round_lockset:
check(self.round_lockset, validators_H)
check(self.signing_lockset, validators_prevH)
return True
|
[
"set",
"of",
"validators",
"may",
"change",
"between",
"heights"
] |
HydraChain/hydrachain
|
python
|
https://github.com/HydraChain/hydrachain/blob/6c0919b0575dc8aa481f3a8c703e1a7f0575ecc3/hydrachain/consensus/base.py#L497-L511
|
[
"def",
"validate_votes",
"(",
"self",
",",
"validators_H",
",",
"validators_prevH",
")",
":",
"assert",
"self",
".",
"sender",
"def",
"check",
"(",
"lockset",
",",
"validators",
")",
":",
"if",
"not",
"lockset",
".",
"num_eligible_votes",
"==",
"len",
"(",
"validators",
")",
":",
"raise",
"InvalidProposalError",
"(",
"'lockset num_eligible_votes mismatch'",
")",
"for",
"v",
"in",
"lockset",
":",
"if",
"v",
".",
"sender",
"not",
"in",
"validators",
":",
"raise",
"InvalidProposalError",
"(",
"'invalid signer'",
")",
"if",
"self",
".",
"round_lockset",
":",
"check",
"(",
"self",
".",
"round_lockset",
",",
"validators_H",
")",
"check",
"(",
"self",
".",
"signing_lockset",
",",
"validators_prevH",
")",
"return",
"True"
] |
6c0919b0575dc8aa481f3a8c703e1a7f0575ecc3
|
test
|
VotingInstruction.validate_votes
|
set of validators may change between heights
|
hydrachain/consensus/base.py
|
def validate_votes(self, validators_H):
"set of validators may change between heights"
assert self.sender
if not self.round_lockset.num_eligible_votes == len(validators_H):
raise InvalidProposalError('round_lockset num_eligible_votes mismatch')
for v in self.round_lockset:
if v.sender not in validators_H:
raise InvalidProposalError('invalid signer')
|
def validate_votes(self, validators_H):
"set of validators may change between heights"
assert self.sender
if not self.round_lockset.num_eligible_votes == len(validators_H):
raise InvalidProposalError('round_lockset num_eligible_votes mismatch')
for v in self.round_lockset:
if v.sender not in validators_H:
raise InvalidProposalError('invalid signer')
|
[
"set",
"of",
"validators",
"may",
"change",
"between",
"heights"
] |
HydraChain/hydrachain
|
python
|
https://github.com/HydraChain/hydrachain/blob/6c0919b0575dc8aa481f3a8c703e1a7f0575ecc3/hydrachain/consensus/base.py#L556-L563
|
[
"def",
"validate_votes",
"(",
"self",
",",
"validators_H",
")",
":",
"assert",
"self",
".",
"sender",
"if",
"not",
"self",
".",
"round_lockset",
".",
"num_eligible_votes",
"==",
"len",
"(",
"validators_H",
")",
":",
"raise",
"InvalidProposalError",
"(",
"'round_lockset num_eligible_votes mismatch'",
")",
"for",
"v",
"in",
"self",
".",
"round_lockset",
":",
"if",
"v",
".",
"sender",
"not",
"in",
"validators_H",
":",
"raise",
"InvalidProposalError",
"(",
"'invalid signer'",
")"
] |
6c0919b0575dc8aa481f3a8c703e1a7f0575ecc3
|
test
|
Fungible.transfer
|
Standardized Contract API:
function transfer(address _to, uint256 _value) returns (bool _success)
|
hydrachain/examples/native/fungible/fungible_contract.py
|
def transfer(ctx, _to='address', _value='uint256', returns=STATUS):
""" Standardized Contract API:
function transfer(address _to, uint256 _value) returns (bool _success)
"""
log.DEV('In Fungible.transfer')
if ctx.accounts[ctx.msg_sender] >= _value:
ctx.accounts[ctx.msg_sender] -= _value
ctx.accounts[_to] += _value
ctx.Transfer(ctx.msg_sender, _to, _value)
return OK
else:
return INSUFFICIENTFUNDS
|
def transfer(ctx, _to='address', _value='uint256', returns=STATUS):
""" Standardized Contract API:
function transfer(address _to, uint256 _value) returns (bool _success)
"""
log.DEV('In Fungible.transfer')
if ctx.accounts[ctx.msg_sender] >= _value:
ctx.accounts[ctx.msg_sender] -= _value
ctx.accounts[_to] += _value
ctx.Transfer(ctx.msg_sender, _to, _value)
return OK
else:
return INSUFFICIENTFUNDS
|
[
"Standardized",
"Contract",
"API",
":",
"function",
"transfer",
"(",
"address",
"_to",
"uint256",
"_value",
")",
"returns",
"(",
"bool",
"_success",
")"
] |
HydraChain/hydrachain
|
python
|
https://github.com/HydraChain/hydrachain/blob/6c0919b0575dc8aa481f3a8c703e1a7f0575ecc3/hydrachain/examples/native/fungible/fungible_contract.py#L49-L60
|
[
"def",
"transfer",
"(",
"ctx",
",",
"_to",
"=",
"'address'",
",",
"_value",
"=",
"'uint256'",
",",
"returns",
"=",
"STATUS",
")",
":",
"log",
".",
"DEV",
"(",
"'In Fungible.transfer'",
")",
"if",
"ctx",
".",
"accounts",
"[",
"ctx",
".",
"msg_sender",
"]",
">=",
"_value",
":",
"ctx",
".",
"accounts",
"[",
"ctx",
".",
"msg_sender",
"]",
"-=",
"_value",
"ctx",
".",
"accounts",
"[",
"_to",
"]",
"+=",
"_value",
"ctx",
".",
"Transfer",
"(",
"ctx",
".",
"msg_sender",
",",
"_to",
",",
"_value",
")",
"return",
"OK",
"else",
":",
"return",
"INSUFFICIENTFUNDS"
] |
6c0919b0575dc8aa481f3a8c703e1a7f0575ecc3
|
test
|
Fungible.transferFrom
|
Standardized Contract API:
function transferFrom(address _from, address _to, uint256 _value) returns (bool success)
|
hydrachain/examples/native/fungible/fungible_contract.py
|
def transferFrom(ctx, _from='address', _to='address', _value='uint256', returns=STATUS):
""" Standardized Contract API:
function transferFrom(address _from, address _to, uint256 _value) returns (bool success)
"""
auth = ctx.allowances[_from][ctx.msg_sender]
if ctx.accounts[_from] >= _value and auth >= _value:
ctx.allowances[_from][ctx.msg_sender] -= _value
ctx.accounts[_from] -= _value
ctx.accounts[_to] += _value
ctx.Transfer(_from, _to, _value)
return OK
else:
return INSUFFICIENTFUNDS
|
def transferFrom(ctx, _from='address', _to='address', _value='uint256', returns=STATUS):
""" Standardized Contract API:
function transferFrom(address _from, address _to, uint256 _value) returns (bool success)
"""
auth = ctx.allowances[_from][ctx.msg_sender]
if ctx.accounts[_from] >= _value and auth >= _value:
ctx.allowances[_from][ctx.msg_sender] -= _value
ctx.accounts[_from] -= _value
ctx.accounts[_to] += _value
ctx.Transfer(_from, _to, _value)
return OK
else:
return INSUFFICIENTFUNDS
|
[
"Standardized",
"Contract",
"API",
":",
"function",
"transferFrom",
"(",
"address",
"_from",
"address",
"_to",
"uint256",
"_value",
")",
"returns",
"(",
"bool",
"success",
")"
] |
HydraChain/hydrachain
|
python
|
https://github.com/HydraChain/hydrachain/blob/6c0919b0575dc8aa481f3a8c703e1a7f0575ecc3/hydrachain/examples/native/fungible/fungible_contract.py#L62-L74
|
[
"def",
"transferFrom",
"(",
"ctx",
",",
"_from",
"=",
"'address'",
",",
"_to",
"=",
"'address'",
",",
"_value",
"=",
"'uint256'",
",",
"returns",
"=",
"STATUS",
")",
":",
"auth",
"=",
"ctx",
".",
"allowances",
"[",
"_from",
"]",
"[",
"ctx",
".",
"msg_sender",
"]",
"if",
"ctx",
".",
"accounts",
"[",
"_from",
"]",
">=",
"_value",
"and",
"auth",
">=",
"_value",
":",
"ctx",
".",
"allowances",
"[",
"_from",
"]",
"[",
"ctx",
".",
"msg_sender",
"]",
"-=",
"_value",
"ctx",
".",
"accounts",
"[",
"_from",
"]",
"-=",
"_value",
"ctx",
".",
"accounts",
"[",
"_to",
"]",
"+=",
"_value",
"ctx",
".",
"Transfer",
"(",
"_from",
",",
"_to",
",",
"_value",
")",
"return",
"OK",
"else",
":",
"return",
"INSUFFICIENTFUNDS"
] |
6c0919b0575dc8aa481f3a8c703e1a7f0575ecc3
|
test
|
Fungible.approve
|
Standardized Contract API:
function approve(address _spender, uint256 _value) returns (bool success)
|
hydrachain/examples/native/fungible/fungible_contract.py
|
def approve(ctx, _spender='address', _value='uint256', returns=STATUS):
""" Standardized Contract API:
function approve(address _spender, uint256 _value) returns (bool success)
"""
ctx.allowances[ctx.msg_sender][_spender] += _value
ctx.Approval(ctx.msg_sender, _spender, _value)
return OK
|
def approve(ctx, _spender='address', _value='uint256', returns=STATUS):
""" Standardized Contract API:
function approve(address _spender, uint256 _value) returns (bool success)
"""
ctx.allowances[ctx.msg_sender][_spender] += _value
ctx.Approval(ctx.msg_sender, _spender, _value)
return OK
|
[
"Standardized",
"Contract",
"API",
":",
"function",
"approve",
"(",
"address",
"_spender",
"uint256",
"_value",
")",
"returns",
"(",
"bool",
"success",
")"
] |
HydraChain/hydrachain
|
python
|
https://github.com/HydraChain/hydrachain/blob/6c0919b0575dc8aa481f3a8c703e1a7f0575ecc3/hydrachain/examples/native/fungible/fungible_contract.py#L90-L96
|
[
"def",
"approve",
"(",
"ctx",
",",
"_spender",
"=",
"'address'",
",",
"_value",
"=",
"'uint256'",
",",
"returns",
"=",
"STATUS",
")",
":",
"ctx",
".",
"allowances",
"[",
"ctx",
".",
"msg_sender",
"]",
"[",
"_spender",
"]",
"+=",
"_value",
"ctx",
".",
"Approval",
"(",
"ctx",
".",
"msg_sender",
",",
"_spender",
",",
"_value",
")",
"return",
"OK"
] |
6c0919b0575dc8aa481f3a8c703e1a7f0575ecc3
|
test
|
IOU.issue_funds
|
In the IOU fungible the supply is set by Issuer, who issue funds.
|
hydrachain/examples/native/fungible/fungible_contract.py
|
def issue_funds(ctx, amount='uint256', rtgs_hash='bytes32', returns=STATUS):
"In the IOU fungible the supply is set by Issuer, who issue funds."
# allocate new issue as result of a new cash entry
ctx.accounts[ctx.msg_sender] += amount
ctx.issued_amounts[ctx.msg_sender] += amount
# Store hash(rtgs)
ctx.Issuance(ctx.msg_sender, rtgs_hash, amount)
return OK
|
def issue_funds(ctx, amount='uint256', rtgs_hash='bytes32', returns=STATUS):
"In the IOU fungible the supply is set by Issuer, who issue funds."
# allocate new issue as result of a new cash entry
ctx.accounts[ctx.msg_sender] += amount
ctx.issued_amounts[ctx.msg_sender] += amount
# Store hash(rtgs)
ctx.Issuance(ctx.msg_sender, rtgs_hash, amount)
return OK
|
[
"In",
"the",
"IOU",
"fungible",
"the",
"supply",
"is",
"set",
"by",
"Issuer",
"who",
"issue",
"funds",
"."
] |
HydraChain/hydrachain
|
python
|
https://github.com/HydraChain/hydrachain/blob/6c0919b0575dc8aa481f3a8c703e1a7f0575ecc3/hydrachain/examples/native/fungible/fungible_contract.py#L156-L163
|
[
"def",
"issue_funds",
"(",
"ctx",
",",
"amount",
"=",
"'uint256'",
",",
"rtgs_hash",
"=",
"'bytes32'",
",",
"returns",
"=",
"STATUS",
")",
":",
"# allocate new issue as result of a new cash entry",
"ctx",
".",
"accounts",
"[",
"ctx",
".",
"msg_sender",
"]",
"+=",
"amount",
"ctx",
".",
"issued_amounts",
"[",
"ctx",
".",
"msg_sender",
"]",
"+=",
"amount",
"# Store hash(rtgs)",
"ctx",
".",
"Issuance",
"(",
"ctx",
".",
"msg_sender",
",",
"rtgs_hash",
",",
"amount",
")",
"return",
"OK"
] |
6c0919b0575dc8aa481f3a8c703e1a7f0575ecc3
|
test
|
HeightManager.last_lock
|
highest lock on height
|
hydrachain/consensus/manager.py
|
def last_lock(self):
"highest lock on height"
rs = list(self.rounds)
assert len(rs) < 2 or rs[0] > rs[1] # FIXME REMOVE
for r in self.rounds: # is sorted highest to lowest
if self.rounds[r].lock is not None:
return self.rounds[r].lock
|
def last_lock(self):
"highest lock on height"
rs = list(self.rounds)
assert len(rs) < 2 or rs[0] > rs[1] # FIXME REMOVE
for r in self.rounds: # is sorted highest to lowest
if self.rounds[r].lock is not None:
return self.rounds[r].lock
|
[
"highest",
"lock",
"on",
"height"
] |
HydraChain/hydrachain
|
python
|
https://github.com/HydraChain/hydrachain/blob/6c0919b0575dc8aa481f3a8c703e1a7f0575ecc3/hydrachain/consensus/manager.py#L490-L496
|
[
"def",
"last_lock",
"(",
"self",
")",
":",
"rs",
"=",
"list",
"(",
"self",
".",
"rounds",
")",
"assert",
"len",
"(",
"rs",
")",
"<",
"2",
"or",
"rs",
"[",
"0",
"]",
">",
"rs",
"[",
"1",
"]",
"# FIXME REMOVE",
"for",
"r",
"in",
"self",
".",
"rounds",
":",
"# is sorted highest to lowest",
"if",
"self",
".",
"rounds",
"[",
"r",
"]",
".",
"lock",
"is",
"not",
"None",
":",
"return",
"self",
".",
"rounds",
"[",
"r",
"]",
".",
"lock"
] |
6c0919b0575dc8aa481f3a8c703e1a7f0575ecc3
|
test
|
HeightManager.last_voted_blockproposal
|
the last block proposal node voted on
|
hydrachain/consensus/manager.py
|
def last_voted_blockproposal(self):
"the last block proposal node voted on"
for r in self.rounds:
if isinstance(self.rounds[r].proposal, BlockProposal):
assert isinstance(self.rounds[r].lock, Vote)
if self.rounds[r].proposal.blockhash == self.rounds[r].lock.blockhash:
return self.rounds[r].proposal
|
def last_voted_blockproposal(self):
"the last block proposal node voted on"
for r in self.rounds:
if isinstance(self.rounds[r].proposal, BlockProposal):
assert isinstance(self.rounds[r].lock, Vote)
if self.rounds[r].proposal.blockhash == self.rounds[r].lock.blockhash:
return self.rounds[r].proposal
|
[
"the",
"last",
"block",
"proposal",
"node",
"voted",
"on"
] |
HydraChain/hydrachain
|
python
|
https://github.com/HydraChain/hydrachain/blob/6c0919b0575dc8aa481f3a8c703e1a7f0575ecc3/hydrachain/consensus/manager.py#L499-L505
|
[
"def",
"last_voted_blockproposal",
"(",
"self",
")",
":",
"for",
"r",
"in",
"self",
".",
"rounds",
":",
"if",
"isinstance",
"(",
"self",
".",
"rounds",
"[",
"r",
"]",
".",
"proposal",
",",
"BlockProposal",
")",
":",
"assert",
"isinstance",
"(",
"self",
".",
"rounds",
"[",
"r",
"]",
".",
"lock",
",",
"Vote",
")",
"if",
"self",
".",
"rounds",
"[",
"r",
"]",
".",
"proposal",
".",
"blockhash",
"==",
"self",
".",
"rounds",
"[",
"r",
"]",
".",
"lock",
".",
"blockhash",
":",
"return",
"self",
".",
"rounds",
"[",
"r",
"]",
".",
"proposal"
] |
6c0919b0575dc8aa481f3a8c703e1a7f0575ecc3
|
test
|
HeightManager.last_valid_lockset
|
highest valid lockset on height
|
hydrachain/consensus/manager.py
|
def last_valid_lockset(self):
"highest valid lockset on height"
for r in self.rounds:
ls = self.rounds[r].lockset
if ls.is_valid:
return ls
return None
|
def last_valid_lockset(self):
"highest valid lockset on height"
for r in self.rounds:
ls = self.rounds[r].lockset
if ls.is_valid:
return ls
return None
|
[
"highest",
"valid",
"lockset",
"on",
"height"
] |
HydraChain/hydrachain
|
python
|
https://github.com/HydraChain/hydrachain/blob/6c0919b0575dc8aa481f3a8c703e1a7f0575ecc3/hydrachain/consensus/manager.py#L508-L514
|
[
"def",
"last_valid_lockset",
"(",
"self",
")",
":",
"for",
"r",
"in",
"self",
".",
"rounds",
":",
"ls",
"=",
"self",
".",
"rounds",
"[",
"r",
"]",
".",
"lockset",
"if",
"ls",
".",
"is_valid",
":",
"return",
"ls",
"return",
"None"
] |
6c0919b0575dc8aa481f3a8c703e1a7f0575ecc3
|
test
|
RoundManager.get_timeout
|
setup a timeout for waiting for a proposal
|
hydrachain/consensus/manager.py
|
def get_timeout(self):
"setup a timeout for waiting for a proposal"
if self.timeout_time is not None or self.proposal:
return
now = self.cm.chainservice.now
round_timeout = ConsensusManager.round_timeout
round_timeout_factor = ConsensusManager.round_timeout_factor
delay = round_timeout * round_timeout_factor ** self.round
self.timeout_time = now + delay
return delay
|
def get_timeout(self):
"setup a timeout for waiting for a proposal"
if self.timeout_time is not None or self.proposal:
return
now = self.cm.chainservice.now
round_timeout = ConsensusManager.round_timeout
round_timeout_factor = ConsensusManager.round_timeout_factor
delay = round_timeout * round_timeout_factor ** self.round
self.timeout_time = now + delay
return delay
|
[
"setup",
"a",
"timeout",
"for",
"waiting",
"for",
"a",
"proposal"
] |
HydraChain/hydrachain
|
python
|
https://github.com/HydraChain/hydrachain/blob/6c0919b0575dc8aa481f3a8c703e1a7f0575ecc3/hydrachain/consensus/manager.py#L572-L581
|
[
"def",
"get_timeout",
"(",
"self",
")",
":",
"if",
"self",
".",
"timeout_time",
"is",
"not",
"None",
"or",
"self",
".",
"proposal",
":",
"return",
"now",
"=",
"self",
".",
"cm",
".",
"chainservice",
".",
"now",
"round_timeout",
"=",
"ConsensusManager",
".",
"round_timeout",
"round_timeout_factor",
"=",
"ConsensusManager",
".",
"round_timeout_factor",
"delay",
"=",
"round_timeout",
"*",
"round_timeout_factor",
"**",
"self",
".",
"round",
"self",
".",
"timeout_time",
"=",
"now",
"+",
"delay",
"return",
"delay"
] |
6c0919b0575dc8aa481f3a8c703e1a7f0575ecc3
|
test
|
Synchronizer.request
|
sync the missing blocks between:
head
highest height with signing lockset
we get these locksets by collecting votes on all heights
|
hydrachain/consensus/synchronizer.py
|
def request(self):
"""
sync the missing blocks between:
head
highest height with signing lockset
we get these locksets by collecting votes on all heights
"""
missing = self.missing
self.cm.log('sync.request', missing=len(missing), requested=len(self.requested),
received=len(self.received))
if self.requested:
self.cm.log('waiting for requested')
return
if len(self.received) + self.max_getproposals_count >= self.max_queued:
self.cm.log('queue is full')
return
if not missing:
self.cm.log('insync')
return
if self.last_active_protocol is None: # FIXME, check if it is active
self.cm.log('no active protocol', last_active_protocol=self.last_active_protocol)
return
self.cm.log('collecting')
blocknumbers = []
for h in missing:
if h not in self.received and h not in self.requested:
blocknumbers.append(h)
self.requested.add(h)
if len(blocknumbers) == self.max_getproposals_count:
break
self.cm.log('collected', num=len(blocknumbers))
if not blocknumbers:
return
self.cm.log('requesting', num=len(blocknumbers),
requesting_range=(blocknumbers[0], blocknumbers[-1]))
self.last_active_protocol.send_getblockproposals(*blocknumbers)
# setup alarm
self.cm.chainservice.setup_alarm(self.timeout, self.on_alarm, blocknumbers)
|
def request(self):
"""
sync the missing blocks between:
head
highest height with signing lockset
we get these locksets by collecting votes on all heights
"""
missing = self.missing
self.cm.log('sync.request', missing=len(missing), requested=len(self.requested),
received=len(self.received))
if self.requested:
self.cm.log('waiting for requested')
return
if len(self.received) + self.max_getproposals_count >= self.max_queued:
self.cm.log('queue is full')
return
if not missing:
self.cm.log('insync')
return
if self.last_active_protocol is None: # FIXME, check if it is active
self.cm.log('no active protocol', last_active_protocol=self.last_active_protocol)
return
self.cm.log('collecting')
blocknumbers = []
for h in missing:
if h not in self.received and h not in self.requested:
blocknumbers.append(h)
self.requested.add(h)
if len(blocknumbers) == self.max_getproposals_count:
break
self.cm.log('collected', num=len(blocknumbers))
if not blocknumbers:
return
self.cm.log('requesting', num=len(blocknumbers),
requesting_range=(blocknumbers[0], blocknumbers[-1]))
self.last_active_protocol.send_getblockproposals(*blocknumbers)
# setup alarm
self.cm.chainservice.setup_alarm(self.timeout, self.on_alarm, blocknumbers)
|
[
"sync",
"the",
"missing",
"blocks",
"between",
":",
"head",
"highest",
"height",
"with",
"signing",
"lockset"
] |
HydraChain/hydrachain
|
python
|
https://github.com/HydraChain/hydrachain/blob/6c0919b0575dc8aa481f3a8c703e1a7f0575ecc3/hydrachain/consensus/synchronizer.py#L38-L76
|
[
"def",
"request",
"(",
"self",
")",
":",
"missing",
"=",
"self",
".",
"missing",
"self",
".",
"cm",
".",
"log",
"(",
"'sync.request'",
",",
"missing",
"=",
"len",
"(",
"missing",
")",
",",
"requested",
"=",
"len",
"(",
"self",
".",
"requested",
")",
",",
"received",
"=",
"len",
"(",
"self",
".",
"received",
")",
")",
"if",
"self",
".",
"requested",
":",
"self",
".",
"cm",
".",
"log",
"(",
"'waiting for requested'",
")",
"return",
"if",
"len",
"(",
"self",
".",
"received",
")",
"+",
"self",
".",
"max_getproposals_count",
">=",
"self",
".",
"max_queued",
":",
"self",
".",
"cm",
".",
"log",
"(",
"'queue is full'",
")",
"return",
"if",
"not",
"missing",
":",
"self",
".",
"cm",
".",
"log",
"(",
"'insync'",
")",
"return",
"if",
"self",
".",
"last_active_protocol",
"is",
"None",
":",
"# FIXME, check if it is active",
"self",
".",
"cm",
".",
"log",
"(",
"'no active protocol'",
",",
"last_active_protocol",
"=",
"self",
".",
"last_active_protocol",
")",
"return",
"self",
".",
"cm",
".",
"log",
"(",
"'collecting'",
")",
"blocknumbers",
"=",
"[",
"]",
"for",
"h",
"in",
"missing",
":",
"if",
"h",
"not",
"in",
"self",
".",
"received",
"and",
"h",
"not",
"in",
"self",
".",
"requested",
":",
"blocknumbers",
".",
"append",
"(",
"h",
")",
"self",
".",
"requested",
".",
"add",
"(",
"h",
")",
"if",
"len",
"(",
"blocknumbers",
")",
"==",
"self",
".",
"max_getproposals_count",
":",
"break",
"self",
".",
"cm",
".",
"log",
"(",
"'collected'",
",",
"num",
"=",
"len",
"(",
"blocknumbers",
")",
")",
"if",
"not",
"blocknumbers",
":",
"return",
"self",
".",
"cm",
".",
"log",
"(",
"'requesting'",
",",
"num",
"=",
"len",
"(",
"blocknumbers",
")",
",",
"requesting_range",
"=",
"(",
"blocknumbers",
"[",
"0",
"]",
",",
"blocknumbers",
"[",
"-",
"1",
"]",
")",
")",
"self",
".",
"last_active_protocol",
".",
"send_getblockproposals",
"(",
"*",
"blocknumbers",
")",
"# setup alarm",
"self",
".",
"cm",
".",
"chainservice",
".",
"setup_alarm",
"(",
"self",
".",
"timeout",
",",
"self",
".",
"on_alarm",
",",
"blocknumbers",
")"
] |
6c0919b0575dc8aa481f3a8c703e1a7f0575ecc3
|
test
|
Synchronizer.on_proposal
|
called to inform about synced peers
|
hydrachain/consensus/synchronizer.py
|
def on_proposal(self, proposal, proto):
"called to inform about synced peers"
assert isinstance(proto, HDCProtocol)
assert isinstance(proposal, Proposal)
if proposal.height >= self.cm.height:
assert proposal.lockset.is_valid
self.last_active_protocol = proto
|
def on_proposal(self, proposal, proto):
"called to inform about synced peers"
assert isinstance(proto, HDCProtocol)
assert isinstance(proposal, Proposal)
if proposal.height >= self.cm.height:
assert proposal.lockset.is_valid
self.last_active_protocol = proto
|
[
"called",
"to",
"inform",
"about",
"synced",
"peers"
] |
HydraChain/hydrachain
|
python
|
https://github.com/HydraChain/hydrachain/blob/6c0919b0575dc8aa481f3a8c703e1a7f0575ecc3/hydrachain/consensus/synchronizer.py#L78-L84
|
[
"def",
"on_proposal",
"(",
"self",
",",
"proposal",
",",
"proto",
")",
":",
"assert",
"isinstance",
"(",
"proto",
",",
"HDCProtocol",
")",
"assert",
"isinstance",
"(",
"proposal",
",",
"Proposal",
")",
"if",
"proposal",
".",
"height",
">=",
"self",
".",
"cm",
".",
"height",
":",
"assert",
"proposal",
".",
"lockset",
".",
"is_valid",
"self",
".",
"last_active_protocol",
"=",
"proto"
] |
6c0919b0575dc8aa481f3a8c703e1a7f0575ecc3
|
test
|
wait_next_block_factory
|
Creates a `wait_next_block` function, that
will wait `timeout` seconds (`None` = indefinitely)
for a new block to appear.
:param app: the app-instance the function should work for
:param timeout: timeout in seconds
|
hydrachain/nc_utils.py
|
def wait_next_block_factory(app, timeout=None):
"""Creates a `wait_next_block` function, that
will wait `timeout` seconds (`None` = indefinitely)
for a new block to appear.
:param app: the app-instance the function should work for
:param timeout: timeout in seconds
"""
chain = app.services.chain
# setup new block callbacks and events
new_block_evt = gevent.event.Event()
def _on_new_block(app):
log.DEV('new block mined')
new_block_evt.set()
chain.on_new_head_cbs.append(_on_new_block)
def wait_next_block():
bn = chain.chain.head.number
chain.consensus_manager.log('waiting for new block', block=bn)
new_block_evt.wait(timeout)
new_block_evt.clear()
if chain.chain.head.number > bn:
chain.consensus_manager.log('new block event', block=chain.chain.head.number)
elif chain.chain.head.number == bn:
chain.consensus_manager.log('wait_next_block timed out', block=bn)
return wait_next_block
|
def wait_next_block_factory(app, timeout=None):
"""Creates a `wait_next_block` function, that
will wait `timeout` seconds (`None` = indefinitely)
for a new block to appear.
:param app: the app-instance the function should work for
:param timeout: timeout in seconds
"""
chain = app.services.chain
# setup new block callbacks and events
new_block_evt = gevent.event.Event()
def _on_new_block(app):
log.DEV('new block mined')
new_block_evt.set()
chain.on_new_head_cbs.append(_on_new_block)
def wait_next_block():
bn = chain.chain.head.number
chain.consensus_manager.log('waiting for new block', block=bn)
new_block_evt.wait(timeout)
new_block_evt.clear()
if chain.chain.head.number > bn:
chain.consensus_manager.log('new block event', block=chain.chain.head.number)
elif chain.chain.head.number == bn:
chain.consensus_manager.log('wait_next_block timed out', block=bn)
return wait_next_block
|
[
"Creates",
"a",
"wait_next_block",
"function",
"that",
"will",
"wait",
"timeout",
"seconds",
"(",
"None",
"=",
"indefinitely",
")",
"for",
"a",
"new",
"block",
"to",
"appear",
"."
] |
HydraChain/hydrachain
|
python
|
https://github.com/HydraChain/hydrachain/blob/6c0919b0575dc8aa481f3a8c703e1a7f0575ecc3/hydrachain/nc_utils.py#L40-L69
|
[
"def",
"wait_next_block_factory",
"(",
"app",
",",
"timeout",
"=",
"None",
")",
":",
"chain",
"=",
"app",
".",
"services",
".",
"chain",
"# setup new block callbacks and events",
"new_block_evt",
"=",
"gevent",
".",
"event",
".",
"Event",
"(",
")",
"def",
"_on_new_block",
"(",
"app",
")",
":",
"log",
".",
"DEV",
"(",
"'new block mined'",
")",
"new_block_evt",
".",
"set",
"(",
")",
"chain",
".",
"on_new_head_cbs",
".",
"append",
"(",
"_on_new_block",
")",
"def",
"wait_next_block",
"(",
")",
":",
"bn",
"=",
"chain",
".",
"chain",
".",
"head",
".",
"number",
"chain",
".",
"consensus_manager",
".",
"log",
"(",
"'waiting for new block'",
",",
"block",
"=",
"bn",
")",
"new_block_evt",
".",
"wait",
"(",
"timeout",
")",
"new_block_evt",
".",
"clear",
"(",
")",
"if",
"chain",
".",
"chain",
".",
"head",
".",
"number",
">",
"bn",
":",
"chain",
".",
"consensus_manager",
".",
"log",
"(",
"'new block event'",
",",
"block",
"=",
"chain",
".",
"chain",
".",
"head",
".",
"number",
")",
"elif",
"chain",
".",
"chain",
".",
"head",
".",
"number",
"==",
"bn",
":",
"chain",
".",
"consensus_manager",
".",
"log",
"(",
"'wait_next_block timed out'",
",",
"block",
"=",
"bn",
")",
"return",
"wait_next_block"
] |
6c0919b0575dc8aa481f3a8c703e1a7f0575ecc3
|
test
|
mk_privkeys
|
make privkeys that support coloring, see utils.cstr
|
hydrachain/consensus/simulation.py
|
def mk_privkeys(num):
"make privkeys that support coloring, see utils.cstr"
privkeys = []
assert num <= num_colors
for i in range(num):
j = 0
while True:
k = sha3(str(j))
a = privtoaddr(k)
an = big_endian_to_int(a)
if an % num_colors == i:
break
j += 1
privkeys.append(k)
return privkeys
|
def mk_privkeys(num):
"make privkeys that support coloring, see utils.cstr"
privkeys = []
assert num <= num_colors
for i in range(num):
j = 0
while True:
k = sha3(str(j))
a = privtoaddr(k)
an = big_endian_to_int(a)
if an % num_colors == i:
break
j += 1
privkeys.append(k)
return privkeys
|
[
"make",
"privkeys",
"that",
"support",
"coloring",
"see",
"utils",
".",
"cstr"
] |
HydraChain/hydrachain
|
python
|
https://github.com/HydraChain/hydrachain/blob/6c0919b0575dc8aa481f3a8c703e1a7f0575ecc3/hydrachain/consensus/simulation.py#L41-L55
|
[
"def",
"mk_privkeys",
"(",
"num",
")",
":",
"privkeys",
"=",
"[",
"]",
"assert",
"num",
"<=",
"num_colors",
"for",
"i",
"in",
"range",
"(",
"num",
")",
":",
"j",
"=",
"0",
"while",
"True",
":",
"k",
"=",
"sha3",
"(",
"str",
"(",
"j",
")",
")",
"a",
"=",
"privtoaddr",
"(",
"k",
")",
"an",
"=",
"big_endian_to_int",
"(",
"a",
")",
"if",
"an",
"%",
"num_colors",
"==",
"i",
":",
"break",
"j",
"+=",
"1",
"privkeys",
".",
"append",
"(",
"k",
")",
"return",
"privkeys"
] |
6c0919b0575dc8aa481f3a8c703e1a7f0575ecc3
|
test
|
Transport.delay
|
bandwidths are inaccurate, as we don't account for parallel transfers here
|
hydrachain/consensus/simulation.py
|
def delay(self, sender, receiver, packet, add_delay=0):
"""
bandwidths are inaccurate, as we don't account for parallel transfers here
"""
bw = min(sender.ul_bandwidth, receiver.dl_bandwidth)
delay = sender.base_latency + receiver.base_latency
delay += len(packet) / bw
delay += add_delay
return delay
|
def delay(self, sender, receiver, packet, add_delay=0):
"""
bandwidths are inaccurate, as we don't account for parallel transfers here
"""
bw = min(sender.ul_bandwidth, receiver.dl_bandwidth)
delay = sender.base_latency + receiver.base_latency
delay += len(packet) / bw
delay += add_delay
return delay
|
[
"bandwidths",
"are",
"inaccurate",
"as",
"we",
"don",
"t",
"account",
"for",
"parallel",
"transfers",
"here"
] |
HydraChain/hydrachain
|
python
|
https://github.com/HydraChain/hydrachain/blob/6c0919b0575dc8aa481f3a8c703e1a7f0575ecc3/hydrachain/consensus/simulation.py#L63-L71
|
[
"def",
"delay",
"(",
"self",
",",
"sender",
",",
"receiver",
",",
"packet",
",",
"add_delay",
"=",
"0",
")",
":",
"bw",
"=",
"min",
"(",
"sender",
".",
"ul_bandwidth",
",",
"receiver",
".",
"dl_bandwidth",
")",
"delay",
"=",
"sender",
".",
"base_latency",
"+",
"receiver",
".",
"base_latency",
"delay",
"+=",
"len",
"(",
"packet",
")",
"/",
"bw",
"delay",
"+=",
"add_delay",
"return",
"delay"
] |
6c0919b0575dc8aa481f3a8c703e1a7f0575ecc3
|
test
|
SlowTransport.deliver
|
deliver on edge of timeout_window
|
hydrachain/consensus/simulation.py
|
def deliver(self, sender, receiver, packet):
"deliver on edge of timeout_window"
to = ConsensusManager.round_timeout
assert to > 0
print "in slow transport deliver"
super(SlowTransport, self).deliver(sender, receiver, packet, add_delay=to)
|
def deliver(self, sender, receiver, packet):
"deliver on edge of timeout_window"
to = ConsensusManager.round_timeout
assert to > 0
print "in slow transport deliver"
super(SlowTransport, self).deliver(sender, receiver, packet, add_delay=to)
|
[
"deliver",
"on",
"edge",
"of",
"timeout_window"
] |
HydraChain/hydrachain
|
python
|
https://github.com/HydraChain/hydrachain/blob/6c0919b0575dc8aa481f3a8c703e1a7f0575ecc3/hydrachain/consensus/simulation.py#L104-L109
|
[
"def",
"deliver",
"(",
"self",
",",
"sender",
",",
"receiver",
",",
"packet",
")",
":",
"to",
"=",
"ConsensusManager",
".",
"round_timeout",
"assert",
"to",
">",
"0",
"print",
"\"in slow transport deliver\"",
"super",
"(",
"SlowTransport",
",",
"self",
")",
".",
"deliver",
"(",
"sender",
",",
"receiver",
",",
"packet",
",",
"add_delay",
"=",
"to",
")"
] |
6c0919b0575dc8aa481f3a8c703e1a7f0575ecc3
|
test
|
abi_encode_args
|
encode args for method: method_id|data
|
hydrachain/native_contracts.py
|
def abi_encode_args(method, args):
"encode args for method: method_id|data"
assert issubclass(method.im_class, NativeABIContract), method.im_class
m_abi = method.im_class._get_method_abi(method)
return zpad(encode_int(m_abi['id']), 4) + abi.encode_abi(m_abi['arg_types'], args)
|
def abi_encode_args(method, args):
"encode args for method: method_id|data"
assert issubclass(method.im_class, NativeABIContract), method.im_class
m_abi = method.im_class._get_method_abi(method)
return zpad(encode_int(m_abi['id']), 4) + abi.encode_abi(m_abi['arg_types'], args)
|
[
"encode",
"args",
"for",
"method",
":",
"method_id|data"
] |
HydraChain/hydrachain
|
python
|
https://github.com/HydraChain/hydrachain/blob/6c0919b0575dc8aa481f3a8c703e1a7f0575ecc3/hydrachain/native_contracts.py#L215-L219
|
[
"def",
"abi_encode_args",
"(",
"method",
",",
"args",
")",
":",
"assert",
"issubclass",
"(",
"method",
".",
"im_class",
",",
"NativeABIContract",
")",
",",
"method",
".",
"im_class",
"m_abi",
"=",
"method",
".",
"im_class",
".",
"_get_method_abi",
"(",
"method",
")",
"return",
"zpad",
"(",
"encode_int",
"(",
"m_abi",
"[",
"'id'",
"]",
")",
",",
"4",
")",
"+",
"abi",
".",
"encode_abi",
"(",
"m_abi",
"[",
"'arg_types'",
"]",
",",
"args",
")"
] |
6c0919b0575dc8aa481f3a8c703e1a7f0575ecc3
|
test
|
chain_nac_proxy
|
create an object which acts as a proxy for the contract on the chain
|
hydrachain/native_contracts.py
|
def chain_nac_proxy(chain, sender, contract_address, value=0):
"create an object which acts as a proxy for the contract on the chain"
klass = registry[contract_address].im_self
assert issubclass(klass, NativeABIContract)
def mk_method(method):
def m(s, *args):
data = abi_encode_args(method, args)
block = chain.head_candidate
output = test_call(block, sender, contract_address, data)
if output is not None:
return abi_decode_return_vals(method, output)
return m
class cproxy(object):
pass
for m in klass._abi_methods():
setattr(cproxy, m.__func__.func_name, mk_method(m))
return cproxy()
|
def chain_nac_proxy(chain, sender, contract_address, value=0):
"create an object which acts as a proxy for the contract on the chain"
klass = registry[contract_address].im_self
assert issubclass(klass, NativeABIContract)
def mk_method(method):
def m(s, *args):
data = abi_encode_args(method, args)
block = chain.head_candidate
output = test_call(block, sender, contract_address, data)
if output is not None:
return abi_decode_return_vals(method, output)
return m
class cproxy(object):
pass
for m in klass._abi_methods():
setattr(cproxy, m.__func__.func_name, mk_method(m))
return cproxy()
|
[
"create",
"an",
"object",
"which",
"acts",
"as",
"a",
"proxy",
"for",
"the",
"contract",
"on",
"the",
"chain"
] |
HydraChain/hydrachain
|
python
|
https://github.com/HydraChain/hydrachain/blob/6c0919b0575dc8aa481f3a8c703e1a7f0575ecc3/hydrachain/native_contracts.py#L587-L606
|
[
"def",
"chain_nac_proxy",
"(",
"chain",
",",
"sender",
",",
"contract_address",
",",
"value",
"=",
"0",
")",
":",
"klass",
"=",
"registry",
"[",
"contract_address",
"]",
".",
"im_self",
"assert",
"issubclass",
"(",
"klass",
",",
"NativeABIContract",
")",
"def",
"mk_method",
"(",
"method",
")",
":",
"def",
"m",
"(",
"s",
",",
"*",
"args",
")",
":",
"data",
"=",
"abi_encode_args",
"(",
"method",
",",
"args",
")",
"block",
"=",
"chain",
".",
"head_candidate",
"output",
"=",
"test_call",
"(",
"block",
",",
"sender",
",",
"contract_address",
",",
"data",
")",
"if",
"output",
"is",
"not",
"None",
":",
"return",
"abi_decode_return_vals",
"(",
"method",
",",
"output",
")",
"return",
"m",
"class",
"cproxy",
"(",
"object",
")",
":",
"pass",
"for",
"m",
"in",
"klass",
".",
"_abi_methods",
"(",
")",
":",
"setattr",
"(",
"cproxy",
",",
"m",
".",
"__func__",
".",
"func_name",
",",
"mk_method",
"(",
"m",
")",
")",
"return",
"cproxy",
"(",
")"
] |
6c0919b0575dc8aa481f3a8c703e1a7f0575ecc3
|
test
|
Registry.address_to_native_contract_class
|
returns class._on_msg_unsafe, use x.im_self to get class
|
hydrachain/native_contracts.py
|
def address_to_native_contract_class(self, address):
"returns class._on_msg_unsafe, use x.im_self to get class"
assert isinstance(address, bytes) and len(address) == 20
assert self.is_instance_address(address)
nca = self.native_contract_address_prefix + address[-4:]
return self.native_contracts[nca]
|
def address_to_native_contract_class(self, address):
"returns class._on_msg_unsafe, use x.im_self to get class"
assert isinstance(address, bytes) and len(address) == 20
assert self.is_instance_address(address)
nca = self.native_contract_address_prefix + address[-4:]
return self.native_contracts[nca]
|
[
"returns",
"class",
".",
"_on_msg_unsafe",
"use",
"x",
".",
"im_self",
"to",
"get",
"class"
] |
HydraChain/hydrachain
|
python
|
https://github.com/HydraChain/hydrachain/blob/6c0919b0575dc8aa481f3a8c703e1a7f0575ecc3/hydrachain/native_contracts.py#L79-L84
|
[
"def",
"address_to_native_contract_class",
"(",
"self",
",",
"address",
")",
":",
"assert",
"isinstance",
"(",
"address",
",",
"bytes",
")",
"and",
"len",
"(",
"address",
")",
"==",
"20",
"assert",
"self",
".",
"is_instance_address",
"(",
"address",
")",
"nca",
"=",
"self",
".",
"native_contract_address_prefix",
"+",
"address",
"[",
"-",
"4",
":",
"]",
"return",
"self",
".",
"native_contracts",
"[",
"nca",
"]"
] |
6c0919b0575dc8aa481f3a8c703e1a7f0575ecc3
|
test
|
Registry.register
|
registers NativeContract classes
|
hydrachain/native_contracts.py
|
def register(self, contract):
"registers NativeContract classes"
assert issubclass(contract, NativeContractBase)
assert len(contract.address) == 20
assert contract.address.startswith(self.native_contract_address_prefix)
if self.native_contracts.get(contract.address) == contract._on_msg:
log.debug("already registered", contract=contract, address=contract.address)
return
assert contract.address not in self.native_contracts, 'address already taken'
self.native_contracts[contract.address] = contract._on_msg
log.debug("registered native contract", contract=contract, address=contract.address)
|
def register(self, contract):
"registers NativeContract classes"
assert issubclass(contract, NativeContractBase)
assert len(contract.address) == 20
assert contract.address.startswith(self.native_contract_address_prefix)
if self.native_contracts.get(contract.address) == contract._on_msg:
log.debug("already registered", contract=contract, address=contract.address)
return
assert contract.address not in self.native_contracts, 'address already taken'
self.native_contracts[contract.address] = contract._on_msg
log.debug("registered native contract", contract=contract, address=contract.address)
|
[
"registers",
"NativeContract",
"classes"
] |
HydraChain/hydrachain
|
python
|
https://github.com/HydraChain/hydrachain/blob/6c0919b0575dc8aa481f3a8c703e1a7f0575ecc3/hydrachain/native_contracts.py#L86-L96
|
[
"def",
"register",
"(",
"self",
",",
"contract",
")",
":",
"assert",
"issubclass",
"(",
"contract",
",",
"NativeContractBase",
")",
"assert",
"len",
"(",
"contract",
".",
"address",
")",
"==",
"20",
"assert",
"contract",
".",
"address",
".",
"startswith",
"(",
"self",
".",
"native_contract_address_prefix",
")",
"if",
"self",
".",
"native_contracts",
".",
"get",
"(",
"contract",
".",
"address",
")",
"==",
"contract",
".",
"_on_msg",
":",
"log",
".",
"debug",
"(",
"\"already registered\"",
",",
"contract",
"=",
"contract",
",",
"address",
"=",
"contract",
".",
"address",
")",
"return",
"assert",
"contract",
".",
"address",
"not",
"in",
"self",
".",
"native_contracts",
",",
"'address already taken'",
"self",
".",
"native_contracts",
"[",
"contract",
".",
"address",
"]",
"=",
"contract",
".",
"_on_msg",
"log",
".",
"debug",
"(",
"\"registered native contract\"",
",",
"contract",
"=",
"contract",
",",
"address",
"=",
"contract",
".",
"address",
")"
] |
6c0919b0575dc8aa481f3a8c703e1a7f0575ecc3
|
test
|
validators_from_config
|
Consolidate (potentially hex-encoded) list of validators
into list of binary address representations.
|
hydrachain/hdc_service.py
|
def validators_from_config(validators):
"""Consolidate (potentially hex-encoded) list of validators
into list of binary address representations.
"""
result = []
for validator in validators:
if len(validator) == 40:
validator = validator.decode('hex')
result.append(validator)
return result
|
def validators_from_config(validators):
"""Consolidate (potentially hex-encoded) list of validators
into list of binary address representations.
"""
result = []
for validator in validators:
if len(validator) == 40:
validator = validator.decode('hex')
result.append(validator)
return result
|
[
"Consolidate",
"(",
"potentially",
"hex",
"-",
"encoded",
")",
"list",
"of",
"validators",
"into",
"list",
"of",
"binary",
"address",
"representations",
"."
] |
HydraChain/hydrachain
|
python
|
https://github.com/HydraChain/hydrachain/blob/6c0919b0575dc8aa481f3a8c703e1a7f0575ecc3/hydrachain/hdc_service.py#L530-L539
|
[
"def",
"validators_from_config",
"(",
"validators",
")",
":",
"result",
"=",
"[",
"]",
"for",
"validator",
"in",
"validators",
":",
"if",
"len",
"(",
"validator",
")",
"==",
"40",
":",
"validator",
"=",
"validator",
".",
"decode",
"(",
"'hex'",
")",
"result",
".",
"append",
"(",
"validator",
")",
"return",
"result"
] |
6c0919b0575dc8aa481f3a8c703e1a7f0575ecc3
|
test
|
DuplicatesFilter.update
|
returns True if unknown
|
hydrachain/hdc_service.py
|
def update(self, data):
"returns True if unknown"
if data not in self.filter:
self.filter.append(data)
if len(self.filter) > self.max_items:
self.filter.pop(0)
return True
else:
self.filter.append(self.filter.pop(0))
return False
|
def update(self, data):
"returns True if unknown"
if data not in self.filter:
self.filter.append(data)
if len(self.filter) > self.max_items:
self.filter.pop(0)
return True
else:
self.filter.append(self.filter.pop(0))
return False
|
[
"returns",
"True",
"if",
"unknown"
] |
HydraChain/hydrachain
|
python
|
https://github.com/HydraChain/hydrachain/blob/6c0919b0575dc8aa481f3a8c703e1a7f0575ecc3/hydrachain/hdc_service.py#L52-L61
|
[
"def",
"update",
"(",
"self",
",",
"data",
")",
":",
"if",
"data",
"not",
"in",
"self",
".",
"filter",
":",
"self",
".",
"filter",
".",
"append",
"(",
"data",
")",
"if",
"len",
"(",
"self",
".",
"filter",
")",
">",
"self",
".",
"max_items",
":",
"self",
".",
"filter",
".",
"pop",
"(",
"0",
")",
"return",
"True",
"else",
":",
"self",
".",
"filter",
".",
"append",
"(",
"self",
".",
"filter",
".",
"pop",
"(",
"0",
")",
")",
"return",
"False"
] |
6c0919b0575dc8aa481f3a8c703e1a7f0575ecc3
|
test
|
ChainService.add_transaction
|
Warning:
Locking proposal_lock may block incoming events which are necessary to unlock!
I.e. votes / blocks!
Take care!
|
hydrachain/hdc_service.py
|
def add_transaction(self, tx, origin=None, force_broadcast=False):
"""
Warning:
Locking proposal_lock may block incoming events which are necessary to unlock!
I.e. votes / blocks!
Take care!
"""
self.consensus_manager.log(
'add_transaction', blk=self.chain.head_candidate, lock=self.proposal_lock)
log.debug('add_transaction', lock=self.proposal_lock)
block = self.proposal_lock.block
self.proposal_lock.acquire()
self.consensus_manager.log('add_transaction acquired lock', lock=self.proposal_lock)
assert not hasattr(self.chain.head_candidate, 'should_be_locked')
success = super(ChainService, self).add_transaction(tx, origin, force_broadcast)
if self.proposal_lock.is_locked(): # can be unlock if we are at a new block
self.proposal_lock.release(if_block=block)
log.debug('added transaction', num_txs=self.chain.head_candidate.num_transactions())
return success
|
def add_transaction(self, tx, origin=None, force_broadcast=False):
"""
Warning:
Locking proposal_lock may block incoming events which are necessary to unlock!
I.e. votes / blocks!
Take care!
"""
self.consensus_manager.log(
'add_transaction', blk=self.chain.head_candidate, lock=self.proposal_lock)
log.debug('add_transaction', lock=self.proposal_lock)
block = self.proposal_lock.block
self.proposal_lock.acquire()
self.consensus_manager.log('add_transaction acquired lock', lock=self.proposal_lock)
assert not hasattr(self.chain.head_candidate, 'should_be_locked')
success = super(ChainService, self).add_transaction(tx, origin, force_broadcast)
if self.proposal_lock.is_locked(): # can be unlock if we are at a new block
self.proposal_lock.release(if_block=block)
log.debug('added transaction', num_txs=self.chain.head_candidate.num_transactions())
return success
|
[
"Warning",
":",
"Locking",
"proposal_lock",
"may",
"block",
"incoming",
"events",
"which",
"are",
"necessary",
"to",
"unlock!",
"I",
".",
"e",
".",
"votes",
"/",
"blocks!",
"Take",
"care!"
] |
HydraChain/hydrachain
|
python
|
https://github.com/HydraChain/hydrachain/blob/6c0919b0575dc8aa481f3a8c703e1a7f0575ecc3/hydrachain/hdc_service.py#L307-L325
|
[
"def",
"add_transaction",
"(",
"self",
",",
"tx",
",",
"origin",
"=",
"None",
",",
"force_broadcast",
"=",
"False",
")",
":",
"self",
".",
"consensus_manager",
".",
"log",
"(",
"'add_transaction'",
",",
"blk",
"=",
"self",
".",
"chain",
".",
"head_candidate",
",",
"lock",
"=",
"self",
".",
"proposal_lock",
")",
"log",
".",
"debug",
"(",
"'add_transaction'",
",",
"lock",
"=",
"self",
".",
"proposal_lock",
")",
"block",
"=",
"self",
".",
"proposal_lock",
".",
"block",
"self",
".",
"proposal_lock",
".",
"acquire",
"(",
")",
"self",
".",
"consensus_manager",
".",
"log",
"(",
"'add_transaction acquired lock'",
",",
"lock",
"=",
"self",
".",
"proposal_lock",
")",
"assert",
"not",
"hasattr",
"(",
"self",
".",
"chain",
".",
"head_candidate",
",",
"'should_be_locked'",
")",
"success",
"=",
"super",
"(",
"ChainService",
",",
"self",
")",
".",
"add_transaction",
"(",
"tx",
",",
"origin",
",",
"force_broadcast",
")",
"if",
"self",
".",
"proposal_lock",
".",
"is_locked",
"(",
")",
":",
"# can be unlock if we are at a new block",
"self",
".",
"proposal_lock",
".",
"release",
"(",
"if_block",
"=",
"block",
")",
"log",
".",
"debug",
"(",
"'added transaction'",
",",
"num_txs",
"=",
"self",
".",
"chain",
".",
"head_candidate",
".",
"num_transactions",
"(",
")",
")",
"return",
"success"
] |
6c0919b0575dc8aa481f3a8c703e1a7f0575ecc3
|
test
|
ChainService.on_receive_transactions
|
receives rlp.decoded serialized
|
hydrachain/hdc_service.py
|
def on_receive_transactions(self, proto, transactions):
"receives rlp.decoded serialized"
log.debug('----------------------------------')
log.debug('remote_transactions_received', count=len(transactions), remote_id=proto)
def _add_txs():
for tx in transactions:
self.add_transaction(tx, origin=proto)
gevent.spawn(_add_txs)
|
def on_receive_transactions(self, proto, transactions):
"receives rlp.decoded serialized"
log.debug('----------------------------------')
log.debug('remote_transactions_received', count=len(transactions), remote_id=proto)
def _add_txs():
for tx in transactions:
self.add_transaction(tx, origin=proto)
gevent.spawn(_add_txs)
|
[
"receives",
"rlp",
".",
"decoded",
"serialized"
] |
HydraChain/hydrachain
|
python
|
https://github.com/HydraChain/hydrachain/blob/6c0919b0575dc8aa481f3a8c703e1a7f0575ecc3/hydrachain/hdc_service.py#L366-L374
|
[
"def",
"on_receive_transactions",
"(",
"self",
",",
"proto",
",",
"transactions",
")",
":",
"log",
".",
"debug",
"(",
"'----------------------------------'",
")",
"log",
".",
"debug",
"(",
"'remote_transactions_received'",
",",
"count",
"=",
"len",
"(",
"transactions",
")",
",",
"remote_id",
"=",
"proto",
")",
"def",
"_add_txs",
"(",
")",
":",
"for",
"tx",
"in",
"transactions",
":",
"self",
".",
"add_transaction",
"(",
"tx",
",",
"origin",
"=",
"proto",
")",
"gevent",
".",
"spawn",
"(",
"_add_txs",
")"
] |
6c0919b0575dc8aa481f3a8c703e1a7f0575ecc3
|
test
|
img_from_vgg
|
Decondition an image from the VGG16 model.
|
keras_vgg_buddy/models.py
|
def img_from_vgg(x):
'''Decondition an image from the VGG16 model.'''
x = x.transpose((1, 2, 0))
x[:, :, 0] += 103.939
x[:, :, 1] += 116.779
x[:, :, 2] += 123.68
x = x[:,:,::-1] # to RGB
return x
|
def img_from_vgg(x):
'''Decondition an image from the VGG16 model.'''
x = x.transpose((1, 2, 0))
x[:, :, 0] += 103.939
x[:, :, 1] += 116.779
x[:, :, 2] += 123.68
x = x[:,:,::-1] # to RGB
return x
|
[
"Decondition",
"an",
"image",
"from",
"the",
"VGG16",
"model",
"."
] |
awentzonline/keras-vgg-buddy
|
python
|
https://github.com/awentzonline/keras-vgg-buddy/blob/716cb66396b839a66ec8dc66998066b360a8f395/keras_vgg_buddy/models.py#L11-L18
|
[
"def",
"img_from_vgg",
"(",
"x",
")",
":",
"x",
"=",
"x",
".",
"transpose",
"(",
"(",
"1",
",",
"2",
",",
"0",
")",
")",
"x",
"[",
":",
",",
":",
",",
"0",
"]",
"+=",
"103.939",
"x",
"[",
":",
",",
":",
",",
"1",
"]",
"+=",
"116.779",
"x",
"[",
":",
",",
":",
",",
"2",
"]",
"+=",
"123.68",
"x",
"=",
"x",
"[",
":",
",",
":",
",",
":",
":",
"-",
"1",
"]",
"# to RGB",
"return",
"x"
] |
716cb66396b839a66ec8dc66998066b360a8f395
|
test
|
img_to_vgg
|
Condition an image for use with the VGG16 model.
|
keras_vgg_buddy/models.py
|
def img_to_vgg(x):
'''Condition an image for use with the VGG16 model.'''
x = x[:,:,::-1] # to BGR
x[:, :, 0] -= 103.939
x[:, :, 1] -= 116.779
x[:, :, 2] -= 123.68
x = x.transpose((2, 0, 1))
return x
|
def img_to_vgg(x):
'''Condition an image for use with the VGG16 model.'''
x = x[:,:,::-1] # to BGR
x[:, :, 0] -= 103.939
x[:, :, 1] -= 116.779
x[:, :, 2] -= 123.68
x = x.transpose((2, 0, 1))
return x
|
[
"Condition",
"an",
"image",
"for",
"use",
"with",
"the",
"VGG16",
"model",
"."
] |
awentzonline/keras-vgg-buddy
|
python
|
https://github.com/awentzonline/keras-vgg-buddy/blob/716cb66396b839a66ec8dc66998066b360a8f395/keras_vgg_buddy/models.py#L21-L28
|
[
"def",
"img_to_vgg",
"(",
"x",
")",
":",
"x",
"=",
"x",
"[",
":",
",",
":",
",",
":",
":",
"-",
"1",
"]",
"# to BGR",
"x",
"[",
":",
",",
":",
",",
"0",
"]",
"-=",
"103.939",
"x",
"[",
":",
",",
":",
",",
"1",
"]",
"-=",
"116.779",
"x",
"[",
":",
",",
":",
",",
"2",
"]",
"-=",
"123.68",
"x",
"=",
"x",
".",
"transpose",
"(",
"(",
"2",
",",
"0",
",",
"1",
")",
")",
"return",
"x"
] |
716cb66396b839a66ec8dc66998066b360a8f395
|
test
|
VGG16.get_f_layer
|
Create a function for the response of a layer.
|
keras_vgg_buddy/models.py
|
def get_f_layer(self, layer_name):
'''Create a function for the response of a layer.'''
inputs = [self.net_input]
if self.learning_phase is not None:
inputs.append(K.learning_phase())
return K.function(inputs, [self.get_layer_output(layer_name)])
|
def get_f_layer(self, layer_name):
'''Create a function for the response of a layer.'''
inputs = [self.net_input]
if self.learning_phase is not None:
inputs.append(K.learning_phase())
return K.function(inputs, [self.get_layer_output(layer_name)])
|
[
"Create",
"a",
"function",
"for",
"the",
"response",
"of",
"a",
"layer",
"."
] |
awentzonline/keras-vgg-buddy
|
python
|
https://github.com/awentzonline/keras-vgg-buddy/blob/716cb66396b839a66ec8dc66998066b360a8f395/keras_vgg_buddy/models.py#L41-L46
|
[
"def",
"get_f_layer",
"(",
"self",
",",
"layer_name",
")",
":",
"inputs",
"=",
"[",
"self",
".",
"net_input",
"]",
"if",
"self",
".",
"learning_phase",
"is",
"not",
"None",
":",
"inputs",
".",
"append",
"(",
"K",
".",
"learning_phase",
"(",
")",
")",
"return",
"K",
".",
"function",
"(",
"inputs",
",",
"[",
"self",
".",
"get_layer_output",
"(",
"layer_name",
")",
"]",
")"
] |
716cb66396b839a66ec8dc66998066b360a8f395
|
test
|
VGG16.get_layer_output
|
Get symbolic output of a layer.
|
keras_vgg_buddy/models.py
|
def get_layer_output(self, name):
'''Get symbolic output of a layer.'''
if not name in self._f_layer_outputs:
layer = self.net.get_layer(name)
self._f_layer_outputs[name] = layer.output
return self._f_layer_outputs[name]
|
def get_layer_output(self, name):
'''Get symbolic output of a layer.'''
if not name in self._f_layer_outputs:
layer = self.net.get_layer(name)
self._f_layer_outputs[name] = layer.output
return self._f_layer_outputs[name]
|
[
"Get",
"symbolic",
"output",
"of",
"a",
"layer",
"."
] |
awentzonline/keras-vgg-buddy
|
python
|
https://github.com/awentzonline/keras-vgg-buddy/blob/716cb66396b839a66ec8dc66998066b360a8f395/keras_vgg_buddy/models.py#L48-L53
|
[
"def",
"get_layer_output",
"(",
"self",
",",
"name",
")",
":",
"if",
"not",
"name",
"in",
"self",
".",
"_f_layer_outputs",
":",
"layer",
"=",
"self",
".",
"net",
".",
"get_layer",
"(",
"name",
")",
"self",
".",
"_f_layer_outputs",
"[",
"name",
"]",
"=",
"layer",
".",
"output",
"return",
"self",
".",
"_f_layer_outputs",
"[",
"name",
"]"
] |
716cb66396b839a66ec8dc66998066b360a8f395
|
test
|
VGG16.get_features
|
Evaluate layer outputs for `x`
|
keras_vgg_buddy/models.py
|
def get_features(self, x, layers):
'''Evaluate layer outputs for `x`'''
if not layers:
return None
inputs = [self.net.input]
if self.learning_phase is not None:
inputs.append(self.learning_phase)
f = K.function(inputs, [self.get_layer_output(layer_name) for layer_name in layers])
feature_outputs = f([x])
features = dict(zip(layers, feature_outputs))
return features
|
def get_features(self, x, layers):
'''Evaluate layer outputs for `x`'''
if not layers:
return None
inputs = [self.net.input]
if self.learning_phase is not None:
inputs.append(self.learning_phase)
f = K.function(inputs, [self.get_layer_output(layer_name) for layer_name in layers])
feature_outputs = f([x])
features = dict(zip(layers, feature_outputs))
return features
|
[
"Evaluate",
"layer",
"outputs",
"for",
"x"
] |
awentzonline/keras-vgg-buddy
|
python
|
https://github.com/awentzonline/keras-vgg-buddy/blob/716cb66396b839a66ec8dc66998066b360a8f395/keras_vgg_buddy/models.py#L59-L69
|
[
"def",
"get_features",
"(",
"self",
",",
"x",
",",
"layers",
")",
":",
"if",
"not",
"layers",
":",
"return",
"None",
"inputs",
"=",
"[",
"self",
".",
"net",
".",
"input",
"]",
"if",
"self",
".",
"learning_phase",
"is",
"not",
"None",
":",
"inputs",
".",
"append",
"(",
"self",
".",
"learning_phase",
")",
"f",
"=",
"K",
".",
"function",
"(",
"inputs",
",",
"[",
"self",
".",
"get_layer_output",
"(",
"layer_name",
")",
"for",
"layer_name",
"in",
"layers",
"]",
")",
"feature_outputs",
"=",
"f",
"(",
"[",
"x",
"]",
")",
"features",
"=",
"dict",
"(",
"zip",
"(",
"layers",
",",
"feature_outputs",
")",
")",
"return",
"features"
] |
716cb66396b839a66ec8dc66998066b360a8f395
|
test
|
create_key_file
|
Creates a new encryption key in the path provided and sets the file
permissions. Setting the file permissions currently does not work
on Windows platforms because of the differences in how file
permissions are read and modified.
|
giraffez/encrypt.py
|
def create_key_file(path):
"""
Creates a new encryption key in the path provided and sets the file
permissions. Setting the file permissions currently does not work
on Windows platforms because of the differences in how file
permissions are read and modified.
"""
iv = "{}{}".format(os.urandom(32), time.time())
new_key = generate_key(ensure_bytes(iv))
with open(path, "wb") as f:
f.write(base64.b64encode(new_key))
os.chmod(path, 0o400)
|
def create_key_file(path):
"""
Creates a new encryption key in the path provided and sets the file
permissions. Setting the file permissions currently does not work
on Windows platforms because of the differences in how file
permissions are read and modified.
"""
iv = "{}{}".format(os.urandom(32), time.time())
new_key = generate_key(ensure_bytes(iv))
with open(path, "wb") as f:
f.write(base64.b64encode(new_key))
os.chmod(path, 0o400)
|
[
"Creates",
"a",
"new",
"encryption",
"key",
"in",
"the",
"path",
"provided",
"and",
"sets",
"the",
"file",
"permissions",
".",
"Setting",
"the",
"file",
"permissions",
"currently",
"does",
"not",
"work",
"on",
"Windows",
"platforms",
"because",
"of",
"the",
"differences",
"in",
"how",
"file",
"permissions",
"are",
"read",
"and",
"modified",
"."
] |
capitalone/giraffez
|
python
|
https://github.com/capitalone/giraffez/blob/6b4d27eb1a1eaf188c6885c7364ef27e92b1b957/giraffez/encrypt.py#L30-L41
|
[
"def",
"create_key_file",
"(",
"path",
")",
":",
"iv",
"=",
"\"{}{}\"",
".",
"format",
"(",
"os",
".",
"urandom",
"(",
"32",
")",
",",
"time",
".",
"time",
"(",
")",
")",
"new_key",
"=",
"generate_key",
"(",
"ensure_bytes",
"(",
"iv",
")",
")",
"with",
"open",
"(",
"path",
",",
"\"wb\"",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"base64",
".",
"b64encode",
"(",
"new_key",
")",
")",
"os",
".",
"chmod",
"(",
"path",
",",
"0o400",
")"
] |
6b4d27eb1a1eaf188c6885c7364ef27e92b1b957
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.