| code (string) | signature (string) | docstring (string) | loss_without_docstring (float64) | loss_with_docstring (float64) | factor (float64) |
|---|---|---|---|---|---|
lv_stats = {}
for la in nd._mv_grid_districts[0].lv_load_areas():
for lvgd in la.lv_grid_districts():
station_neighbors = list(lvgd.lv_grid._graph[
lvgd.lv_grid._station].keys())
# check if the station's neighbor nodes are members of the generators list
station_generators = [x for x in station_neighbors
if x in lvgd.lv_grid.generators()]
lv_stats[repr(lvgd.lv_grid._station)] = station_generators
return lv_stats
|
def lv_grid_generators_bus_bar(nd)
|
Calculate statistics about generators at bus bar in LV grids
Parameters
----------
nd : ding0.NetworkDing0
Network container object
Returns
-------
lv_stats : dict
Dict with keys of LV grid repr() on first level. Each of the grids has
a set of statistical information about its topology
| 8.326947
| 7.419044
| 1.122375
|
abs_path = os.path.abspath(path)
if len(nd._mv_grid_districts) > 1:
name_extension = '_{number}-{number2}'.format(
number=nd._mv_grid_districts[0].id_db,
number2=nd._mv_grid_districts[-1].id_db)
else:
name_extension = '_{number}'.format(number=nd._mv_grid_districts[0].id_db)
if filename is None:
filename = "ding0_grids_{ext}.pkl".format(
ext=name_extension)
# delete attributes of `nd` in order to make pickling work
# del nd._config
del nd._orm
pickle.dump(nd, open(os.path.join(abs_path, filename), "wb"))
|
def save_nd_to_pickle(nd, path='', filename=None)
|
Use pickle to save the whole nd-object to disc
The network instance is entirely pickled to a file.
Parameters
----------
nd : NetworkDing0
Ding0 grid container object
path : str
Absolute or relative path where pickle should be saved. Default is ''
which means the pickle is saved to the PWD
| 4.342109
| 3.969701
| 1.093813
|
abs_path = os.path.abspath(path)
if filename is None:
raise NotImplementedError
return pickle.load(open(os.path.join(abs_path, filename), "rb"))
|
def load_nd_from_pickle(filename=None, path='')
|
Use pickle to load a whole nd-object from disc
Parameters
----------
filename : str
Filename of nd pickle
path : str
Absolute or relative path where the pickle is located. Default is ''
which means the pickle is loaded from the PWD
Returns
-------
nd : NetworkDing0
Ding0 grid container object
| 3.332886
| 4.019889
| 0.829099
|
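A minimal usage sketch for the `save_nd_to_pickle`/`load_nd_from_pickle` pair above; it assumes a fully built `NetworkDing0` instance `nd`, write access to the chosen path, and that both helpers live in `ding0.tools.results` (import path assumed):

```python
# Sketch: round-trip an nd-object through the two pickle helpers above.
# `nd` is assumed to be an existing, fully built NetworkDing0 instance.
from ding0.tools.results import save_nd_to_pickle, load_nd_from_pickle

save_nd_to_pickle(nd, path='/tmp', filename='ding0_grids_example.pkl')
nd_restored = load_nd_from_pickle(filename='ding0_grids_example.pkl', path='/tmp')
```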
# cable and line kilometer distribution
f, axarr = plt.subplots(2, 2, sharex=True)
stats.hist(column=['Length of MV overhead lines'], bins=5, alpha=0.5, ax=axarr[0, 0])
stats.hist(column=['Length of MV underground cables'], bins=5, alpha=0.5, ax=axarr[0, 1])
stats.hist(column=['Length of LV overhead lines'], bins=5, alpha=0.5, ax=axarr[1, 0])
stats.hist(column=['Length of LV underground cables'], bins=5, alpha=0.5, ax=axarr[1, 1])
plt.savefig(os.path.join(plotpath,
'Histogram_cable_line_length.pdf'))
|
def plot_cable_length(stats, plotpath)
|
Cable length per MV grid district
| 2.629634
| 2.498987
| 1.05228
|
# Generation capacity vs. peak load
sns.set_context("paper", font_scale=1.1)
sns.set_style("ticks")
# reformat to MW
gen_cap_indexes = ["Gen. Cap. of MV at v_level 4",
"Gen. Cap. of MV at v_level 5",
"Gen. Cap. of LV at v_level 6",
"Gen. Cap. of LV at v_level 7"]
peak_load_index = ["LA Total LV Peak Load total"]
stats['generation_capacity'] = stats[gen_cap_indexes].sum(axis=1) / 1e3
stats['peak_load'] = stats[peak_load_index] / 1e3
sns.lmplot('generation_capacity', 'peak_load',
data=stats,
fit_reg=False,
# hue='v_nom',
# hue='Voltage level',
scatter_kws={"marker": "D",
"s": 100},
aspect=2)
plt.title('Peak load vs. generation capacity')
plt.xlabel('Generation capacity in MW')
plt.ylabel('Peak load in MW')
plt.savefig(os.path.join(plotpath,
'Scatter_generation_load.pdf'))
|
def plot_generation_over_load(stats, plotpath)
|
Plot of generation over load
| 3.914292
| 3.904088
| 1.002614
|
pickle_name = cfg_ding0.get('output', 'nd_pickle')
# self.nd = self.read_pickles_from_files(pickle_name)
# TODO: instead of passing a list of mvgd's, pass list of filenames plus optionally a base_path
for mvgd in mv_grid_districts[1:]:
filename = os.path.join(
self.base_path,
'results', pickle_name.format(mvgd))
if os.path.isfile(filename):
mvgd_pickle = pickle.load(open(filename, 'rb'))
if mvgd_pickle._mv_grid_districts:
mvgd.add_mv_grid_district(mvgd_pickle._mv_grid_districts[0])
# save to concatenated pickle
pickle.dump(mvgd,
open(os.path.join(
self.base_path,
'results',
"ding0_grids_{0}-{1}.pkl".format(
mv_grid_districts[0],
mv_grid_districts[-1])),
"wb"))
# save stats (edges and nodes data) to csv
nodes, edges = mvgd.to_dataframe()
nodes.to_csv(os.path.join(
self.base_path,
'results', 'mvgd_nodes_stats_{0}-{1}.csv'.format(
mv_grid_districts[0], mv_grid_districts[-1])),
index=False)
edges.to_csv(os.path.join(
self.base_path,
'results', 'mvgd_edges_stats_{0}-{1}.csv'.format(
mv_grid_districts[0], mv_grid_districts[-1])),
index=False)
|
def concat_nd_pickles(self, mv_grid_districts)
|
Read multiple pickles, join nd objects and save to file
Parameters
----------
mv_grid_districts : list
Ints describing MV grid districts
| 3.325568
| 3.369687
| 0.986907
|
##############################
# close circuit breakers
nw.control_circuit_breakers(mode='close')
##############################
nodes_idx = 0
nodes_dict = {}
branches_idx = 0
branches_dict = {}
for district in nw.mv_grid_districts():
# nodes voltage
for node in district.mv_grid.graph_nodes_sorted():
nodes_idx += 1
if hasattr(node, 'voltage_res'):
Vres0 = node.voltage_res[0]
Vres1 = node.voltage_res[1]
else:
Vres0 = 'Not available'
Vres1 = 'Not available'
nodes_dict[nodes_idx] = {'MV_grid_id': district.mv_grid.id_db,
'node id': node.__repr__(),
'V_res_0': Vres0,
'V_res_1': Vres1,
'V nominal': district.mv_grid.v_level}
# branches currents
for branch in district.mv_grid.graph_edges():
branches_idx += 1
if hasattr(branch['branch'], 's_res'):
s_res0 = branch['branch'].s_res[0]
s_res1 = branch['branch'].s_res[1]
else:
s_res0 = 'Not available'
s_res1 = 'Not available'
branches_dict[branches_idx] = {
'MV_grid_id': district.mv_grid.id_db,
'branch id': branch['branch'].__repr__(), # .id_db
's_res_0': s_res0,
's_res_1': s_res1,
# 'length': branch['branch'].length / 1e3,
}
nodes_df = pd.DataFrame.from_dict(nodes_dict, orient='index')
branches_df = pd.DataFrame.from_dict(branches_dict, orient='index')
if not nodes_df.empty:
nodes_df = nodes_df.set_index('node id')
nodes_df = nodes_df.fillna(0)
nodes_df = nodes_df[sorted(nodes_df.columns.tolist())]
nodes_df.sort_index(inplace=True)
if not branches_df.empty:
branches_df = branches_df.set_index('branch id')
branches_df = branches_df.fillna(0)
branches_df = branches_df[sorted(branches_df.columns.tolist())]
branches_df.sort_index(inplace=True)
return (nodes_df, branches_df)
|
def calculate_mvgd_voltage_current_stats(nw)
|
MV Voltage and Current Statistics for an arbitrary network
Parameters
----------
nw: :any:`list` of NetworkDing0
The MV grid(s) to be studied
Returns
-------
pandas.DataFrame
nodes_df : Dataframe containing voltage statistics for every node in the MVGD
pandas.DataFrame
lines_df : Dataframe containing current statistics for every edge in the MVGD
| 2.248526
| 2.218987
| 1.013312
|
##############################
# close circuit breakers
nw.control_circuit_breakers(mode='close')
##############################
nodes_idx = 0
nodes_dict = {}
branches_idx = 0
branches_dict = {}
for mv_district in nw.mv_grid_districts():
for LA in mv_district.lv_load_areas():
if not LA.is_aggregated:
for lv_district in LA.lv_grid_districts():
# nodes voltage
crit_nodes = get_critical_voltage_at_nodes(lv_district.lv_grid)
for node in crit_nodes:
nodes_idx += 1
nodes_dict[nodes_idx] = {
'MV_grid_id': mv_district.mv_grid.id_db,
'LV_grid_id': lv_district.lv_grid.id_db,
'LA_id': LA.id_db,
'node id': node['node'].__repr__(),
'v_diff_0': node['v_diff'][0],
'v_diff_1': node['v_diff'][1],
's_max_0': 'NA',
's_max_1': 'NA',
'V nominal': lv_district.lv_grid.v_level,
}
# branches currents
critical_branches, critical_stations = get_critical_line_loading(lv_district.lv_grid)
for branch in critical_branches:
branches_idx += 1
branches_dict[branches_idx] = {
'MV_grid_id': mv_district.mv_grid.id_db,
'LV_grid_id': lv_district.lv_grid.id_db,
'LA_id': LA.id_db,
'branch id': branch['branch'].__repr__(),
's_max_0': branch['s_max'][0],
's_max_1': branch['s_max'][1],
}
# stations
for node in critical_stations:
nodes_idx += 1
nodes_dict[nodes_idx] = {
'MV_grid_id': mv_district.mv_grid.id_db,
'LV_grid_id': lv_district.lv_grid.id_db,
'LA_id': LA.id_db,
'node id': node['station'].__repr__(),
's_max_0': node['s_max'][0],
's_max_1': node['s_max'][1],
'v_diff_0': 'NA',
'v_diff_1': 'NA',
}
nodes_df = pd.DataFrame.from_dict(nodes_dict, orient='index')
branches_df = pd.DataFrame.from_dict(branches_dict, orient='index')
if not nodes_df.empty:
nodes_df = nodes_df.set_index('node id')
nodes_df = nodes_df.fillna(0)
nodes_df = nodes_df[sorted(nodes_df.columns.tolist())]
nodes_df.sort_index(inplace=True)
if not branches_df.empty:
branches_df = branches_df.set_index('branch id')
branches_df = branches_df.fillna(0)
branches_df = branches_df[sorted(branches_df.columns.tolist())]
branches_df.sort_index(inplace=True)
return nodes_df, branches_df
|
def calculate_lvgd_voltage_current_stats(nw)
|
LV Voltage and Current Statistics for an arbitrary network
Note
----
Aggregated Load Areas are excluded.
Parameters
----------
nw: :any:`list` of NetworkDing0
The MV grid(s) to be studied
Returns
-------
pandas.DataFrame
nodes_df : Dataframe containing voltage statistics for every critical node
and current statistics for every critical station, in every LV grid in nw.
pandas.DataFrame
lines_df : Dataframe containing current statistics for every critical
line in every LV grid in nw.
| 2.043356
| 1.979848
| 1.032077
|
'''Runs ding0 over the districts selected in mv_grid_districts
It also writes the result in filename. If filename = False,
then the network is not saved.
Parameters
----------
mv_grid_districts: :any:`list` of :obj:`int`
Districts IDs: Defaults to [3545]
filename: str
Defaults to 'ding0_tests_grids_1.pkl'
If filename=False, then the network is not saved
Returns
-------
NetworkDing0
The created MV network.
'''
print('\n########################################')
print(' Running ding0 for district', mv_grid_districts)
# database connection/ session
engine = db.connection(section='oedb')
session = sessionmaker(bind=engine)()
# instantiate new ding0 network object
nd = NetworkDing0(name='network')
# run DINGO on selected MV Grid District
nd.run_ding0(session=session, mv_grid_districts_no=mv_grid_districts)
# export grid to file (pickle)
if filename:
print('\n########################################')
print(' Saving result in ', filename)
save_nd_to_pickle(nd, filename=filename)
print('\n########################################')
return nd
|
def init_mv_grid(mv_grid_districts=[3545], filename='ding0_tests_grids_1.pkl')
|
Runs ding0 over the districts selected in mv_grid_districts
It also writes the result in filename. If filename = False,
then the network is not saved.
Parameters
----------
mv_grid_districts: :any:`list` of :obj:`int`
Districts IDs: Defaults to [3545]
filename: str
Defaults to 'ding0_tests_grids_1.pkl'
If filename=False, then the network is not saved
Returns
-------
NetworkDing0
The created MV network.
| 5.740098
| 3.012362
| 1.905514
|
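A short sketch of calling `init_mv_grid` above; the district IDs are placeholders and `filename=False` skips the pickle export:

```python
# Sketch: build the MV grids for two example districts without saving a pickle.
nd = init_mv_grid(mv_grid_districts=[3545, 3546], filename=False)
```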
# make some plot
plotpath = os.path.join(base_path, 'plots')
results.plot_cable_length(stats, plotpath)
plt.show()
results.plot_generation_over_load(stats, plotpath)
plt.show()
|
def ding0_exemplary_plots(stats, base_path=BASEPATH)
|
Analyze multiple grid district data generated with Ding0.
Parameters
----------
stats : pandas.DataFrame
Statistics of each MV grid districts
base_path : str
Root directory of Ding0 data structure, i.e. '~/.ding0' (which is
default).
| 4.590904
| 5.071285
| 0.905274
|
# load Ding0 data
nds = []
for filename in filenames:
try:
nd_load = results.load_nd_from_pickle(filename=
os.path.join(base_path,
'grids',
filename))
nds.append(nd_load)
except:
print("File {mvgd} not found. It was maybe excluded by Ding0 or "
"just forgotten to generate by you...".format(mvgd=filename))
nd = nds[0]
for n in nds[1:]:
nd.add_mv_grid_district(n._mv_grid_districts[0])
# get statistical numbers about grid
stats = results.calculate_mvgd_stats(nd)
return stats
|
def nd_load_and_stats(filenames, base_path=BASEPATH)
|
Load multiple files from disk and generate stats
Passes the list of files assuming the ding0 data structure as default in
:code:`~/.ding0`.
Data will be concatenated and key indicators for each grid district are
returned in table and graphic format.
Parameters
----------
filenames : list of str
Provide list of files you want to analyze
base_path : str
Root directory of Ding0 data structure, i.e. '~/.ding0' (which is
default).
Returns
-------
stats : pandas.DataFrame
Statistics of each MV grid districts
| 7.649806
| 6.638479
| 1.152343
|
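A usage sketch for `nd_load_and_stats` above; the pickle filenames are placeholders and the default `~/.ding0` layout with a `grids` subfolder is assumed, as in the function body:

```python
# Sketch: load two previously generated districts and plot the key indicators.
files = ['ding0_grids__3545.pkl', 'ding0_grids__3546.pkl']  # placeholder names
stats = nd_load_and_stats(files)
ding0_exemplary_plots(stats)  # histogram and scatter plots defined further above
```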
#TODO: finish docstring
# load cable data, file_names and parameter
branch_parameters = grid.network.static_data['MV_cables']
branch_parameters = branch_parameters[branch_parameters['U_n'] == grid.v_level].sort_values('I_max_th')
branch_ctr = 0
for branch, rel_overload in crit_branches.items():
try:
type = branch_parameters.loc[
branch_parameters[
branch_parameters['I_max_th'] >= branch['branch']
.type['I_max_th'] * rel_overload
].loc[
:, 'I_max_th'
].idxmin(), :
]
branch['branch'].type = type
branch_ctr += 1
except:
logger.warning('Branch {} could not be reinforced (current '
'issues) as there is no appropriate cable type '
'available. Original type is retained.'.format(
branch))
pass
if branch_ctr:
logger.info('==> {} branches were reinforced.'.format(str(branch_ctr)))
|
def reinforce_branches_current(grid, crit_branches)
|
Reinforce MV or LV grid by installing a new branch/line type
Parameters
----------
grid : GridDing0
Grid identifier.
crit_branches : dict
Dict of critical branches with max. relative overloading.
Notes
-----
The branch type to be installed is determined per branch using the rel. overloading. According to [#]_
only cables are installed.
References
----------
.. [#] Ackermann et al. (RP VNS)
See Also
--------
ding0.flexopt.check_tech_constraints.check_load :
ding0.flexopt.reinforce_measures.reinforce_branches_voltage :
| 7.415174
| 6.938835
| 1.068648
|
#TODO: finish docstring
# load cable data, file_names and parameter
branch_parameters = grid.network.static_data['{gridlevel}_cables'.format(
gridlevel=grid_level)]
branch_parameters = branch_parameters[branch_parameters['U_n'] == grid.v_level].sort_values('I_max_th')
branch_ctr = 0
for branch in crit_branches:
try:
type = branch_parameters.loc[
branch_parameters.loc[
branch_parameters['I_max_th'] > branch.type['I_max_th']
].loc[
:, 'I_max_th'
].idxmin(), :
]
branch.type = type
branch_ctr += 1
except:
logger.warning('Branch {} could not be reinforced (voltage '
'issues) as there is no appropriate cable type '
'available. Original type is retained.'.format(
branch))
pass
if branch_ctr:
logger.info('==> {} branches were reinforced.'.format(str(branch_ctr)))
|
def reinforce_branches_voltage(grid, crit_branches, grid_level='MV')
|
Reinforce MV or LV grid by installing a new branch/line type
Parameters
----------
grid : GridDing0
Grid identifier.
crit_branches : :any:`list` of :obj:`int`
List of critical branches. #TODO: check if a list or a dictionary
grid_level : str
Specifying either 'MV' for medium-voltage grid or 'LV' for
low-voltage grid level.
Notes
-----
The branch type to be installed is determined per branch - the next larger cable available is used.
According to Ackermann only cables are installed.
See Also
--------
ding0.flexopt.check_tech_constraints.check_load :
ding0.flexopt.reinforce_measures.reinforce_branches_voltage :
| 6.807174
| 6.8325
| 0.996293
|
load_factor_lv_trans_lc_normal = cfg_ding0.get(
'assumptions',
'load_factor_lv_trans_lc_normal')
load_factor_lv_trans_fc_normal = cfg_ding0.get(
'assumptions',
'load_factor_lv_trans_fc_normal')
trafo_params = grid.network._static_data['{grid_level}_trafos'.format(
grid_level=grid_level)]
trafo_s_max_max = max(trafo_params['S_nom'])
for station in critical_stations:
# determine if load or generation case and apply load factor
if station['s_max'][0] > station['s_max'][1]:
case = 'load'
lf_lv_trans_normal = load_factor_lv_trans_lc_normal
else:
case = 'gen'
lf_lv_trans_normal = load_factor_lv_trans_fc_normal
# cumulative maximum power of transformers installed
s_max_trafos = sum([_.s_max_a
for _ in station['station']._transformers])
# determine missing trafo power to solve overloading issue
s_trafo_missing = max(station['s_max']) - (
s_max_trafos * lf_lv_trans_normal)
# list of trafos with rated apparent power below `trafo_s_max_max`
extendable_trafos = [_ for _ in station['station']._transformers
if _.s_max_a < trafo_s_max_max]
# try to extend power of existing trafos
while (s_trafo_missing > 0) and extendable_trafos:
# only work with first of potentially multiple trafos
trafo = extendable_trafos[0]
trafo_s_max_a_before = trafo.s_max_a
# extend power of first trafo to next higher size available
extend_trafo_power(extendable_trafos, trafo_params)
# diminish missing trafo power by extended trafo power and update
# extendable trafos list
s_trafo_missing -= ((trafo.s_max_a * lf_lv_trans_normal) -
trafo_s_max_a_before)
extendable_trafos = [_ for _ in station['station']._transformers
if _.s_max_a < trafo_s_max_max]
# if the missing power could not be covered, build new trafos inside the station
if s_trafo_missing > 0:
trafo_type, trafo_cnt = select_transformers(grid, s_max={
's_max': s_trafo_missing,
'case': case
})
# create transformers and add them to station of LVGD
for t in range(0, trafo_cnt):
lv_transformer = TransformerDing0(
grid=grid,
id_db=id,
v_level=0.4,
s_max_longterm=trafo_type['S_nom'],
r=trafo_type['R'],
x=trafo_type['X'])
# add each transformer to its station
grid._station.add_transformer(lv_transformer)
logger.info("{stations_cnt} have been reinforced due to overloading "
"issues.".format(stations_cnt=len(critical_stations)))
|
def extend_substation(grid, critical_stations, grid_level)
|
Reinforce MV or LV substation by exchanging the existing trafo and
installing a parallel one if necessary.
First, all available transformers in `critical_stations` are extended to
maximum power. If this does not solve all present issues, additional
transformers are built.
Parameters
----------
grid: GridDing0
Ding0 grid container
critical_stations : :any:`list`
List of stations with overloading
grid_level : str
Either "LV" or "MV". Basis to select right equipment.
Notes
-----
Currently implemented in a straightforward way for LV stations only
Returns
-------
type
#TODO: Description of return. Change type in the previous line accordingly
| 4.594412
| 4.266362
| 1.076892
|
grid = crit_stations[0]['node'].grid
trafo_params = grid.network._static_data['{grid_level}_trafos'.format(
grid_level=grid_level)]
trafo_s_max_max = max(trafo_params['S_nom'])
trafo_min_size = trafo_params.loc[trafo_params['S_nom'].idxmin(), :]
v_diff_max_fc = cfg_ding0.get('assumptions', 'lv_max_v_level_fc_diff_normal')
v_diff_max_lc = cfg_ding0.get('assumptions', 'lv_max_v_level_lc_diff_normal')
tree = nx.dfs_tree(grid._graph, grid._station)
for station in crit_stations:
v_delta = max(station['v_diff'])
# get list of nodes of main branch in right order
extendable_trafos = [_ for _ in station['node']._transformers
if _.s_max_a < trafo_s_max_max]
v_delta_initially_lc = v_delta[0]
v_delta_initially_fc = v_delta[1]
new_transformers_cnt = 0
# extend existing trafo power while voltage issues exist and larger trafos
# are available
while (v_delta[0] > v_diff_max_lc) or (v_delta[1] > v_diff_max_fc):
if extendable_trafos:
# extend power of first trafo to next higher size available
extend_trafo_power(extendable_trafos, trafo_params)
elif new_transformers_cnt < 2:
# build a new transformer
lv_transformer = TransformerDing0(
grid=grid,
id_db=id,
v_level=0.4,
s_max_longterm=trafo_min_size['S_nom'],
r=trafo_min_size['R'],
x=trafo_min_size['X'])
# add each transformer to its station
grid._station.add_transformer(lv_transformer)
new_transformers_cnt += 1
# update break criteria
v_delta = get_voltage_at_bus_bar(grid, tree)
extendable_trafos = [_ for _ in station['node']._transformers
if _.s_max_a < trafo_s_max_max]
if (v_delta[0] == v_delta_initially_lc) or (
v_delta[1] == v_delta_initially_fc):
logger.warning("Extension of {station} has no effect on "
"voltage delta at bus bar. Transformation power "
"extension is halted.".format(
station=station['node']))
break
|
def extend_substation_voltage(crit_stations, grid_level='LV')
|
Extend substation if voltage issues at the substation occur
Follows a two-step procedure:
i) Existing transformers are extended by replacing them with ones of larger
nominal apparent power
ii) New additional transformers are added to the substation (see 'Notes')
Parameters
----------
crit_stations : :any:`list`
List of stations with overloading or voltage issues.
grid_level : str
Specifiy grid level: 'MV' or 'LV'
Notes
-----
At maximum, 2 new transformers of the largest available size (currently 630 kVA)
are additionally built to resolve voltage issues at the MV-LV substation bus bar.
| 4.986248
| 4.851055
| 1.027869
|
unsolved_branches = []
cable_lf = cfg_ding0.get('assumptions',
'load_factor_lv_cable_lc_normal')
cables = grid.network.static_data['LV_cables']
# resolve overloading issues for each branch segment
for branch in crit_branches:
I_max_branch_load = branch['s_max'][0]
I_max_branch_gen = branch['s_max'][1]
I_max_branch = max([I_max_branch_load, I_max_branch_gen])
suitable_cables = cables[(cables['I_max_th'] * cable_lf)
> I_max_branch]
if not suitable_cables.empty:
cable_type = suitable_cables.loc[suitable_cables['I_max_th'].idxmin(), :]
branch['branch'].type = cable_type
crit_branches.remove(branch)
else:
cable_type_max = cables.loc[cables['I_max_th'].idxmax(), :]
unsolved_branches.append(branch)
branch['branch'].type = cable_type_max
logger.error("No suitable cable type could be found for {branch} "
"with I_th_max = {current}. "
"Cable of type {cable} is chosen during "
"reinforcement.".format(
branch=branch['branch'],
cable=cable_type_max.name,
current=I_max_branch
))
return unsolved_branches
|
def reinforce_lv_branches_overloading(grid, crit_branches)
|
Choose appropriate cable type for branches with line overloading
Parameters
----------
grid : LVGridDing0
Ding0 LV grid object
crit_branches : :any:`list`
List of critical branches incl. its line loading
Notes
-----
If the maximum-size cable is not capable of resolving the line overloading
issue, the largest available cable type is assigned to the branch.
Returns
-------
:any:`list`
unsolved_branches : List of branches for which no suitable cable could be found
| 4.4433
| 4.020983
| 1.105028
|
trafo = extendable_trafos[0]
trafo_s_max_a_before = trafo.s_max_a
trafo_nearest_larger = trafo_params.loc[
trafo_params.loc[
trafo_params['S_nom'] > trafo_s_max_a_before
].loc[
:, 'S_nom'
].idxmin(), :
]
trafo.s_max_a = trafo_nearest_larger['S_nom']
trafo.r = trafo_nearest_larger['R']
trafo.x = trafo_nearest_larger['X']
|
def extend_trafo_power(extendable_trafos, trafo_params)
|
Extend power of first trafo in list of extendable trafos
Parameters
----------
extendable_trafos : :any:`list`
Trafos with rated power below maximum size available trafo
trafo_params : :pandas:`pandas.DataFrame<dataframe>`
Transformer parameters
| 3.047221
| 3.138651
| 0.97087
|
if generator not in self._generators and isinstance(generator,
GeneratorDing0):
self._generators.append(generator)
self.graph_add_node(generator)
|
def add_generator(self, generator)
|
Adds a generator to _generators and grid graph if not already existing
Parameters
----------
generator : GeneratorDing0
Description #TODO
| 9.448491
| 4.887859
| 1.933053
|
if ((node_object not in self._graph.nodes()) and
(isinstance(node_object, (StationDing0,
CableDistributorDing0,
LVLoadAreaCentreDing0,
CircuitBreakerDing0,
GeneratorDing0)))):
self._graph.add_node(node_object)
|
def graph_add_node(self, node_object)
|
Adds a station or cable distributor object to grid graph if not already existing
Parameters
----------
node_object : GridDing0
Description #TODO
| 9.427979
| 5.330177
| 1.768793
|
return sorted(self._graph.nodes(), key=lambda _: repr(_))
|
def graph_nodes_sorted(self)
|
Returns an (ascending) sorted list of graph's nodes (name is used as key).
Returns
-------
:any:`list`
Description #TODO check
| 10.917467
| 13.801847
| 0.791015
|
edges = nx.get_edge_attributes(self._graph, 'branch')
nodes = list(edges.keys())[list(edges.values()).index(branch)]
return nodes
|
def graph_nodes_from_branch(self, branch)
|
Returns nodes that are connected by `branch`
Args
----
branch: BranchDing0
Description #TODO
Returns
-------
(:obj:`GridDing0`, :obj:`GridDing0`)
2-tuple of nodes (Ding0 objects) #TODO:Check
| 3.777424
| 4.546659
| 0.830813
|
# TODO: This method can be replaced and speed up by using NetworkX' neighbors()
branches = []
branches_dict = self._graph.adj[node]
for branch in branches_dict.items():
branches.append(branch)
return sorted(branches, key=lambda _: repr(_))
|
def graph_branches_from_node(self, node)
|
Returns branches that are connected to `node`
Args
----
node: GridDing0
Ding0 object (member of graph)
Returns
-------
:any:`list`
List of tuples (node in :obj:`GridDing0`, branch in :obj:`BranchDing0`) ::
(node , branch_0 ),
...,
(node , branch_N ),
| 8.532785
| 10.025211
| 0.851133
|
# get edges with attributes
edges = nx.get_edge_attributes(self._graph, 'branch').items()
# sort them according to connected nodes
edges_sorted = sorted(list(edges), key=lambda _: (''.join(sorted([repr(_[0][0]),repr(_[0][1])]))))
for edge in edges_sorted:
yield {'adj_nodes': edge[0], 'branch': edge[1]}
|
def graph_edges(self)
|
Returns a generator for iterating over graph edges
An edge of the graph is described by the two adjacent nodes and the branch
object itself, where the branch object holds all relevant power system
parameters.
Yields
------
dict
Dictionary with keys 'adj_nodes' (2-tuple of adjacent nodes) and 'branch'
(the BranchDing0 object)
Note
----
There are generator functions for nodes (`Graph.nodes()`) and edges
(`Graph.edges()`) in NetworkX but unlike graph nodes, which can be
represented by objects, branch objects can only be accessed by using an
edge attribute ('branch' is used here)
To make access to attributes of the branch objects simpler and more
intuitive for the user, this generator yields a dictionary for each edge
that contains information about adjacent nodes and the branch object.
Note, the construction of the dictionary highly depends on the structure
of the in-going tuple (which is defined by the needs of networkX). If
this changes, the code will break.
| 5.563266
| 4.475753
| 1.242979
|
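A sketch of consuming the edge dictionaries yielded by `graph_edges` above; `grid` is assumed to be an existing, parametrized `GridDing0` instance:

```python
# Sketch: sum up the line length of a grid from the yielded edge dictionaries.
total_length_km = 0.0
for edge in grid.graph_edges():
    node_a, node_b = edge['adj_nodes']              # the two adjacent node objects
    total_length_km += edge['branch'].length / 1e3  # BranchDing0.length is given in metres
```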
if (node_source in self._graph.nodes()) and (node_target in self._graph.nodes()):
path = nx.shortest_path(self._graph, node_source, node_target)
else:
raise Exception('At least one of the nodes is not a member of graph.')
if type == 'nodes':
return path
elif type == 'edges':
return [_ for _ in self._graph.edges(nbunch=path, data=True)
if (_[0] in path and _[1] in path)]
else:
raise ValueError('Please specify type as nodes or edges')
|
def find_path(self, node_source, node_target, type='nodes')
|
Determines shortest path
Determines the shortest path from `node_source` to
`node_target` in _graph using networkx' shortest path
algorithm.
Args
----
node_source: GridDing0
source node, member of _graph
node_target: GridDing0
target node, member of _graph
type : str
Specify if nodes or edges should be returned. Default
is `nodes`
Returns
-------
:any:`list` of :obj:`GridDing0`
path: shortest path from `node_source` to `node_target` (list of nodes in _graph)
Notes
-----
WARNING: The shortest path is calculated using the count of hops, not the actual line lengths!
As long as the circuit breakers are open, this works fine since there's only one path. But if
they are closed, there are 2 possible paths. The result is a path with the minimum count of hops,
which might have a longer total path length than the second one.
See NetworkX's shortest_path() function for details on how the path is calculated.
| 2.420948
| 2.252124
| 1.074962
|
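A sketch of `find_path` above in both return modes, together with `graph_path_length` defined a few rows below; `grid` and `target` are assumed to be an existing grid and one of its graph members:

```python
# Sketch: shortest path (by hop count) from the grid's station to a target node.
path_nodes = grid.find_path(grid._station, target, type='nodes')
path_edges = grid.find_path(grid._station, target, type='edges')
path_length_m = grid.graph_path_length(grid._station, target)  # sums branch lengths in m
```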
branches = set()
for node_target in nodes_target:
path = self.find_path(node_source, node_target)
node_pairs = list(zip(path[0:len(path) - 1], path[1:len(path)]))
for n1, n2 in node_pairs:
branches.add(self._graph.adj[n1][n2]['branch'])
return list(branches)
|
def find_and_union_paths(self, node_source, nodes_target)
|
Determines shortest paths from `node_source` to all nodes in `nodes_target` in _graph using find_path().
The branches of all paths are stored in a set - the result is a list of unique branches.
Args
----
node_source: GridDing0
source node, member of _graph
nodes_target: :any:`list` of :obj:`GridDing0`
target nodes, members of _graph
Returns
-------
:any:`list` of :obj:`BranchDing0`
branches: list of branches (list of nodes in _graph) #TODO:check
| 2.596536
| 2.723397
| 0.953418
|
length = 0
path = self.find_path(node_source, node_target)
node_pairs = list(zip(path[0:len(path)-1], path[1:len(path)]))
for n1, n2 in node_pairs:
length += self._graph.adj[n1][n2]['branch'].length
return length
|
def graph_path_length(self, node_source, node_target)
|
Calculates the absolute distance between `node_source` and `node_target` in meters using find_path() and branches' length attribute.
Args
----
node_source: GridDing0
source node, member of _graph
node_target: GridDing0
target node, member of _graph
Returns
-------
float
path length in m
| 3.241385
| 3.089654
| 1.049109
|
return sorted(nx.isolates(self._graph), key=lambda x: repr(x))
|
def graph_isolated_nodes(self)
|
Finds isolated nodes = nodes with no neighbors (degree zero)
Returns
-------
:any:`list` of :obj:`GridDing0`
List of nodes (Ding0 objects)
| 6.572395
| 10.34041
| 0.635603
|
if transformer not in self.transformers() and isinstance(transformer, TransformerDing0):
self._transformers.append(transformer)
|
def add_transformer(self, transformer)
|
Adds a transformer to _transformers if not already existing
Args
----
transformer : TransformerDing0
Description #TODO
| 9.734548
| 4.636382
| 2.0996
|
for branch in self._grid.graph_edges():
if branch['branch'].ring == self:
yield branch
|
def branches(self)
|
Returns a generator for iterating over the branches that belong to this ring
| 22.909676
| 19.350052
| 1.183959
|
for lv_load_area in self._grid._graph.nodes():
if isinstance(lv_load_area, LVLoadAreaDing0):
if lv_load_area.ring == self:
yield lv_load_area
|
def lv_load_areas(self)
|
Returns a generator for iterating over the LV load areas that belong to this ring
| 7.902644
| 7.215632
| 1.095212
|
self.branch_nodes = self.grid.graph_nodes_from_branch(self.branch)
self.grid._graph.remove_edge(self.branch_nodes[0], self.branch_nodes[1])
self.status = 'open'
|
def open(self)
|
Open a Circuit Breaker #TODO Check
| 5.52843
| 5.226285
| 1.057812
|
self.grid._graph.add_edge(self.branch_nodes[0], self.branch_nodes[1], branch=self.branch)
self.status = 'closed'
|
def close(self)
|
Close a Circuit Breaker #TODO Check
| 7.493082
| 7.466626
| 1.003543
|
for load_area in sorted(self._lv_load_areas, key=lambda _: repr(_)):
yield load_area
|
def lv_load_areas(self)
|
Returns a generator for iterating over load_areas
Yields
------
LVLoadAreaDing0
generator for iterating over load_areas
| 7.938279
| 6.551208
| 1.211727
|
if lv_load_area not in self.lv_load_areas() and isinstance(lv_load_area, LVLoadAreaDing0):
self._lv_load_areas.append(lv_load_area)
self.mv_grid.graph_add_node(lv_load_area.lv_load_area_centre)
|
def add_lv_load_area(self, lv_load_area)
|
Adds a Load Area `lv_load_area` to _lv_load_areas if not already existing
Additionally, adds the associated centre object to MV grid's _graph as node.
Args
----
lv_load_area: LVLoadAreaDing0
instance of class LVLoadAreaDing0
| 4.956326
| 2.367167
| 2.093779
|
if lv_load_area_group not in self.lv_load_area_groups():
self._lv_load_area_groups.append(lv_load_area_group)
|
def add_lv_load_area_group(self, lv_load_area_group)
|
Adds an LV load area group to _lv_load_area_groups if not already existing.
| 2.045484
| 1.789218
| 1.143228
|
peak_load = peak_load_satellites = 0
for lv_load_area in self.lv_load_areas():
peak_load += lv_load_area.peak_load
if lv_load_area.is_satellite:
peak_load_satellites += lv_load_area.peak_load
self.peak_load = peak_load
self.peak_load_satellites = peak_load_satellites
|
def add_peak_demand(self)
|
Summarizes peak loads of underlying load_areas in kVA.
(peak load sum and peak load of satellites)
| 2.76101
| 1.976872
| 1.396656
|
peak_load_aggregated = 0
for lv_load_area in self.lv_load_areas():
if lv_load_area.is_aggregated:
peak_load_aggregated += lv_load_area.peak_load
self.peak_load_aggregated = peak_load_aggregated
|
def add_aggregated_peak_demand(self)
|
Summarizes peak loads of underlying aggregated load_areas
| 3.134847
| 2.522808
| 1.242603
|
for lv_grid_district in sorted(self._lv_grid_districts, key=lambda _: repr(_)):
yield lv_grid_district
|
def lv_grid_districts(self)
|
Returns a generator for iterating over LV grid districts
Yields
------
LVGridDistrictDing0
generator for iterating over LV grid districts
| 5.523403
| 5.387459
| 1.025233
|
# TODO: check docstring
if lv_grid_district not in self._lv_grid_districts and \
isinstance(lv_grid_district, LVGridDistrictDing0):
self._lv_grid_districts.append(lv_grid_district)
|
def add_lv_grid_district(self, lv_grid_district)
|
Adds a LV grid district to _lv_grid_districts if not already existing
Args
----
lv_grid_district: LVGridDistrictDing0
Descr
| 5.583476
| 6.732447
| 0.829338
|
cum_peak_generation = 0
for lv_grid_district in self._lv_grid_districts:
cum_peak_generation += lv_grid_district.lv_grid.station().peak_generation
return cum_peak_generation
|
def peak_generation(self)
|
Cumulative peak generation of generators connected to LV grids of
underlying LVGDs
| 6.858512
| 4.413432
| 1.554009
|
i, j = pair
return (self._nodes[i.name()], self._nodes[j.name()])
|
def get_pair(self, pair)
|
Returns the solution nodes corresponding to the given `pair` of problem nodes
Parameters
----------
pair : :any:`list` of nodes
Descr
Returns
-------
type
Descr
| 7.77844
| 11.375124
| 0.683811
|
return all(
[node.route_allocation() is not None for node in list(self._nodes.values()) if node != self._problem.depot()]
)
|
def is_complete(self)
|
Returns True if this is a complete solution, i.e., all nodes are allocated
Returns
-------
bool
True if all nodes are allocated.
| 14.015121
| 13.059047
| 1.073212
|
new_solution = self.__class__(self._problem, len(self._routes))
# Clone routes
for index, r in enumerate(self._routes):
new_route = new_solution._routes[index]
for node in r.nodes():
# Insert the new node into the new route
new_node = new_solution._nodes[node]
new_route.allocate([new_node])
return new_solution
|
def clone(self)
|
Returns a deep copy of self
Function clones:
* route
* allocation
* nodes
Returns
-------
type
Deep copy of self
| 5.31052
| 4.918532
| 1.079696
|
length = 0
for r in self._routes:
length = length + r.length()
return length
|
def length(self)
|
Returns the solution length (or cost)
Returns
-------
float
Solution length (or cost).
| 6.011125
| 8.171841
| 0.73559
|
g = nx.Graph()
ntemp = []
nodes_pos = {}
demands = {}
demands_pos = {}
for no, node in self._nodes.items():
g.add_node(node)
ntemp.append(node)
coord = self._problem._coord[no]
nodes_pos[node] = tuple(coord)
demands[node] = 'd=' + str(node.demand())
demands_pos[node] = tuple([a+b for a, b in zip(coord, [2.5]*len(coord))])
depot = self._nodes[self._problem._depot._name]
for r in self.routes():
n1 = r._nodes[0:len(r._nodes)-1]
n2 = r._nodes[1:len(r._nodes)]
e = list(zip(n1, n2))
e.append((depot, r._nodes[0]))
e.append((r._nodes[-1], depot))
g.add_edges_from(e)
plt.figure()
ax = plt.gca()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
if anim is not None:
nx.draw_networkx(g, nodes_pos, with_labels=False, node_size=50)
plt.savefig(anim.file_path +
anim.file_prefix +
(4 - len(str(anim.counter))) * '0' +
str(anim.counter) + '.png',
dpi=150,
bbox_inches='tight')
anim.counter += 1
plt.close()
else:
nx.draw_networkx(g, nodes_pos)
nx.draw_networkx_labels(g, demands_pos, labels=demands)
plt.show()
|
def draw_network(self, anim)
|
Draws solution's graph using networkx
Parameters
----------
anim : AnimationDing0
AnimationDing0 object
| 2.605197
| 2.576071
| 1.011306
|
branches = []
polygon_shp = transform(proj, polygon)
for branch in mv_grid.graph_edges():
nodes = branch['adj_nodes']
branch_shp = transform(proj, LineString([nodes[0].geo_data, nodes[1].geo_data]))
# check if branches intersect with polygon if mode = 'intersects'
if mode == 'intersects':
if polygon_shp.intersects(branch_shp):
branches.append(branch)
# check if polygon contains branches if mode = 'contains'
elif mode == 'contains':
if polygon_shp.contains(branch_shp):
branches.append(branch)
# error
else:
raise ValueError('Mode is invalid!')
return branches
|
def calc_geo_branches_in_polygon(mv_grid, polygon, mode, proj)
|
Calculate geographical branches in polygon.
For a given `mv_grid` all branches (edges in the graph of the grid) are
tested if they are in the given `polygon`. You can choose different modes
and projections for this operation.
Parameters
----------
mv_grid : MVGridDing0
MV Grid object. Edges contained in `mv_grid.graph_edges()` are taken
for the test.
polygon : :shapely:`Shapely Polygon object<polygons>`
Polygon that contains edges.
mode : str
Choose between 'intersects' or 'contains'.
proj : int
EPSG code to specify projection
Returns
-------
:any:`list` of :any:`BranchDing0` objects
List of branches
| 3.077422
| 2.857816
| 1.076844
|
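A usage sketch for `calc_geo_branches_in_polygon` above; the projection follows the WGS84-to-ETRS-LAEA (`epsg:4326` to `epsg:3035`) pattern used elsewhere in this module, and `mv_grid` and `polygon` are assumed to be given:

```python
# Sketch: collect all MV branches that intersect a given polygon.
from functools import partial
import pyproj

proj = partial(
    pyproj.transform,
    pyproj.Proj(init='epsg:4326'),   # source: conformal WGS84
    pyproj.Proj(init='epsg:3035'))   # destination: equidistant ETRS-LAEA

hit_branches = calc_geo_branches_in_polygon(mv_grid, polygon, mode='intersects', proj=proj)
```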
branches = []
while not branches:
node_shp = transform(proj, node.geo_data)
buffer_zone_shp = node_shp.buffer(radius)
for branch in mv_grid.graph_edges():
nodes = branch['adj_nodes']
branch_shp = transform(proj, LineString([nodes[0].geo_data, nodes[1].geo_data]))
if buffer_zone_shp.intersects(branch_shp):
branches.append(branch)
radius += radius_inc
return branches
|
def calc_geo_branches_in_buffer(node, mv_grid, radius, radius_inc, proj)
|
Determines branches in the node's associated graph that are at least partly
within a buffer of `radius` around `node`.
If no branches are found, the buffer is successively extended by `radius_inc`
until branches are found.
Parameters
----------
node : LVStationDing0, GeneratorDing0, or CableDistributorDing0
origin node (e.g. LVStationDing0 object) with associated shapely object
(attribute `geo_data`) in any CRS (e.g. WGS84)
radius : float
buffer radius in m
radius_inc : float
radius increment in m
proj : int
pyproj projection object: nodes' CRS to equidistant CRS
(e.g. WGS84 -> ETRS)
Returns
-------
:any:`list` of :networkx:`NetworkX Graph Obj< >`
List of branches (NetworkX branch objects)
| 3.692906
| 3.363073
| 1.098075
|
branch_detour_factor = cfg_ding0.get('assumptions', 'branch_detour_factor')
# notice: vincenty takes (lat,lon)
branch_length = branch_detour_factor * vincenty((node_source.geo_data.y, node_source.geo_data.x),
(node_target.geo_data.y, node_target.geo_data.x)).m
# ========= BUG: LINE LENGTH=0 WHEN CONNECTING GENERATORS ===========
# When importing generators, the geom_new field is used as position. If it is empty, EnergyMap's geom
# is used and so there are a couple of generators at the same position => length of interconnecting
# line is 0. See issue #76
if branch_length == 0:
branch_length = 1
logger.warning('Geo distance is zero, check objects\' positions. '
'Distance is set to 1m')
# ===================================================================
return branch_length
|
def calc_geo_dist_vincenty(node_source, node_target)
|
Calculates the geodesic distance between `node_source` and `node_target`
incorporating the detour factor specified in :file:`ding0/ding0/config/config_calc.cfg`.
Parameters
----------
node_source: LVStationDing0, GeneratorDing0, or CableDistributorDing0
source node, member of GridDing0._graph
node_target: LVStationDing0, GeneratorDing0, or CableDistributorDing0
target node, member of GridDing0._graph
Returns
-------
:any:`float`
Distance in m
| 10.581438
| 9.380624
| 1.12801
|
branch_detour_factor = cfg_ding0.get('assumptions', 'branch_detour_factor')
matrix = {}
for i in nodes_pos:
pos_origin = tuple(nodes_pos[i])
matrix[i] = {}
for j in nodes_pos:
pos_dest = tuple(nodes_pos[j])
# notice: vincenty takes (lat,lon), thus the (x,y)/(lon,lat) tuple is reversed
distance = branch_detour_factor * vincenty(tuple(reversed(pos_origin)), tuple(reversed(pos_dest))).km
matrix[i][j] = distance
return matrix
|
def calc_geo_dist_matrix_vincenty(nodes_pos)
|
Calculates the geodesic distance between all nodes in `nodes_pos` incorporating the detour factor in config_calc.cfg.
For every two points/coord it uses geopy's vincenty function (formula devised by Thaddeus Vincenty,
with an accurate ellipsoidal model of the earth). By default, the WGS-84 ellipsoid is used.
For more options see
https://geopy.readthedocs.org/en/1.10.0/index.html?highlight=vincenty#geopy.distance.vincenty
Parameters
----------
nodes_pos: dict
dictionary of nodes with positions, with x=longitude, y=latitude, and the following format::
{
'node_1': (x_1, y_1),
...,
'node_n': (x_n, y_n)
}
Returns
-------
:obj:`dict`
dictionary with distances between all nodes (in km), with the following format::
{
'node_1': {'node_1': dist_11, ..., 'node_n': dist_1n},
...,
'node_n': {'node_1': dist_n1, ..., 'node_n': dist_nn}
}
| 5.362046
| 5.07497
| 1.056567
|
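A small sketch of the input and output shapes of `calc_geo_dist_matrix_vincenty` above; the coordinates are made-up WGS84 lon/lat pairs:

```python
# Sketch: pairwise detour-corrected distances between two illustrative nodes.
nodes_pos = {
    'MVStation_1': (10.00, 53.55),   # (longitude, latitude), made-up values
    'LoadArea_7': (10.05, 53.60),
}
matrix = calc_geo_dist_matrix_vincenty(nodes_pos)
# matrix['MVStation_1']['LoadArea_7'] -> distance in km, incl. the branch detour factor
```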
proj_source = partial(
pyproj.transform,
pyproj.Proj(init='epsg:4326'), # source coordinate system
pyproj.Proj(init='epsg:3035')) # destination coordinate system
# ETRS (equidistant) to WGS84 (conformal) projection
proj_target = partial(
pyproj.transform,
pyproj.Proj(init='epsg:3035'), # source coordinate system
pyproj.Proj(init='epsg:4326')) # destination coordinate system
branch_shp = transform(proj_source, LineString([node_source.geo_data, node_target.geo_data]))
distance = vincenty((node_source.geo_data.y, node_source.geo_data.x),
(node_target.geo_data.y, node_target.geo_data.x)).m
centre_point_shp = transform(proj_target, branch_shp.interpolate(distance/2))
return centre_point_shp
|
def calc_geo_centre_point(node_source, node_target)
|
Calculates the geographic centre point between `node_source` and `node_target`,
i.e. the point halfway along the straight connection between both nodes.
Parameters
----------
node_source: LVStationDing0, GeneratorDing0, or CableDistributorDing0
source node, member of GridDing0._graph
node_target: LVStationDing0, GeneratorDing0, or CableDistributorDing0
target node, member of GridDing0._graph
Returns
-------
:shapely:`Shapely Point object<points>`
Geo centre point between `node_source` and `node_target`.
| 2.408222
| 2.567566
| 0.937939
|
# backup kind and type of branch
branch_kind = graph.adj[node][target_obj_result]['branch'].kind
branch_type = graph.adj[node][target_obj_result]['branch'].type
branch_ring = graph.adj[node][target_obj_result]['branch'].ring
graph.remove_edge(node, target_obj_result)
if isinstance(target_obj_result, MVCableDistributorDing0):
neighbor_nodes = list(graph.neighbors(target_obj_result))
if len(neighbor_nodes) == 2:
node.grid.remove_cable_distributor(target_obj_result)
branch_length = calc_geo_dist_vincenty(neighbor_nodes[0], neighbor_nodes[1])
graph.add_edge(neighbor_nodes[0], neighbor_nodes[1], branch=BranchDing0(length=branch_length,
kind=branch_kind,
type=branch_type,
ring=branch_ring))
if debug:
logger.debug('disconnect edge {0}-{1}'.format(node, target_obj_result))
|
def disconnect_node(node, target_obj_result, graph, debug)
|
Disconnects `node` from `target_obj`
Args
----
node: LVLoadAreaCentreDing0, i.e.
Origin node - Ding0 graph object (e.g. LVLoadAreaCentreDing0)
target_obj_result: LVLoadAreaCentreDing0, i.e.
Target node - Ding0 graph object (e.g. LVLoadAreaCentreDing0)
graph: :networkx:`NetworkX Graph Obj< >`
NetworkX graph object with nodes and newly created branches
debug: bool
If True, information is printed during process
| 3.832967
| 3.284954
| 1.166825
|
for branch in mv_grid.graph_edges():
if branch['branch'].kind is None:
branch['branch'].kind = mv_grid.default_branch_kind
if branch['branch'].type is None:
branch['branch'].type = mv_grid.default_branch_type
|
def parametrize_lines(mv_grid)
|
Set unparametrized branches to default branch type
Args
----
mv_grid: MVGridDing0
MV grid instance
Notes
-----
During the connection process of satellites, new branches are created -
these have to be parametrized.
| 4.041998
| 3.173612
| 1.273627
|
# get power factor for loads
cos_phi_load = cfg_ding0.get('assumptions', 'cos_phi_load')
specs = {}
nodes_demands = {}
nodes_pos = {}
nodes_agg = {}
# check if there are only load areas of type aggregated and satellite
# -> treat satellites as normal load areas (allow for routing)
satellites_only = True
for node in graph.nodes():
if isinstance(node, LVLoadAreaCentreDing0):
if not node.lv_load_area.is_satellite and not node.lv_load_area.is_aggregated:
satellites_only = False
for node in graph.nodes():
# station is LV station
if isinstance(node, LVLoadAreaCentreDing0):
# only major stations are connected via MV ring
# (satellites too, in case the grid district contains only satellites)
if not node.lv_load_area.is_satellite or satellites_only:
# get demand and position of node
# convert node's demand to int for performance purposes and to avoid that node
# allocation with subsequent deallocation results in demand<0 due to rounding errors.
nodes_demands[str(node)] = int(node.lv_load_area.peak_load / cos_phi_load)
nodes_pos[str(node)] = (node.geo_data.x, node.geo_data.y)
# get aggregation flag
if node.lv_load_area.is_aggregated:
nodes_agg[str(node)] = True
else:
nodes_agg[str(node)] = False
# station is MV station
elif isinstance(node, MVStationDing0):
nodes_demands[str(node)] = 0
nodes_pos[str(node)] = (node.geo_data.x, node.geo_data.y)
specs['DEPOT'] = str(node)
specs['BRANCH_KIND'] = node.grid.default_branch_kind
specs['BRANCH_TYPE'] = node.grid.default_branch_type
specs['V_LEVEL'] = node.grid.v_level
specs['NODE_COORD_SECTION'] = nodes_pos
specs['DEMAND'] = nodes_demands
specs['MATRIX'] = calc_geo_dist_matrix_vincenty(nodes_pos)
specs['IS_AGGREGATED'] = nodes_agg
return specs
|
def ding0_graph_to_routing_specs(graph)
|
Build data dictionary from graph nodes for routing (translation)
Args
----
graph: :networkx:`NetworkX Graph Obj< >`
NetworkX graph object with nodes
Returns
-------
:obj:`dict`
Data dictionary for routing.
See Also
--------
ding0.grid.mv_grid.models.models.Graph : for keys of return dict
| 5.019336
| 4.942732
| 1.015498
|
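The keys of the specs dictionary returned by `ding0_graph_to_routing_specs` above, sketched for a hypothetical MV grid graph `graph`:

```python
# Sketch: inspect the routing specs built from an MV grid graph.
specs = ding0_graph_to_routing_specs(graph)
# specs['DEPOT']              -> repr of the MV station (the routing depot)
# specs['DEMAND']             -> {node repr: int peak load scaled by 1/cos_phi_load}
# specs['NODE_COORD_SECTION'] -> {node repr: (x, y)}
# specs['MATRIX']             -> pairwise vincenty distance matrix in km
# specs['IS_AGGREGATED']      -> {node repr: bool}
# specs['BRANCH_KIND'], specs['BRANCH_TYPE'], specs['V_LEVEL'] -> grid defaults
```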
# TODO: check docstring
# TODO: Implement debug mode (pass to solver) to get more information while routing (print routes, draw network, ..)
# translate DING0 graph to routing specs
specs = ding0_graph_to_routing_specs(graph)
# create routing graph using specs
RoutingGraph = Graph(specs)
timeout = 30000
# create solver objects
savings_solver = savings.ClarkeWrightSolver()
local_search_solver = local_search.LocalSearchSolver()
start = time.time()
# create initial solution using Clarke and Wright Savings methods
savings_solution = savings_solver.solve(RoutingGraph, timeout, debug, anim)
# OLD, MAY BE USED LATER - Guido, please don't declare a variable later=now() :) :
#if not savings_solution.is_complete():
# print('=== Solution is not a complete solution! ===')
if debug:
logger.debug('ClarkeWrightSolver solution:')
util.print_solution(savings_solution)
logger.debug('Elapsed time (seconds): {}'.format(time.time() - start))
#savings_solution.draw_network()
# improve initial solution using local search
local_search_solution = local_search_solver.solve(RoutingGraph, savings_solution, timeout, debug, anim)
# this line is for debug plotting purposes:
#local_search_solution = savings_solution
if debug:
logger.debug('Local Search solution:')
util.print_solution(local_search_solution)
logger.debug('Elapsed time (seconds): {}'.format(time.time() - start))
#local_search_solution.draw_network()
return routing_solution_to_ding0_graph(graph, local_search_solution)
|
def solve(graph, debug=False, anim=None)
|
Do MV routing for given nodes in `graph`.
Translate data from node objects to appropriate format before.
Args
----
graph: :networkx:`NetworkX Graph Obj< >`
NetworkX graph object with nodes
debug: bool, defaults to False
If True, information is printed while routing
anim: AnimationDing0
AnimationDing0 object
Returns
-------
:networkx:`NetworkX Graph Obj< >`
NetworkX graph object with nodes and edges
See Also
--------
ding0.tools.animation.AnimationDing0 : for a more detailed description on anim parameter.
| 5.30019
| 5.185751
| 1.022068
|
I_max_load = nom_power / (3 ** 0.5 * nom_voltage)
# determine suitable cable for this current
suitable_cables = avail_cables[avail_cables['I_max_th'] > I_max_load]
if not suitable_cables.empty:
cable_type = suitable_cables.loc[suitable_cables['I_max_th'].idxmin(), :]
else:
cable_type = avail_cables.loc[avail_cables['I_max_th'].idxmax(), :]
return cable_type
|
def cable_type(nom_power, nom_voltage, avail_cables)
|
Determine suitable type of cable for given nominal power
Based on maximum occurring current which is derived from nominal power
(either peak load or max. generation capacity) a suitable cable type is
chosen. Thus, no line overloading issues should occur.
Parameters
----------
nom_power : float
Nominal power of generators or loads connected via a cable
nom_voltage : float
Nominal voltage in kV
avail_cables : :pandas:`pandas.DataFrame<dataframe>`
Available cable types including their electrical parameters
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
Parameters of cable type
| 3.030333
| 2.9481
| 1.027894
|
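A quick numeric sketch for `cable_type` above: 400 kVA at 0.4 kV gives I = 400 / (√3 · 0.4) ≈ 577 A, so the smallest cable rated above that value is chosen. The cable table below is made up for illustration:

```python
# Sketch: choose a cable for 400 kVA at 0.4 kV from an illustrative cable table.
import pandas as pd

avail_cables = pd.DataFrame({'I_max_th': [270, 435, 610]}, index=['c1', 'c2', 'c3'])
chosen = cable_type(400, 0.4, avail_cables)
# I_max_load ≈ 577 A -> only 'c3' (610 A) is sufficient, so it is selected
```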
logging.info('Handling duplications for "%s"', file_path)
f = open_strings_file(file_path, "r+")
header_comment_key_value_tuples = extract_header_comment_key_value_tuples_from_file(f)
file_elements = []
section_file_elements = []
keys_to_objects = {}
duplicates_found = []
for header_comment, comments, key, value in header_comment_key_value_tuples:
if len(header_comment) > 0:
# New section - Appending the last section entries, sorted by comment
for elem in sorted(section_file_elements, key=lambda x: x.comments[0]):
file_elements.append(elem)
section_file_elements = []
file_elements.append(Comment(header_comment))
if key in keys_to_objects:
keys_to_objects[key].add_comments(comments)
duplicates_found.append(key)
else:
loc_obj = LocalizationEntry(comments, key, value)
keys_to_objects[key] = loc_obj
section_file_elements.append(loc_obj)
# Adding last section
for elem in sorted(section_file_elements, key=lambda x: x.comments[0]):
file_elements.append(elem)
f.seek(0)
for element in file_elements:
f.write(unicode(element))
f.write(u"\n")
f.truncate()
f.close()
logging.info("Omitted %d duplicates (%s)" % (len(duplicates_found), ",".join(duplicates_found)))
logging.info('Finished handling duplications for "%s"', file_path)
|
def handle_duplications(file_path)
|
Omits the duplications in the strings files.
Keys that appear more than once will be joined into one appearance and the omission will be documented.
Args:
file_path (str): The path to the strings file.
| 2.837529
| 2.757862
| 1.028887
|
for resource in self._client._resources:
# set the name param, the keys now have / in them
potion_resource = self._client._resources[resource]
try:
oc_cls = _model_lookup[resource]
oc_cls._api = self
oc_cls._resource = potion_resource
setattr(self, oc_cls.__name__, oc_cls)
except KeyError: # Ignore resources we don't explicitly model
pass
|
def _copy_resources(self)
|
Copy all of the resources over to the toplevel client
:return: populates self with a pointer to each ._client.Resource
| 9.206866
| 7.792552
| 1.181496
|
username = click.prompt("Please enter your One Codex (email)")
if api_key is not None:
return username, api_key
password = click.prompt("Please enter your password (typing will be hidden)", hide_input=True)
# now get the API key
api_key = fetch_api_key_from_uname(username, password, server)
return username, api_key
|
def login_uname_pwd(server, api_key=None)
|
Prompts user for username and password, gets API key from server
if not provided.
| 4.92258
| 4.800658
| 1.025397
|
# fetch_api_key and check_version expect server to end in /
if server[-1] != "/":
server = server + "/"
# creds file path setup
if creds_file is None:
creds_file = os.path.expanduser("~/.onecodex")
# check if the creds file exists and is readable
if not os.path.exists(creds_file):
if silent:
return None
creds = {}
elif not os.access(creds_file, os.R_OK):
click.echo("Please check the permissions on {}".format(collapse_user(creds_file)), err=True)
sys.exit(1)
else:
# it is, so let's read it!
with open(creds_file, "r") as fp:
try:
creds = json.load(fp)
except ValueError:
click.echo(
"Your ~/.onecodex credentials file appears to be corrupted. " # noqa
"Please delete it and re-authorize.",
err=True,
)
sys.exit(1)
# check for updates if logged in more than one day ago
last_update = creds.get("updated_at") or creds.get("saved_at")
last_update = last_update if last_update else datetime.datetime.now().strftime(DATE_FORMAT)
diff = datetime.datetime.now() - datetime.datetime.strptime(last_update, DATE_FORMAT)
if diff.days >= 1:
# if creds_file is old, check for updates
upgrade_required, msg = check_version(__version__, server)
creds["updated_at"] = datetime.datetime.now().strftime(DATE_FORMAT)
try:
json.dump(creds, open(creds_file, "w"))
except Exception as e:
if e.errno == errno.EACCES:
click.echo(
"Please check the permissions on {}".format(collapse_user(creds_file)),
err=True,
)
sys.exit(1)
else:
raise
if upgrade_required:
click.echo("\nWARNING: {}\n".format(msg), err=True)
# finally, give the user back what they want (whether silent or not)
if silent:
return creds.get("api_key", None)
click.echo(
"Credentials file already exists ({}). Logout first.".format(collapse_user(creds_file)),
err=True,
)
return creds.get("email", None)
# creds_file was not found and we're not silent, so prompt user to login
email, api_key = login_uname_pwd(server, api_key=api_key)
if api_key is None:
click.echo(
"We could not verify your credentials. Either you mistyped your email "
"or password, or your account does not currently have API access. "
"Please contact help@onecodex.com if you continue to experience problems."
)
sys.exit(1)
creds.update(
{
"api_key": api_key,
"saved_at": datetime.datetime.now().strftime(DATE_FORMAT),
"updated_at": None,
"email": email,
}
)
try:
json.dump(creds, open(creds_file, "w"))
except Exception as e:
if e.errno == errno.EACCES:
click.echo("Please check the permissions on {}".format(creds_file), err=True)
sys.exit(1)
else:
raise
click.echo("Your ~/.onecodex credentials file was successfully created.", err=True)
return email
|
def _login(server, creds_file=None, api_key=None, silent=False)
|
Login main function
| 2.739458
| 2.728225
| 1.004117
|
if creds_file is None:
creds_file = os.path.expanduser("~/.onecodex")
try:
os.remove(creds_file)
except Exception as e:
if e.errno == errno.ENOENT:
return False
elif e.errno == errno.EACCES:
click.echo(
"Please check the permissions on {}".format(collapse_user(creds_file)), err=True
)
sys.exit(1)
else:
raise
return True
|
def _remove_creds(creds_file=None)
|
Remove the ~/.onecodex file, returning True if successful or False if the file didn't exist
| 2.828179
| 2.309907
| 1.224369
|
if _remove_creds(creds_file=creds_file):
click.echo("Successfully removed One Codex credentials.", err=True)
sys.exit(0)
else:
click.echo("No One Codex API keys found.", err=True)
sys.exit(1)
|
def _logout(creds_file=None)
|
Logout main function, just rm ~/.onecodex more or less
| 4.038608
| 2.951015
| 1.368549
|
@wraps(fn)
def login_wrapper(ctx, *args, **kwargs):
base_url = os.environ.get("ONE_CODEX_API_BASE", "https://app.onecodex.com")
api_kwargs = {"telemetry": ctx.obj["TELEMETRY"]}
api_key_prior_login = ctx.obj.get("API_KEY")
bearer_token_env = os.environ.get("ONE_CODEX_BEARER_TOKEN")
api_key_env = os.environ.get("ONE_CODEX_API_KEY")
api_key_creds_file = _login(base_url, silent=True)
if api_key_prior_login is not None:
api_kwargs["api_key"] = api_key_prior_login
elif bearer_token_env is not None:
api_kwargs["bearer_token"] = bearer_token_env
elif api_key_env is not None:
api_kwargs["api_key"] = api_key_env
elif api_key_creds_file is not None:
api_kwargs["api_key"] = api_key_creds_file
else:
click.echo(
"The command you specified requires authentication. Please login first.\n", err=True
)
ctx.exit()
ctx.obj["API"] = Api(**api_kwargs)
return fn(ctx, *args, **kwargs)
return login_wrapper
|
def login_required(fn)
|
Requires login before proceeding, but does not prompt the user to login. Decorator should
be used only on Click CLI commands.
Notes
-----
Different means of authentication will be attempted in this order:
1. An API key present in the Click context object from a previous successful authentication.
2. A bearer token (ONE_CODEX_BEARER_TOKEN) in the environment.
3. An API key (ONE_CODEX_API_KEY) in the environment.
4. An API key in the credentials file (~/.onecodex).
| 2.493664
| 2.095085
| 1.190245
|
if json is True:
return self._results()
else:
return self._table()
|
def results(self, json=True)
|
Returns the complete results table for the classification.
Parameters
----------
json : bool, optional
Return result as JSON? Default True.
Returns
-------
table : dict | DataFrame
Return a JSON object with the classification results or a Pandas DataFrame
if json=False.
| 6.001199
| 8.472309
| 0.708331
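A short usage sketch, assuming `ocx` is an authenticated Api() handle and the UUID is a placeholder:
classification = ocx.Classifications.get("xxxxxxxxxxxxxxxx")
raw_json = classification.results()            # dict parsed from the results JSON
table_df = classification.results(json=False)  # pandas DataFrame via the _table() path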
|
# TODO: Consider removing this method... since it's kind of trivial
# May want to replace with something that actually gets genome-size adjusted
# abundances from the results table
if ids is None:
# get the data frame
return self.table()
else:
res = self.table()
return res[res["tax_id"].isin(ids)]
|
def abundances(self, ids=None)
|
Query the results table to get abundance data for all or some tax ids
| 12.489598
| 10.806844
| 1.155712
|
super(Samples, self).save()
if self.metadata is not None:
self.metadata.save()
|
def save(self)
|
Persist changes on this Samples object back to the One Codex server along with any changes
on its metadata (if it has any).
| 6.085618
| 3.003932
| 2.025884
|
res = cls._resource
if not isinstance(files, string_types) and not isinstance(files, tuple):
raise OneCodexException(
"Please pass a string or a tuple of forward and reverse filepaths."
)
if not isinstance(project, Projects) and project is not None:
project_search = Projects.get(project)
if not project_search:
project_search = Projects.where(name=project)
if not project_search:
try:
project_search = Projects.where(project_name=project)
except HTTPError:
project_search = None
if not project_search:
raise OneCodexException("{} is not a valid project UUID".format(project))
if isinstance(project_search, list):
project = project_search[0]
sample_id = upload_sequence(
files,
res._client.session,
res,
metadata=metadata,
tags=tags,
project=project,
coerce_ascii=coerce_ascii,
progressbar=progressbar,
)
return cls.get(sample_id)
|
def upload(
cls, files, metadata=None, tags=None, project=None, coerce_ascii=False, progressbar=None
)
|
Uploads a series of files to the One Codex server.
Parameters
----------
files : `string` or `tuple`
A single path to a file on the system, or a tuple containing a pair of paths. Tuple
values will be interleaved as paired-end reads and both files should contain the same
number of records. Paths to single files will be uploaded as-is.
metadata : `dict`, optional
tags : `list`, optional
project : `string`, optional
UUID of project to associate this sample with.
coerce_ascii : `bool`, optional
If true, rename unicode filenames to ASCII and issue warning.
progressbar : `click.progressbar`, optional
If passed, display a progress bar using Click.
Returns
-------
A `Samples` object upon successful upload. None if the upload failed.
| 3.27581
| 3.154845
| 1.038343
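A hedged usage sketch: `ocx` stands for an authenticated client handle, and the file names, tag, metadata key, and project UUID are placeholders.
# single-end file, uploaded as-is (compressed or not)
sample = ocx.Samples.upload("reads.fastq.gz", tags=["run-42"])

# paired-end reads are passed as a tuple and interleaved client-side
sample = ocx.Samples.upload(
    ("reads_R1.fastq.gz", "reads_R2.fastq.gz"),
    metadata={"platform": "Illumina"},
    project="xxxxxxxxxxxxxxxx",
)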
|
for comment in comments:
if comment not in self.comments and len(comment) > 0:
self.comments.append(comment)
if len(self.comments[0]) == 0:
self.comments.pop(0)
|
def add_comments(self, comments)
|
Add comments to the localization entry
Args:
comments (list of str): The comments to be added to the localization entry.
| 2.524955
| 2.725772
| 0.926327
|
logging.info("Merging translations")
for lang_dir in os.listdir(localization_bundle_path):
if lang_dir == DEFAULT_LANGUAGE_DIRECTORY_NAME:
continue
for translated_path in glob.glob(os.path.join(localization_bundle_path, lang_dir, "*" + TRANSLATED_SUFFIX)):
strings_path = translated_path[:-1 * len(TRANSLATED_SUFFIX)]
localizable_path = os.path.join(localization_bundle_path,
DEFAULT_LANGUAGE_DIRECTORY_NAME,
os.path.basename(strings_path))
localization_merge_back(localizable_path, strings_path, translated_path, strings_path)
|
def merge_translations(localization_bundle_path)
|
Merges the new translation with the old one.
The translated files are saved as '.translated' files and are merged with the old translated files.
Args:
localization_bundle_path (str): The path to the localization bundle.
| 2.969527
| 3.082527
| 0.963342
|
# TODO: Refactor into an Exception class
error_code = resp.status_code
if error_code == 402:
error_message = (
"Please add a payment method to upload more samples. If you continue to "
"experience problems, contact us at help@onecodex.com for assistance."
)
elif error_code == 403:
error_message = "Please login to your One Codex account or pass the appropriate API key."
else:
try:
error_json = resp.json()
except ValueError:
error_json = {}
if "msg" in error_json:
error_message = error_json["msg"].rstrip(".")
elif "message" in error_json:
error_message = error_json["message"].rstrip(".")
else:
error_message = None
if state == "init" and not error_message:
error_message = (
"Could not initialize upload. Are you logged in? If this problem "
"continues, please contact help@onecodex.com for assistance."
)
elif state == "upload" and not error_message:
error_message = (
"File could not be uploaded. If this problem continues, please contact "
"help@onecodex.com for assistance."
)
elif state == "callback" and not error_message:
error_message = (
"Callback could not be completed. If this problem continues, please "
"contact help@onecodex.com for assistance."
)
if error_message is None:
error_message = "Upload failed. Please contact help@onecodex.com for assistance."
raise UploadException(error_message)
|
def raise_api_error(resp, state=None)
|
Raise an exception with a pretty message in various states of upload
| 2.701721
| 2.611308
| 1.034623
|
# Special case validation errors
if len(err_json) == 1 and "validationOf" in err_json[0]:
required_fields = ", ".join(err_json[0]["validationOf"]["required"])
return "Validation error. Requires properties: {}.".format(required_fields)
# General error handling
msg = "; ".join(err.get("message", "") for err in err_json)
# Fallback
if not msg:
msg = "Bad request."
return msg
|
def pretty_print_error(err_json)
|
Pretty print Flask-Potion error messages for the user.
| 4.538056
| 4.617345
| 0.982828
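Two illustrative inputs and the strings the function above would produce for them, derived directly from the branches shown (a sketch, assuming the function is importable):
pretty_print_error([{"validationOf": {"required": ["filename", "size"]}}])
# -> "Validation error. Requires properties: filename, size."

pretty_print_error([{"message": "Sample not found"}, {"message": "Try again later"}])
# -> "Sample not found; Try again later"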
|
if include_references:
return json.dumps(self._resource._properties, cls=PotionJSONEncoder)
else:
return json.dumps(
{
k: v
for k, v in self._resource._properties.items()
if not isinstance(v, Resource) and not k.startswith("$")
},
cls=PotionJSONEncoder,
)
|
def _to_json(self, include_references=True)
|
Convert the model to JSON using the PotionJSONEncoder, automatically
resolving the resource as needed (the `_properties` call handles this).
| 3.067401
| 2.144631
| 1.43027
|
return cls.where(sort=sort, limit=limit)
|
def all(cls, sort=None, limit=None)
|
Returns all objects of this type. Alias for where() (without filter arguments).
See `where` for documentation on the `sort` and `limit` parameters.
| 6.556911
| 5.158731
| 1.271032
|
check_bind(cls)
# do this here to avoid passing this on to potion
filter_func = keyword_filters.pop("filter", None)
public = False
if any(x["rel"] == "instances_public" for x in cls._resource._schema["links"]):
public = keyword_filters.pop("public", False)
instances_route = keyword_filters.pop(
"_instances", "instances" if not public else "instances_public"
)
schema = next(l for l in cls._resource._schema["links"] if l["rel"] == instances_route)
sort_schema = schema["schema"]["properties"]["sort"]["properties"]
where_schema = schema["schema"]["properties"]["where"]["properties"]
sort = generate_potion_sort_clause(keyword_filters.pop("sort", None), sort_schema)
limit = keyword_filters.pop("limit", None if not public else 1000)
where = {}
# we're filtering by fancy objects (like SQLAlchemy's filter)
if len(filters) > 0:
if len(filters) == 1 and isinstance(filters[0], dict):
where = filters[0]
elif all(isinstance(f, six.string_types) for f in filters):
# if it's a list of strings, treat it as a multiple "get" request
where = {"$uri": {"$in": [cls._convert_id_to_uri(f) for f in filters]}}
else:
# we're doing some more advanced filtering
raise NotImplementedError("Advanced filtering hasn't been implemented yet")
# we're filtering by keyword arguments (like SQLAlchemy's filter_by)
if len(keyword_filters) > 0:
for k, v in generate_potion_keyword_where(keyword_filters, where_schema, cls).items():
if k in where:
raise AttributeError("Multiple definitions for same field {}".format(k))
where[k] = v
# the potion-client method returns an iterator (which lazily fetches the records
# using `per_page` instances per request), so for limiting we only want to fetch the first
# n records (and not instantiate all of the available records, which is what would happen if we just sliced)
cursor = getattr(cls._resource, instances_route)(
where=where, sort=sort, per_page=DEFAULT_PAGE_SIZE
)
if limit is not None:
cursor = itertools.islice(cursor, limit)
# finally, apply local filtering function on objects before returning
wrapped = [cls(_resource=r) for r in cursor]
if filter_func:
if callable(filter_func):
wrapped = [obj for obj in wrapped if filter_func(obj) is True]
else:
raise OneCodexException(
"Expected callable for filter, got: {}".format(type(filter_func).__name__)
)
return wrapped
|
def where(cls, *filters, **keyword_filters)
|
Retrieves objects (Samples, Classifications, etc.) from the One Codex server.
Parameters
----------
filters : objects
Advanced filters to use (not implemented)
sort : string | list, optional
Sort the results by this field (or list of fields). By default in descending order,
but if any of the fields start with the special character ^, sort in ascending order.
For example, sort=['size', '^filename'] will sort by size from largest to smallest and
filename from A-Z for items with the same size.
limit : integer, optional
Number of records to return. For smaller searches, this can reduce the number of
network requests made.
keyword_filters : strings | objects
Filter the results by specific keywords (or filter objects, in advanced usage)
Examples
--------
You can filter objects that are returned locally using a lambda function:
# returns only samples with a filename ending in '.gz'
my_samples = Samples.where(filter=lambda s: s.filename.endswith('.gz'))
Returns
-------
list
A list of all objects matching these filters. If no filters are passed, this
matches all objects.
| 4.577348
| 4.724544
| 0.968844
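A few additional call patterns, as a sketch; `ocx` is an assumed authenticated client and the field names are examples only.
# sort by size (descending) then filename (ascending), fetching at most 25 records
biggest = ocx.Samples.where(sort=["size", "^filename"], limit=25)

# keyword arguments become Potion "where" clauses
exact = ocx.Samples.where(filename="reads.fastq.gz")

# a callable passed as filter= is applied locally after fetching
gzipped = ocx.Samples.where(filter=lambda s: s.filename.endswith(".gz"))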
|
check_bind(cls)
# we're just retrieving one object from its uuid
try:
resource = cls._resource.fetch(uuid)
if isinstance(resource, list):
# TODO: Investigate why potion .fetch()
# method is occasionally returning a list here...
if len(resource) == 1:
resource = resource[0]
else:
raise TypeError("Potion-Client error in fetching resource")
except HTTPError as e:
# 404 error means this doesn't exist
if e.response.status_code == 404:
return None
else:
raise e
return cls(_resource=resource)
|
def get(cls, uuid)
|
Retrieve one specific object from the server by its UUID (unique 16-character id). UUIDs
can be found in the web browser's address bar while viewing analyses and other objects.
Parameters
----------
uuid : string
UUID of the object to retrieve.
Returns
-------
OneCodexBase | None
The object with that UUID or None if no object could be found.
Examples
--------
>>> api.Samples.get('xxxxxxxxxxxxxxxx')
<Sample xxxxxxxxxxxxxxxx>
| 6.378347
| 7.510483
| 0.849259
|
check_bind(self)
if self.id is None:
raise ServerError("{} object does not exist yet".format(self.__class__.name))
elif not self.__class__._has_schema_method("destroy"):
raise MethodNotSupported("{} do not support deletion.".format(self.__class__.__name__))
try:
self._resource.delete()
except HTTPError as e:
if e.response.status_code == 403:
raise PermissionDenied("") # FIXME: is this right?
else:
raise e
|
def delete(self)
|
Delete this object from the One Codex server.
| 5.4529
| 5.145936
| 1.059652
|
check_bind(self)
creating = self.id is None
if creating and not self.__class__._has_schema_method("create"):
raise MethodNotSupported("{} do not support creating.".format(self.__class__.__name__))
if not creating and not self.__class__._has_schema_method("update"):
raise MethodNotSupported("{} do not support updating.".format(self.__class__.__name__))
try:
self._resource.save()
except HTTPError as e:
if e.response.status_code == 400:
err_json = e.response.json().get("errors", [])
msg = pretty_print_error(err_json)
raise ServerError(msg)
elif e.response.status_code == 404:
action = "creating" if creating else "updating"
raise MethodNotSupported(
"{} do not support {}.".format(self.__class__.__name__, action)
)
elif e.response.status_code == 409:
raise ServerError("This {} object already exists".format(self.__class__.__name__))
else:
raise e
|
def save(self)
|
Either create or persist changes on this object back to the One Codex server.
| 2.876935
| 2.647536
| 1.086646
|
if not isinstance(file_path, tuple):
raise OneCodexException("Cannot get the interleaved filename without a tuple.")
if re.match(".*[._][Rr][12][_.].*", file_path[0]):
return re.sub("[._][Rr][12]", "", file_path[0])
else:
warnings.warn("Paired-end filenames do not match--are you sure they are correct?")
return file_path[0]
|
def interleaved_filename(file_path)
|
Return filename used to represent a set of paired-end files. Assumes Illumina-style naming
conventions where each file has _R1_ or _R2_ in its name.
| 6.019132
| 5.040504
| 1.194153
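An example of the naming behaviour, following the Illumina-style convention described above (file names are hypothetical):
interleaved_filename(("sample_R1_001.fastq", "sample_R2_001.fastq"))
# -> "sample_001.fastq"  (the "_R1" token is stripped from the first path)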
|
_, ext = os.path.splitext(file_path)
if uncompressed:
if ext in {".gz", ".gzip"}:
with gzip.GzipFile(file_path, mode="rb") as fp:
try:
fp.seek(0, os.SEEK_END)
return fp.tell()
except ValueError:
# on python2, cannot seek from end and must instead read to end
fp.seek(0)
while len(fp.read(8192)) != 0:
pass
return fp.tell()
elif ext in {".bz", ".bz2", ".bzip", ".bzip2"}:
with bz2.BZ2File(file_path, mode="rb") as fp:
fp.seek(0, os.SEEK_END)
return fp.tell()
return os.path.getsize(file_path)
|
def _file_size(file_path, uncompressed=False)
|
Return size of a single file, compressed or uncompressed
| 2.34716
| 2.302177
| 1.019539
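A quick sketch of the two modes, assuming a file named reads.fastq.gz exists on disk:
_file_size("reads.fastq.gz")                      # compressed size as stored on disk
_file_size("reads.fastq.gz", uncompressed=True)   # size after decompression (seeks/reads to EOF)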
|
if isinstance(file_path, tuple):
assert len(file_path) == 2
file_size = sum(_file_size(f, uncompressed=True) for f in file_path)
file_path = interleaved_filename(file_path)
paired = True
else:
file_size = _file_size(file_path, uncompressed=False)
paired = False
new_filename, ext = os.path.splitext(os.path.basename(file_path))
if ext in {".gz", ".gzip", ".bz", ".bz2", ".bzip"}:
compressed = ext
new_filename, ext = os.path.splitext(new_filename)
else:
compressed = ""
# strip compressed extension if paired-end, since we're going to upload uncompressed
if paired and compressed:
final_filename = new_filename + ext
else:
final_filename = new_filename + ext + compressed
if enforce_fastx:
if ext in {".fa", ".fna", ".fasta"}:
file_format = "fasta"
elif ext in {".fq", ".fastq"}:
file_format = "fastq"
else:
raise UploadException(
"{}: extension must be one of .fa, .fna, .fasta, .fq, .fastq".format(final_filename)
)
else:
file_format = None
if file_size == 0:
raise UploadException("{}: empty files can not be uploaded".format(final_filename))
return final_filename, file_size, file_format
|
def _file_stats(file_path, enforce_fastx=True)
|
Return information about the file path (or paths, if paired), prior to upload.
Parameters
----------
file_path : `string` or `tuple`
System path to the file(s) to be uploaded
Returns
-------
`string`
Filename, minus compressed extension (.gz or .bz2). If paired, use first path to generate
the filename that will be used to represent both paths in the pair.
`integer`
If paired, the uncompressed file size of both files in the path. If single, the raw file
size whether compressed or not. Pairs are always uploaded uncompressed, whereas single files
are uploaded in whatever format they're in. One Codex will uncompress and re-compress as
appropriate.
{'fasta', 'fastq'}
The format of the file being uploaded, guessed only by its extension. If paired, this
determines how many lines to pull from each file during interleaving.
| 2.730831
| 2.496361
| 1.093925
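Expected return values for two hypothetical inputs, following the logic above (the files must exist on disk for the size calls to succeed):
_file_stats("reads.fastq.gz")
# -> ("reads.fastq.gz", <compressed size on disk>, "fastq")

_file_stats(("reads_R1.fastq.gz", "reads_R2.fastq.gz"))
# -> ("reads.fastq", <combined uncompressed size>, "fastq")  # ".gz" dropped for pairs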
|
upload_args = {
"filename": file_name,
"size": file_size,
"upload_type": "standard", # this is multipart form data
}
if metadata:
# format metadata keys as snake case
new_metadata = {}
for md_key, md_val in metadata.items():
new_metadata[snake_case(md_key)] = md_val
upload_args["metadata"] = new_metadata
if tags:
upload_args["tags"] = tags
if project:
upload_args["project"] = getattr(project, "id", project)
try:
upload_info = samples_resource.init_upload(upload_args)
except requests.exceptions.HTTPError as e:
raise_api_error(e.response, state="init")
except requests.exceptions.ConnectionError:
raise_connectivity_error(file_name)
return upload_info
|
def _call_init_upload(file_name, file_size, metadata, tags, project, samples_resource)
|
Call init_upload at the One Codex API and return data used to upload the file.
Parameters
----------
file_name : `string`
The file_name you wish to associate this fastx file with at One Codex.
file_size : `integer`
Accurate size of file to be uploaded, in bytes.
metadata : `dict`, optional
tags : `list`, optional
project : `string`, optional
UUID of project to associate this sample with.
samples_resource : `onecodex.models.Samples`
Wrapped potion-client object exposing `init_upload` and `confirm_upload` routes to mainline.
Returns
-------
`dict`
Contains, at a minimum, 'upload_url' and 'sample_id'. Should also contain various additional
data used to upload the file to fastx-proxy, a user's S3 bucket, or an intermediate bucket.
| 2.934888
| 3.289283
| 0.892258
|
upload_args = {"filename": file_name}
if metadata:
# format metadata keys as snake case
new_metadata = {}
for md_key, md_val in metadata.items():
new_metadata[snake_case(md_key)] = md_val
upload_args["metadata"] = new_metadata
if tags:
upload_args["tags"] = tags
if project:
upload_args["project"] = getattr(project, "id", project)
return upload_args
|
def _make_retry_fields(file_name, metadata, tags, project)
|
Generate fields to send to init_multipart_upload in the case that a Sample upload via
fastx-proxy fails.
Parameters
----------
file_name : `string`
The file_name you wish to associate this fastx file with at One Codex.
metadata : `dict`, optional
tags : `list`, optional
project : `string`, optional
UUID of project to associate this sample with.
Returns
-------
`dict`
Contains metadata fields that will be integrated into the Sample model created when
init_multipart_upload is called.
| 2.693162
| 3.025556
| 0.890138
|
filename, file_size, file_format = _file_stats(files)
# if filename cannot be represented as ascii, raise and suggest renaming
try:
# python2
ascii_fname = unidecode(unicode(filename))
except NameError:
ascii_fname = unidecode(filename)
if filename != ascii_fname:
if coerce_ascii:
# TODO: Consider warnings.warn here instead
logging.warn(
"Renaming {} to {}, must be ASCII\n".format(filename.encode("utf-8"), ascii_fname)
)
filename = ascii_fname
else:
raise OneCodexException("Filenames must be ascii. Try using --coerce-ascii")
# disable progressbar while keeping context manager
if not progressbar:
progressbar = FakeProgressBar()
# file_path is the path to the file on this disk. file_name is what we'll call the file in the
# mainline database. file_size is the sum of both files in a pair, or the size of an unpaired
# file. if paired, file_size is the uncompressed size. if unpaired, file_size is the actual
# size on disk. unpaired files are uploaded as-is. paired files are decompressed, interleaved,
# and uploaded as uncompressed data.
with progressbar as bar:
if isinstance(files, tuple):
fobj = FASTXInterleave(files, file_size, file_format, bar)
else:
fobj = FilePassthru(files, file_size, bar)
# must call init_upload in this loop in order to get a sample uuid we can call
# cancel_upload on later if user hits ctrl+c
fields = _call_init_upload(filename, file_size, metadata, tags, project, samples_resource)
def cancel_atexit():
bar.canceled = True
bar.update(1)
logging.info("Canceled upload for sample: {}".format(fields["sample_id"]))
samples_resource.cancel_upload({"sample_id": fields["sample_id"]})
atexit_register(cancel_atexit)
# if the upload via init_upload fails, upload_sequence_fileobj will call
# init_multipart_upload, which accepts metadata to be integrated into a newly-created
# Sample model. if the s3 intermediate route is used, two Sample models will ultimately
# exist on mainline: the failed fastx-proxy upload and the successful s3 intermediate.
retry_fields = _make_retry_fields(filename, metadata, tags, project)
try:
sample_id = upload_sequence_fileobj(
fobj, filename, fields, retry_fields, session, samples_resource
)
atexit_unregister(cancel_atexit)
return sample_id
except KeyboardInterrupt:
cancel_atexit()
atexit_unregister(cancel_atexit)
raise
|
def upload_sequence(
files,
session,
samples_resource,
metadata=None,
tags=None,
project=None,
coerce_ascii=False,
progressbar=None,
)
|
Uploads a sequence file (or pair of files) to the One Codex server via either our proxy or directly to S3.
Parameters
----------
files : `string` or `tuple`
A single path to a file on the system, or a tuple containing a pair of paths. Tuples will be
interleaved as paired-end reads and both files should contain the same number of records.
Paths to single files will be uploaded as-is.
session : `requests.Session`
Connection to One Codex API.
samples_resource : `onecodex.models.Samples`
Wrapped potion-client object exposing `init_upload` and `confirm_upload` methods.
metadata : `dict`, optional
tags : `list`, optional
project : `string`, optional
UUID of project to associate this sample with.
coerce_ascii : `bool`, optional
If true, rename unicode filenames to ASCII and issue warning.
progressbar : `click.progressbar`, optional
If passed, display a progress bar using Click.
Returns
-------
A Sample object for the completed upload.
| 6.465041
| 6.242902
| 1.035583
|
# need an OrderedDict to preserve field order for S3, required for Python 2.7
multipart_fields = OrderedDict()
for k, v in fields["additional_fields"].items():
multipart_fields[str(k)] = str(v)
# this attribute is only in FASTXInterleave and FilePassthru
mime_type = getattr(file_obj, "mime_type", "text/plain")
multipart_fields["file"] = (file_name, file_obj, mime_type)
encoder = MultipartEncoder(multipart_fields)
upload_request = None
try:
upload_request = session.post(
fields["upload_url"],
data=encoder,
headers={"Content-Type": encoder.content_type},
auth={},
)
except requests.exceptions.ConnectionError:
pass
# If we expect a status *always* try to check it,
# waiting up to 4 hours for buffering to complete (~30-50GB file gzipped)
if "status_url" in fields["additional_fields"]:
now = time.time()
while time.time() < (now + 60 * 60 * 4):
try:
resp = session.post(
fields["additional_fields"]["status_url"],
json={"sample_id": fields["sample_id"]},
)
resp.raise_for_status()
except (ValueError, requests.exceptions.RequestException) as e:
logging.debug("Retrying due to error: {}".format(e))
raise RetryableUploadException(
"Unexpected failure of direct upload proxy. Retrying..."
)
if resp.json() and resp.json().get("complete", True) is False:
logging.debug("Blocking on waiting for proxy to complete (in progress)...")
time.sleep(30)
else:
break
# Return if successfully processed
if resp.json().get("code") in [200, 201]:
file_obj.close()
return
elif resp.json().get("code") == 500:
logging.debug("Retrying due to 500 from proxy...")
raise RetryableUploadException("Unexpected issue with direct upload proxy. Retrying...")
else:
raise_api_error(resp, state="upload")
# Direct to S3 case
else:
file_obj.close()
if upload_request.status_code not in [200, 201]:
raise RetryableUploadException("Unknown connectivity issue with proxy upload.")
# Issue a callback -- this only happens in the direct-to-S3 case
try:
if not fields["additional_fields"].get("callback_url"):
samples_resource.confirm_upload(
{"sample_id": fields["sample_id"], "upload_type": "standard"}
)
except requests.exceptions.HTTPError as e:
raise_api_error(e.response, state="callback")
except requests.exceptions.ConnectionError:
raise_connectivity_error()
|
def _direct_upload(file_obj, file_name, fields, session, samples_resource)
|
Uploads a single file-like object via our validating proxy. Maintains compatibility with direct upload
to a user's S3 bucket as well in case we disable our validating proxy.
Parameters
----------
file_obj : `FASTXInterleave`, `FilePassthru`, or a file-like object
A wrapper around a pair of fastx files (`FASTXInterleave`) or a single fastx file. In the
case of paired files, they will be interleaved and uploaded uncompressed. In the case of a
single file, it will simply be passed through (`FilePassthru`) to One Codex, compressed
or otherwise. If a file-like object is given, its mime-type will be sent as 'text/plain'.
file_name : `string`
The file_name you wish to associate this fastx file with at One Codex.
fields : `dict`
Additional data fields to include as JSON in the POST. Must include 'sample_id' and
'upload_url' at a minimum.
samples_resource : `onecodex.models.Samples`
Wrapped potion-client object exposing `init_upload` and `confirm_upload` routes to mainline.
Raises
------
RetryableUploadException
In cases where the proxy is temporarily down or we experience connectivity issues
UploadException
In other cases where the proxy determines the upload is invalid and should *not* be retried.
| 4.346481
| 3.859188
| 1.126268
|
# First attempt to upload via our validating proxy
try:
_direct_upload(file_obj, file_name, fields, session, samples_resource)
sample_id = fields["sample_id"]
except RetryableUploadException:
# upload failed--retry direct upload to S3 intermediate
logging.error("{}: Connectivity issue, trying direct upload...".format(file_name))
file_obj.seek(0) # reset file_obj back to start
try:
retry_fields = samples_resource.init_multipart_upload(retry_fields)
except requests.exceptions.HTTPError as e:
raise_api_error(e.response, state="init")
except requests.exceptions.ConnectionError:
raise_connectivity_error(file_name)
s3_upload = _s3_intermediate_upload(
file_obj,
file_name,
retry_fields,
session,
samples_resource._client._root_url + retry_fields["callback_url"], # full callback url
)
sample_id = s3_upload.get("sample_id", "<UUID not yet assigned>")
logging.info("{}: finished as sample {}".format(file_name, sample_id))
return sample_id
|
def upload_sequence_fileobj(file_obj, file_name, fields, retry_fields, session, samples_resource)
|
Uploads a single file-like object to the One Codex server via either fastx-proxy or directly
to S3.
Parameters
----------
file_obj : `FASTXInterleave`, `FilePassthru`, or a file-like object
A wrapper around a pair of fastx files (`FASTXInterleave`) or a single fastx file. In the
case of paired files, they will be interleaved and uploaded uncompressed. In the case of a
single file, it will simply be passed through (`FilePassthru`) to One Codex, compressed
or otherwise. If a file-like object is given, its mime-type will be sent as 'text/plain'.
file_name : `string`
The file_name you wish to associate this fastx file with at One Codex.
fields : `dict`
Additional data fields to include as JSON in the POST. Must include 'sample_id' and
'upload_url' at a minimum.
retry_fields : `dict`
Metadata sent to `init_multipart_upload` in the case that the upload via fastx-proxy fails.
session : `requests.Session`
Connection to One Codex API.
samples_resource : `onecodex.models.Samples`
Wrapped potion-client object exposing `init_upload` and `confirm_upload` routes to mainline.
Raises
------
UploadException
In the case of a fatal exception during an upload.
Returns
-------
`string` containing sample ID of newly uploaded file.
| 5.071667
| 4.744657
| 1.068922
|
if not isinstance(file_path, six.string_types):
raise ValueError(
"Expected file_path to be a string, got {}".format(type(file_path).__name__)
)
file_name, file_size, _ = _file_stats(file_path, enforce_fastx=False)
# disable progressbar while keeping context manager
if not progressbar:
progressbar = FakeProgressBar()
with progressbar as bar:
fobj = FilePassthru(file_path, file_size, bar)
document_id = upload_document_fileobj(fobj, file_name, session, documents_resource)
bar.finish()
return document_id
|
def upload_document(file_path, session, documents_resource, progressbar=None)
|
Uploads a single document file to the One Codex server, directly to S3 via an intermediate
bucket.
Parameters
----------
file_path : `str`
A path to a file on the system.
session : `requests.Session`
Connection to One Codex API.
documents_resource : `onecodex.models.Documents`
Wrapped potion-client object exposing `init_upload` and `confirm_upload` methods.
progressbar : `click.progressbar`, optional
If passed, display a progress bar using Click.
Raises
------
UploadException
In the case of a fatal exception during an upload.
Returns
-------
A `str` document ID for the newly uploaded file.
| 4.528031
| 4.892467
| 0.925511
|
try:
fields = documents_resource.init_multipart_upload()
except requests.exceptions.HTTPError as e:
raise_api_error(e.response, state="init")
except requests.exceptions.ConnectionError:
raise_connectivity_error(file_name)
s3_upload = _s3_intermediate_upload(
file_obj,
file_name,
fields,
session,
documents_resource._client._root_url + fields["callback_url"], # full callback url
)
document_id = s3_upload.get("document_id", "<UUID not yet assigned>")
logging.info("{}: finished as document {}".format(file_name, document_id))
return document_id
|
def upload_document_fileobj(file_obj, file_name, session, documents_resource, log=None)
|
Uploads a single file-like object to the One Codex server directly to S3.
Parameters
----------
file_obj : `FilePassthru`, or a file-like object
If a file-like object is given, its mime-type will be sent as 'text/plain'. Otherwise,
`FilePassthru` will send a compressed type if the file is gzip'd or bzip'd.
file_name : `string`
The file_name you wish to associate this file with at One Codex.
fields : `dict`
Additional data fields to include as JSON in the POST.
session : `requests.Session`
Connection to One Codex API.
documents_resource : `onecodex.models.Documents`
Wrapped potion-client object exposing `init_upload` and `confirm_upload` routes to mainline.
Notes
-----
In contrast to `upload_sequence_fileobj`, this method will /only/ upload to an S3 intermediate
bucket--not via our direct proxy or directly to a user's S3 bucket with a signed request.
Raises
------
UploadException
In the case of a fatal exception during an upload.
Returns
-------
`string` containing the document UUID of the newly uploaded file.
| 5.761273
| 5.600255
| 1.028752
|
import boto3
from boto3.s3.transfer import TransferConfig
from boto3.exceptions import S3UploadFailedError
# actually do the upload
client = boto3.client(
"s3",
aws_access_key_id=fields["upload_aws_access_key_id"],
aws_secret_access_key=fields["upload_aws_secret_access_key"],
)
# if boto uses threads, ctrl+c won't work
config = TransferConfig(use_threads=False)
# let boto3 update our progressbar rather than our FASTX wrappers, if applicable
boto_kwargs = {}
if hasattr(file_obj, "progressbar"):
boto_kwargs["Callback"] = file_obj.progressbar.update
file_obj.progressbar = None
try:
client.upload_fileobj(
file_obj,
fields["s3_bucket"],
fields["file_id"],
ExtraArgs={"ServerSideEncryption": "AES256"},
Config=config,
**boto_kwargs
)
except S3UploadFailedError:
raise_connectivity_error(file_name)
# issue a callback
try:
resp = session.post(
callback_url,
json={
"s3_path": "s3://{}/{}".format(fields["s3_bucket"], fields["file_id"]),
"filename": file_name,
"import_as_document": fields.get("import_as_document", False),
},
)
except requests.exceptions.ConnectionError:
raise_connectivity_error(file_name)
if resp.status_code != 200:
raise_connectivity_error(file_name)
try:
return resp.json()
except ValueError:
return {}
|
def _s3_intermediate_upload(file_obj, file_name, fields, session, callback_url)
|
Uploads a single file-like object to an intermediate S3 bucket which One Codex can pull from
after receiving a callback.
Parameters
----------
file_obj : `FASTXInterleave`, `FilePassthru`, or a file-like object
A wrapper around a pair of fastx files (`FASTXInterleave`) or a single fastx file. In the
case of paired files, they will be interleaved and uploaded uncompressed. In the case of a
single file, it will simply be passed through (`FilePassthru`) to One Codex, compressed
or otherwise. If a file-like object is given, its mime-type will be sent as 'text/plain'.
file_name : `string`
The file_name you wish to associate this fastx file with at One Codex.
fields : `dict`
Additional data fields to include as JSON in the POST.
callback_url : `string`
API callback at One Codex which will trigger a pull from this S3 bucket.
Raises
------
UploadException
In the case of a fatal exception during an upload. Note we rely on boto3 to handle its own retry logic.
Returns
-------
`dict` : JSON results from internal confirm import callback URL
| 2.767095
| 2.868206
| 0.964748
|
assert loc == 0
# rewind progress bar
if self.progressbar:
self.progressbar.update(-self._tell)
self._fp_left.seek(loc)
self._fp_right.seek(loc)
self._tell = loc
self._buf = Buffer()
|
def seek(self, loc)
|
Called if upload fails and must be retried.
| 7.161305
| 7.357125
| 0.973384
|
assert loc == 0
# rewind progress bar
if self.progressbar:
self.progressbar.update(-self._fp.tell())
self._fp.seek(loc)
|
def seek(self, loc)
|
Called if upload fails and must be retried.
| 7.871243
| 8.153096
| 0.96543
|
old_localizable_dict = generate_localization_key_to_entry_dictionary_from_file(old_strings_file)
output_file_elements = []
f = open_strings_file(new_strings_file, "r+")
for header_comment, comments, key, value in extract_header_comment_key_value_tuples_from_file(f):
if len(header_comment) > 0:
output_file_elements.append(Comment(header_comment))
localize_value = value
if key in old_localizable_dict:
localize_value = old_localizable_dict[key].value
output_file_elements.append(LocalizationEntry(comments, key, localize_value))
f.close()
write_file_elements_to_strings_file(old_strings_file, output_file_elements)
|
def merge_strings_files(old_strings_file, new_strings_file)
|
Merges the old strings file with the new one.
Args:
old_strings_file (str): The path to the old strings file (previously produced, and possibly altered)
new_strings_file (str): The path to the new strings file (newly produced).
| 3.15447
| 3.347608
| 0.942306
|
parser.add_argument("--log_path", default="", help="The log file path")
parser.add_argument("--verbose", help="Increase logging verbosity", action="store_true")
|
def configure_parser(self, parser)
|
Adds the necessary supported arguments to the argument parser.
Args:
parser (argparse.ArgumentParser): The parser to add arguments to.
| 3.950506
| 4.479877
| 0.881833
|
parser = argparse.ArgumentParser(description=self.description())
self.configure_parser(parser)
self.run(parser.parse_args())
|
def run_with_standalone_parser(self)
|
Will run the operation as standalone with a new ArgumentParser
| 3.720838
| 3.125711
| 1.190397
|
if metric not in ("simpson", "chao1", "shannon"):
raise OneCodexException(
"For alpha diversity, metric must be one of: simpson, chao1, shannon"
)
# needs read counts, not relative abundances
if self._guess_normalized():
raise OneCodexException("Alpha diversity requires unnormalized read counts.")
df = self.to_df(rank=rank, normalize=False)
output = {"classification_id": [], metric: []}
for c_id in df.index:
output["classification_id"].append(c_id)
output[metric].append(
skbio.diversity.alpha_diversity(metric, df.loc[c_id].tolist(), [c_id]).values[0]
)
return pd.DataFrame(output).set_index("classification_id")
|
def alpha_diversity(self, metric="simpson", rank="auto")
|
Calculate the diversity within a community.
Parameters
----------
metric : {'simpson', 'chao1', 'shannon'}
The diversity metric to calculate.
rank : {'auto', 'kingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species'}, optional
Analysis will be restricted to abundances of taxa at the specified level.
Returns
-------
pandas.DataFrame, a distance matrix.
| 4.169699
| 3.945018
| 1.056953
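A usage sketch, assuming `collection` is a SampleCollection-like object holding unnormalized read counts (normalized abundances raise the exception shown above):
shannon = collection.alpha_diversity(metric="shannon", rank="species")
print(shannon.head())  # DataFrame indexed by classification_id with one metric column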
|
if metric not in ("jaccard", "braycurtis", "cityblock"):
raise OneCodexException(
"For beta diversity, metric must be one of: jaccard, braycurtis, cityblock"
)
# needs read counts, not relative abundances
if self._guess_normalized():
raise OneCodexException("Beta diversity requires unnormalized read counts.")
df = self.to_df(rank=rank, normalize=False)
counts = []
for c_id in df.index:
counts.append(df.loc[c_id].tolist())
return skbio.diversity.beta_diversity(metric, counts, df.index.tolist())
|
def beta_diversity(self, metric="braycurtis", rank="auto")
|
Calculate the diversity between two communities.
Parameters
----------
metric : {'jaccard', 'braycurtis', 'cityblock'}
The distance metric to calculate.
rank : {'auto', 'kingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species'}, optional
Analysis will be restricted to abundances of taxa at the specified level.
Returns
-------
skbio.stats.distance.DistanceMatrix, a distance matrix.
| 4.138778
| 4.000818
| 1.034483
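A usage sketch under the same assumptions as above (`collection` holds unnormalized counts); to_data_frame() is skbio's helper for viewing the distance matrix as a square pandas DataFrame:
bray = collection.beta_diversity(metric="braycurtis", rank="genus")
print(bray.to_data_frame())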
|
# needs read counts, not relative abundances
if self._guess_normalized():
raise OneCodexException("UniFrac requires unnormalized read counts.")
df = self.to_df(rank=rank, normalize=False)
counts = []
for c_id in df.index:
counts.append(df.loc[c_id].tolist())
tax_ids = df.keys().tolist()
tree = self.tree_build()
tree = self.tree_prune_rank(tree, rank=df.ocx_rank)
# there's a bug (?) in skbio where it expects the root to only have
# one child, so we do a little faking here
from skbio.tree import TreeNode
new_tree = TreeNode(name="fake root")
new_tree.rank = "no rank"
new_tree.append(tree)
# then finally run the calculation and return
if weighted:
return skbio.diversity.beta_diversity(
"weighted_unifrac", counts, df.index.tolist(), tree=new_tree, otu_ids=tax_ids
)
else:
return skbio.diversity.beta_diversity(
"unweighted_unifrac", counts, df.index.tolist(), tree=new_tree, otu_ids=tax_ids
)
|
def unifrac(self, weighted=True, rank="auto")
|
A beta diversity metric that takes into account the relative relatedness of community
members. Weighted UniFrac looks at abundances, unweighted UniFrac looks at presence.
Parameters
----------
weighted : `bool`
Calculate the weighted (True) or unweighted (False) distance metric.
rank : {'auto', 'kingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species'}, optional
Analysis will be restricted to abundances of taxa at the specified level.
Returns
-------
skbio.stats.distance.DistanceMatrix, a distance matrix.
| 4.907873
| 4.882109
| 1.005277
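A usage sketch for both variants; again `collection` is assumed to carry unnormalized read counts:
weighted = collection.unifrac(weighted=True, rank="species")
unweighted = collection.unifrac(weighted=False, rank="species")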
|
# TODO: Hit programmatic endpoint to fetch JWT key, not API key
with requests.Session() as session:
# get the login page normally
text = session.get(server_url + "login").text
# retrieve the CSRF token out of it
csrf = re.search('type="hidden" value="([^"]+)"', text).group(1)
# and resubmit using the username/password *and* the CSRF
login_data = {
"email": username,
"password": password,
"csrf_token": csrf,
"next": "/api/get_token",
}
page = session.post(server_url + "login", data=login_data)
try:
key = page.json()["key"]
except (ValueError, KeyError): # ValueError includes simplejson.decoder.JSONDecodeError
key = None
return key
|
def fetch_api_key_from_uname(username, password, server_url)
|
Retrieves an API key from the One Codex webpage given the username and password
| 4.420158
| 4.450814
| 0.993112
|
def version_inadequate(client_version, server_version):
client_version = tuple([int(x) for x in client_version.split("-")[0].split(".")])
server_version = tuple([int(x) for x in server_version.split(".")])
return client_version < server_version
# this will probably live on /api/v0 forever for compat with older CLI versions
data = requests.post(server + "api/v0/check_for_cli_update", data={"version": version})
if data.status_code != 200:
return False, "Error connecting to server"
data = data.json()
latest_version = data["latest_version"]
if version_inadequate(version, latest_version):
return (
True,
(
"Please upgrade your client to the latest version (v{}) using the command "
"`pip install --upgrade onecodex`".format(latest_version)
),
)
return False, None
|
def check_version(version, server)
|
Check if the current CLI version is supported by the One Codex backend.
Parameters
----------
version : `string`
Current version of the One Codex client library
server : `string`
Complete URL to One Codex server, e.g., https://app.onecodex.com
Returns
-------
`tuple` containing two values:
- True if the user *must* upgrade their software, otherwise False
- An error message if the user should upgrade, otherwise None.
| 3.843582
| 3.464996
| 1.10926
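A hedged usage sketch; the version string is a placeholder, the trailing slash matters because the path is concatenated onto `server`, and the call performs a live POST against the given server:
upgrade_required, msg = check_version("0.9.0", "https://app.onecodex.com/")
if upgrade_required:
    print(msg)  # e.g., a prompt to `pip install --upgrade onecodex`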
|
if value is not None and len(value) != 32:
raise click.BadParameter(
"API Key must be 32 characters long, not {}".format(str(len(value)))
)
else:
return value
|
def valid_api_key(ctx, param, value)
|
Ensures an API key has a valid length (this is a Click callback)
| 2.565888
| 2.172046
| 1.181323
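A sketch of wiring this callback into a Click option; the command and option names are illustrative assumptions:
import click

@click.command()
@click.option("--api-key", default=None, callback=valid_api_key,
              help="32-character One Codex API key")
def login(api_key):
    click.echo("Key accepted" if api_key else "No key supplied")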
|
if not no_pretty:
click.echo(
json.dumps(j, cls=PotionJSONEncoder, sort_keys=True, indent=4, separators=(",", ": "))
)
else:
click.echo(j)
|
def pprint(j, no_pretty)
|
Prints as formatted JSON
| 2.994063
| 2.964657
| 1.009919
|
v = sys.version_info
if v.major == 3:
return False # Python 2 issue
if v.major == 2 and v.minor == 7 and v.micro >= 9:
return False # >= 2.7.9 includes the new SSL updates
try:
# pyOpenSSL + ndg-httpsclient + pyasn1 backport a working SSLContext to old Python 2
import OpenSSL # noqa
import ndg # noqa
import pyasn1 # noqa
return False
except ImportError:
pass
return True
|
def is_insecure_platform()
|
Checks if the current system is missing an SSLContext object
| 4.894665
| 4.788428
| 1.022186
|