| _id | title | partition | text | language | meta_information |
|---|---|---|---|---|---|
q14600
|
set_event_mgr
|
train
|
def set_event_mgr(dinfo, event_mgr, context=None):
"""Wrapper for openjpeg library function opj_set_event_mgr.
"""
argtypes = [ctypes.POINTER(CommonStructType),
ctypes.POINTER(EventMgrType),
ctypes.c_void_p]
OPENJPEG.opj_set_event_mgr.argtypes = argtypes
OPENJPEG.opj_set_event_mgr(ctypes.cast(dinfo,
ctypes.POINTER(CommonStructType)),
event_mgr, context)
|
python
|
{
"resource": ""
}
|
q14601
|
check_load
|
train
|
def check_load(grid, mode):
""" Checks for over-loading of branches and transformers for MV or LV grid.
Parameters
----------
grid : GridDing0
Grid identifier.
mode : str
Kind of grid ('MV' or 'LV').
Returns
-------
:obj:`dict`
Dict of critical branches with max. relative overloading, and the
following format::
{
branch_1: rel_overloading_1,
...,
branch_n: rel_overloading_n
}
:any:`list` of :obj:`GridDing0`
List of critical transformers with the following format::
[trafo_1, ..., trafo_m]
Notes
-----
Lines'/cables' max. capacity (load case and feed-in case) are taken from [#]_.
References
----------
.. [#] dena VNS
See Also
--------
ding0.flexopt.reinforce_measures.reinforce_branches_current :
ding0.flexopt.reinforce_measures.reinforce_branches_voltage :
"""
crit_branches = {}
crit_stations = []
if mode == 'MV':
# load the load factors (conditions) for cables, lines and trafos for load and feed-in case
# load_factor_mv_trans_lc_normal = float(cfg_ding0.get('assumptions',
# 'load_factor_mv_trans_lc_normal'))
load_factor_mv_line_lc_normal = float(cfg_ding0.get('assumptions',
'load_factor_mv_line_lc_normal'))
load_factor_mv_cable_lc_normal = float(cfg_ding0.get('assumptions',
'load_factor_mv_cable_lc_normal'))
#load_factor_mv_trans_fc_normal = float(cfg_ding0.get('assumptions',
# 'load_factor_mv_trans_fc_normal'))
load_factor_mv_line_fc_normal = float(cfg_ding0.get('assumptions',
'load_factor_mv_line_fc_normal'))
load_factor_mv_cable_fc_normal = float(cfg_ding0.get('assumptions',
'load_factor_mv_cable_fc_normal'))
mw2kw = 1e3
kw2mw = 1e-3
# STEP 1: check branches' loads
for branch in grid.graph_edges():
s_max_th = 3**0.5 * branch['branch'].type['U_n'] * branch['branch'].type['I_max_th']
if branch['branch'].kind == 'line':
s_max_th_lcfc = [s_max_th * load_factor_mv_line_lc_normal,
s_max_th * load_factor_mv_line_fc_normal]
elif branch['branch'].kind == 'cable':
s_max_th_lcfc = [s_max_th * load_factor_mv_cable_lc_normal,
s_max_th * load_factor_mv_cable_fc_normal]
else:
raise ValueError('Branch kind is invalid!')
# check loads only for non-aggregated Load Areas (aggregated ones raise an exception and are skipped)
try:
# check if s_res exceeds allowed values for load and feed-in case
# CAUTION: The order of values is fixed! (1. load case, 2. feed-in case)
if any([s_res * mw2kw > _ for s_res, _ in zip(branch['branch'].s_res, s_max_th_lcfc)]):
# save max. relative overloading
crit_branches[branch] = max(branch['branch'].s_res) * mw2kw / s_max_th
except:
pass
# STEP 2: check HV-MV station's load
# NOTE: HV-MV station reinforcement is not required for status-quo
# scenario since HV-MV trafos already sufficient for load+generation
# case as done in MVStationDing0.choose_transformers()
# OLD snippet:
# cum_peak_load = grid.grid_district.peak_load
# cum_peak_generation = grid.station().peak_generation(mode='MVLV')
#
# # reinforcement necessary only if generation > load
# if cum_peak_generation > cum_peak_load:
# grid.station().choose_transformers
#
# cum_trafo_capacity = sum((_.s_max_a for _ in grid.station().transformers()))
#
# max_trafo = max((_.s_max_a for _ in grid.station().transformers()))
#
# # determine number and size of required transformers
# kw2mw = 1e-3
# residual_apparent_power = cum_generation_sum * kw2mw - \
# cum_trafo_capacity
elif mode == 'LV':
raise NotImplementedError
if crit_branches:
logger.info('==> {} branches have load issues.'.format(
len(crit_branches)))
if crit_stations:
logger.info('==> {} stations have load issues.'.format(
len(crit_stations)))
return crit_branches, crit_stations
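# Illustrative sketch (hypothetical equipment values, not taken from the
# ding0 equipment tables): thermal limit of a 20-kV line rated at 210 A.
_s_max_example = 3 ** 0.5 * 20 * 210  # = sqrt(3) * U_n * I_max_th ≈ 7274.6 kVA
# With an assumed load-case factor of 0.6, a power flow result above
# 0.6 * 7274.6 ≈ 4365 kVA would put such a branch into crit_branches.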
|
python
|
{
"resource": ""
}
|
q14602
|
check_voltage
|
train
|
def check_voltage(grid, mode):
""" Checks for voltage stability issues at all nodes for MV or LV grid
Parameters
----------
grid : GridDing0
Grid identifier.
mode : str
Kind of grid ('MV' or 'LV').
Returns
-------
:any:`list` of :any:`GridDing0`
List of critical nodes, sorted descending by voltage difference.
Notes
-----
The examination is done in two steps, according to [#]_ :
1. It is checked #TODO: what?
2. #TODO: what's next?
References
----------
.. [#] dena VNS
"""
crit_nodes = {}
if mode == 'MV':
# load max. voltage difference for load and feedin case
mv_max_v_level_lc_diff_normal = float(cfg_ding0.get('mv_routing_tech_constraints',
'mv_max_v_level_lc_diff_normal'))
mv_max_v_level_fc_diff_normal = float(cfg_ding0.get('mv_routing_tech_constraints',
'mv_max_v_level_fc_diff_normal'))
# check nodes' voltages
voltage_station = grid._station.voltage_res
for node in grid.graph_nodes_sorted():
try:
# compare node's voltage with max. allowed voltage difference for load and feedin case
if (abs(voltage_station[0] - node.voltage_res[0]) > mv_max_v_level_lc_diff_normal) or\
(abs(voltage_station[1] - node.voltage_res[1]) > mv_max_v_level_fc_diff_normal):
crit_nodes[node] = {'node': node,
'v_diff': max([abs(v2-v1) for v1, v2 in zip(node.voltage_res, voltage_station)])}
except:
pass
elif mode == 'LV':
raise NotImplementedError
if crit_nodes:
logger.info('==> {} nodes have voltage issues.'.format(len(crit_nodes)))
return [_['node'] for _ in sorted(crit_nodes.values(), key=lambda _: _['v_diff'], reverse=True)]
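# Illustrative sketch (hypothetical per-unit values): the sort above returns
# the worst node first, e.g. v_diff values of 0.012, 0.031 and 0.007 come
# back in the order shown below.
_example_order = sorted([0.012, 0.031, 0.007], reverse=True)  # [0.031, 0.012, 0.007]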
|
python
|
{
"resource": ""
}
|
q14603
|
get_critical_line_loading
|
train
|
def get_critical_line_loading(grid):
"""
Assign line loading to each branch determined by peak load and peak
generation of descendant branches
The attribute `s_res` is a list of two elements
1. apparent power in load case
2. apparent power in feed-in case
Parameters
----------
grid : ding0.core.network.grids.LVGridDing0
Ding0 LV grid object
Returns
-------
:any:`list`
List of critical branches incl. its line loading
:any:`list`
List of critical stations incl. its transformer loading
"""
cos_phi_load = cfg_ding0.get('assumptions', 'cos_phi_load')
cos_phi_feedin = cfg_ding0.get('assumptions', 'cos_phi_gen')
lf_trafo_load = cfg_ding0.get('assumptions',
"load_factor_lv_trans_lc_normal")
lf_trafo_gen = cfg_ding0.get('assumptions',
"load_factor_lv_trans_fc_normal")
critical_branches = []
critical_stations = []
# Convert grid to a tree (a directed graph);
# based on this tree, descendants of each node are accessible
station = grid._station
tree = nx.dfs_tree(grid._graph, station)
for node in tree.nodes():
# list of descendant nodes including the node itself
descendants = list(nx.descendants(tree, node))
descendants.append(node)
if isinstance(node, LVStationDing0):
# determine cumulative peak load at node and assign to branch
peak_load, peak_gen = peak_load_generation_at_node(descendants)
# get trafos cumulative apparent power
s_max_trafos = sum([_.s_max_a for _ in node._transformers])
# compare with load and generation connected to
if (((peak_load / cos_phi_load) > s_max_trafos * lf_trafo_load) or
((peak_gen / cos_phi_feedin) > s_max_trafos * lf_trafo_gen)):
critical_stations.append(
{'station': node,
's_max': [
peak_load / cos_phi_load,
peak_gen / cos_phi_feedin]})
else:
# predecessor node of node
predecessors = list(tree.predecessors(node))
# a non-meshed grid topology returns a list with only 1 item
predecessor = predecessors[0]
# get preceding branch
branches = grid.graph_branches_from_node(node)
preceding_branch = [branch for branch in branches
if branch[0] is predecessor][0]
# determine cumulative peak load at node and assign to branch
peak_load, peak_gen = peak_load_generation_at_node(descendants)
s_max_th = 3 ** 0.5 * preceding_branch[1]['branch'].type['U_n'] * \
preceding_branch[1]['branch'].type['I_max_th'] / 1e3
if (((peak_load / cos_phi_load) > s_max_th) or
((peak_gen / cos_phi_feedin) > s_max_th)):
critical_branches.append(
{'branch': preceding_branch[1]['branch'],
's_max': [
peak_load / cos_phi_load,
peak_gen / cos_phi_feedin]})
return critical_branches, critical_stations
|
python
|
{
"resource": ""
}
|
q14604
|
peak_load_generation_at_node
|
train
|
def peak_load_generation_at_node(nodes):
"""
Get maximum occurring load and generation at a certain node
Summarizes peak loads and nominal generation power of descendant nodes
of a branch
Parameters
----------
nodes : :any:`list`
Any LV grid Ding0 node object that is part of the grid topology
Returns
-------
:any:`float`
peak_load : Sum of peak loads of descendant nodes
:any:`float`
peak_generation : Sum of nominal power of generation at descendant nodes
"""
loads = [node.peak_load for node in nodes
if isinstance(node, LVLoadDing0)]
peak_load = sum(loads)
generation = [node.capacity for node in nodes
if isinstance(node, GeneratorDing0)]
peak_generation = sum(generation)
return peak_load, peak_generation
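# Minimal sketch of the aggregation above with plain numbers instead of
# Ding0 node objects (hypothetical peak loads and capacities in kW):
_loads = [12, 30, 8]  # peak loads of descendant LVLoadDing0 nodes
_gens = [10, 30]      # capacities of descendant GeneratorDing0 nodes
assert (sum(_loads), sum(_gens)) == (50, 40)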
|
python
|
{
"resource": ""
}
|
q14605
|
voltage_delta_stub
|
train
|
def voltage_delta_stub(grid, tree, main_branch_node, stub_node, r_preceding,
x_preceding):
"""
Determine voltage for stub branches
Parameters
----------
grid : LVGridDing0
Ding0 grid object
tree : :networkx:`NetworkX Graph Obj< >`
Tree of grid topology
main_branch_node : graph node
Node of main branch that stub branch node is connected to
r_preceding : float
Resistance of preceding grid
x_preceding : float
Reactance of preceding grid
Returns
-------
:any:`list` of :any:`float`
Delta voltage for node in load case and feed-in case
"""
cos_phi_load = cfg_ding0.get('assumptions', 'cos_phi_load')
cos_phi_feedin = cfg_ding0.get('assumptions', 'cos_phi_gen')
v_nom = cfg_ding0.get('assumptions', 'lv_nominal_voltage')
omega = 2 * math.pi * 50
stub_branch = [_ for _ in grid.graph_branches_from_node(main_branch_node) if
_[0] == stub_node][0][1]
r_stub = stub_branch['branch'].type['R'] * stub_branch[
'branch'].length / 1e3
x_stub = stub_branch['branch'].type['L'] / 1e3 * omega * \
stub_branch['branch'].length / 1e3
s_max_gen = [_.capacity / cos_phi_feedin
for _ in tree.successors(stub_node)
if isinstance(_, GeneratorDing0)]
if s_max_gen:
s_max_gen = s_max_gen[0]
v_delta_stub_gen = voltage_delta_vde(v_nom, s_max_gen, r_stub + r_preceding,
x_stub + x_preceding, cos_phi_feedin)
else:
v_delta_stub_gen = 0
s_max_load = [_.peak_load / cos_phi_load
for _ in tree.successors(stub_node)
if isinstance(_, LVLoadDing0)]
if s_max_load:
s_max_load = s_max_load[0]
v_delta_stub_load = voltage_delta_vde(v_nom, s_max_load, r_stub + r_preceding,
x_stub + x_preceding, cos_phi_load)
else:
v_delta_stub_load = 0
return [v_delta_stub_load, v_delta_stub_gen]
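# Illustrative impedance calculation as performed above (hypothetical cable
# parameters: R' = 0.206 Ohm/km, L' = 0.256 mH/km, length = 250 m; math is
# already imported by this module):
_omega = 2 * math.pi * 50
_r_stub = 0.206 * 250 / 1e3                 # 0.0515 Ohm
_x_stub = 0.256 / 1e3 * _omega * 250 / 1e3  # ≈ 0.0201 Ohm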
|
python
|
{
"resource": ""
}
|
q14606
|
get_voltage_at_bus_bar
|
train
|
def get_voltage_at_bus_bar(grid, tree):
"""
Determine voltage level at bus bar of MV-LV substation
Parameters
----------
grid : LVGridDing0
Ding0 grid object
tree : :networkx:`NetworkX Graph Obj< >`
Tree of grid topology
Returns
-------
:any:`list`
Voltage deltas at bus bar. First item refers to the load case, second
item to the feed-in (generation) case
"""
# voltage at substation bus bar
r_mv_grid, x_mv_grid = get_mv_impedance(grid)
r_trafo = sum([tr.r for tr in grid._station._transformers])
x_trafo = sum([tr.x for tr in grid._station._transformers])
cos_phi_load = cfg_ding0.get('assumptions', 'cos_phi_load')
cos_phi_feedin = cfg_ding0.get('assumptions', 'cos_phi_gen')
v_nom = cfg_ding0.get('assumptions', 'lv_nominal_voltage')
# loads and generators connected to bus bar
bus_bar_load = sum(
[node.peak_load for node in tree.successors(grid._station)
if isinstance(node, LVLoadDing0)]) / cos_phi_load
bus_bar_generation = sum(
[node.capacity for node in tree.successors(grid._station)
if isinstance(node, GeneratorDing0)]) / cos_phi_feedin
v_delta_load_case_bus_bar = voltage_delta_vde(v_nom,
bus_bar_load,
(r_mv_grid + r_trafo),
(x_mv_grid + x_trafo),
cos_phi_load)
v_delta_gen_case_bus_bar = voltage_delta_vde(v_nom,
bus_bar_generation,
(r_mv_grid + r_trafo),
-(x_mv_grid + x_trafo),
cos_phi_feedin)
return v_delta_load_case_bus_bar, v_delta_gen_case_bus_bar
|
python
|
{
"resource": ""
}
|
q14607
|
LocalSearchSolver.operator_cross
|
train
|
def operator_cross(self, graph, solution, op_diff_round_digits):
# TODO: check docstring
"""applies Cross inter-route operator to solution
Takes every node from every route and calculates savings when inserted
into all possible positions in other routes. Insertion is done at
position with max. saving and procedure starts over again with newly
created graph as input. Stops when no improvement is found.
Args
----
graph: :networkx:`NetworkX Graph Obj< >`
Graph instance
solution: BaseSolution
Current solution to be improved
op_diff_round_digits: int
Precision (number of decimal digits) for rounding route length differences.
*Details*: In some cases when an exchange is performed on two routes with one node each,
the difference between the two solutions (before and after the exchange) is not zero.
This is due to internal rounding errors of the float type. So that the loop doesn't get
stuck alternating between these two solutions, we need an additional criterion to avoid
this behaviour: a threshold to treat values very close to zero as if they were zero
(for a more detailed description of the matter see http://floating-point-gui.de or
https://docs.python.org/3.5/tutorial/floatingpoint.html)
Returns
-------
LocalSearchSolution
A solution (LocalSearchSolution class)
Todo
----
* allow moves of a 2-node chain
* Remove ugly nested loops, convert to more efficient matrix operations
"""
# shorter var names for loop
dm = graph._matrix
dn = graph._nodes
|
python
|
{
"resource": ""
}
|
q14608
|
LocalSearchSolver.solve
|
train
|
def solve(self, graph, savings_solution, timeout, debug=False, anim=None):
"""Improve initial savings solution using local search
Parameters
----------
graph: :networkx:`NetworkX Graph Obj< >`
Graph instance
savings_solution: SavingsSolution
initial solution of CVRP problem (instance of `SavingsSolution` class)
timeout: int
max processing time in seconds
debug: bool, defaults to False
If True, information is printed while routing
anim: AnimationDing0
AnimationDing0 object
Returns
-------
LocalSearchSolution
A solution (LocalSearchSolution class)
"""
# TODO: If necessary, use timeout to set max processing time of local search
# load threshold for operator (see exchange or relocate operator's description for more information)
op_diff_round_digits = int(cfg_ding0.get('mv_routing', 'operator_diff_round_digits'))
solution = LocalSearchSolution(graph, savings_solution)
# FOR BENCHMARKING OF OPERATOR'S ORDER:
#self.benchmark_operator_order(graph, savings_solution, op_diff_round_digits)
for run in range(10):
start = time.time()
solution = self.operator_exchange(graph, solution, op_diff_round_digits, anim)
time1 = time.time()
if debug:
logger.debug('Elapsed time (exchange, run {1}): {0}, '
'Solution\'s length: {2}'.format(
time1 - start, str(run), solution.length()))
solution = self.operator_relocate(graph, solution, op_diff_round_digits, anim)
time2 = time.time()
if debug:
logger.debug('Elapsed time (relocate, run {1}): {0}, '
'Solution\'s length: {2}'.format(
time2 - time1, str(run), solution.length()))
solution = self.operator_oropt(graph, solution, op_diff_round_digits, anim)
time3 = time.time()
if debug:
logger.debug('Elapsed time (oropt, run {1}): {0}, '
'Solution\'s length: {2}'.format(
time3 - time2, str(run), solution.length()))
return solution
|
python
|
{
"resource": ""
}
|
q14609
|
get_branches
|
train
|
def get_branches(grid):
"""
Individual graphs of sectoral loads
:param grid: LVGridDing0 object
:return: dict of branch heads mapped to their descendant nodes
"""
station = grid._station
tree = nx.dfs_tree(grid._graph, station)
# TODO: idea
# 1. build tree from lv_grid station as root -> directions should point to
# descendant leaves
# 2. for analysis of current issues get list of descendants with
# nx.descendants(tree, station). Sum peak load / gen capacity
# 3. Extract nodes belonging to main route of a branch by checking all
# successors if these are LVCableDistributors
# notes and hints:
# 1. associated edges can be accessed via grid._graph.in_edges(<node>)
# respectively grid._graph.out_edges(<node>)
# 2. when using nx.descendants(tree, station) make sure the order of nodes
# is maintained as this is important to properly assess voltage and over-
# loading issues
# first_cbl_dists = [x for x in grid._graph.neighbors(station)
# if isinstance(x, LVCableDistributorDing0)]
# if len(first_cbl_dists) > 0:
# ancestors = nx.ancestors(grid._graph, first_cbl_dists[0])
# else:
# ancestors = None
# return ancestors
branch_heads = list(nx.neighbors(tree, station))
descendants = {branch_head: list(nx.descendants(tree, branch_head)) for
branch_head in branch_heads}
return descendants
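# Minimal sketch of the traversal above on a toy graph (plain labels, no
# Ding0 objects; nx is already imported by this module): station 'S' feeds
# two branch heads 'a' and 'b'.
_g = nx.Graph([('S', 'a'), ('a', 'a1'), ('S', 'b')])
_tree = nx.dfs_tree(_g, 'S')
_desc = {h: list(nx.descendants(_tree, h)) for h in nx.neighbors(_tree, 'S')}
# -> {'a': ['a1'], 'b': []}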
|
python
|
{
"resource": ""
}
|
q14610
|
NetworkDing0.add_mv_grid_district
|
train
|
def add_mv_grid_district(self, mv_grid_district):
"""Adds a MV grid_district to _mv_grid_districts if not already existing"""
# TODO: use setter method here (make attribute '_mv_grid_districts' private)
if mv_grid_district not in self.mv_grid_districts():
self._mv_grid_districts.append(mv_grid_district)
|
python
|
{
"resource": ""
}
|
q14611
|
NetworkDing0.get_mvgd_lvla_lvgd_obj_from_id
|
train
|
def get_mvgd_lvla_lvgd_obj_from_id(self):
""" Build dict with mapping from LVLoadAreaDing0 id to LVLoadAreaDing0 object,
MVGridDistrictDing0 id to MVGridDistrictDing0 object,
LVGridDistrictDing0 id to LVGridDistrictDing0 object and
LVStationDing0 id to LVStationDing0 object
Returns
-------
:obj:`dict`
mv_grid_districts_dict::
{
mv_grid_district_id_1: mv_grid_district_obj_1,
...,
mv_grid_district_id_n: mv_grid_district_obj_n
}
:obj:`dict`
lv_load_areas_dict::
{
lv_load_area_id_1: lv_load_area_obj_1,
...,
lv_load_area_id_n: lv_load_area_obj_n
}
:obj:`dict`
lv_grid_districts_dict::
{
lv_grid_district_id_1: lv_grid_district_obj_1,
...,
lv_grid_district_id_n: lv_grid_district_obj_n
}
:obj:`dict`
lv_stations_dict::
{
lv_station_id_1: lv_station_obj_1,
...,
lv_station_id_n: lv_station_obj_n
}
"""
mv_grid_districts_dict = {}
lv_load_areas_dict = {}
lv_grid_districts_dict = {}
lv_stations_dict = {}
for mv_grid_district in self.mv_grid_districts():
mv_grid_districts_dict[mv_grid_district.id_db] = mv_grid_district
for lv_load_area in mv_grid_district.lv_load_areas():
lv_load_areas_dict[lv_load_area.id_db] = lv_load_area
for lv_grid_district in lv_load_area.lv_grid_districts():
lv_grid_districts_dict[lv_grid_district.id_db] = lv_grid_district
lv_stations_dict[lv_grid_district.lv_grid.station().id_db] = lv_grid_district.lv_grid.station()
return mv_grid_districts_dict, lv_load_areas_dict, lv_grid_districts_dict, lv_stations_dict
|
python
|
{
"resource": ""
}
|
q14612
|
NetworkDing0.build_mv_grid_district
|
train
|
def build_mv_grid_district(self, poly_id, subst_id, grid_district_geo_data,
station_geo_data):
"""Initiates single MV grid_district including station and grid
Parameters
----------
poly_id: int
ID of grid_district according to database table. Also used as ID for created grid #TODO: check type
subst_id: int
ID of station according to database table #TODO: check type
grid_district_geo_data: :shapely:`Shapely Polygon object<polygons>`
Polygon of grid district
station_geo_data: :shapely:`Shapely Point object<points>`
Point of station
Returns
-------
:shapely:`Shapely Polygon object<polygons>`
Description of return #TODO: check
"""
mv_station = MVStationDing0(id_db=subst_id, geo_data=station_geo_data)
mv_grid = MVGridDing0(network=self,
id_db=poly_id,
station=mv_station)
mv_grid_district = MVGridDistrictDing0(id_db=poly_id,
mv_grid=mv_grid,
geo_data=grid_district_geo_data)
mv_grid.grid_district = mv_grid_district
mv_station.grid = mv_grid
self.add_mv_grid_district(mv_grid_district)
return mv_grid_district
|
python
|
{
"resource": ""
}
|
q14613
|
NetworkDing0.build_lv_grid_district
|
train
|
def build_lv_grid_district(self,
lv_load_area,
lv_grid_districts,
lv_stations):
"""Instantiates and associates lv_grid_district incl grid and station.
The instantiation creates more or less empty objects including relevant
data for transformer choice and grid creation
Parameters
----------
lv_load_area: LVLoadAreaDing0
load_area object
lv_grid_districts: :pandas:`pandas.DataFrame<dataframe>`
Table containing lv_grid_districts of according load_area
lv_stations : :pandas:`pandas.DataFrame<dataframe>`
Table containing lv_stations of according load_area
"""
# There's no LVGD for current LA
# -> TEMP WORKAROUND: Create single LVGD from LA, replace unknown values by zero
# TODO: Fix #155 (see also: data_processing #68)
if len(lv_grid_districts) == 0:
# raise ValueError(
# 'Load Area {} has no LVGD - please re-open #155'.format(
# repr(lv_load_area)))
geom = wkt_dumps(lv_load_area.geo_area)
lv_grid_districts = \
lv_grid_districts.append(
pd.DataFrame(
{'la_id': [lv_load_area.id_db],
'geom': [geom],
'population': [0],
'peak_load_residential': [lv_load_area.peak_load_residential],
'peak_load_retail': [lv_load_area.peak_load_retail],
'peak_load_industrial': [lv_load_area.peak_load_industrial],
'peak_load_agricultural': [lv_load_area.peak_load_agricultural],
'sector_count_residential': [0],
'sector_count_retail': [0],
'sector_count_industrial': [0],
'sector_count_agricultural': [0],
'sector_consumption_residential': [0],
'sector_consumption_retail': [0],
'sector_consumption_industrial': [0],
'sector_consumption_agricultural': [0]
},
index=[lv_load_area.id_db]
)
)
lv_nominal_voltage = cfg_ding0.get('assumptions', 'lv_nominal_voltage')
# Associate lv_grid_district to load_area
for id, row in lv_grid_districts.iterrows():
lv_grid_district = LVGridDistrictDing0(
id_db=id,
lv_load_area=lv_load_area,
geo_data=wkt_loads(row['geom']),
population=0 if isnan(row['population']) else int(row['population']),
peak_load_residential=row['peak_load_residential'],
peak_load_retail=row['peak_load_retail'],
peak_load_industrial=row['peak_load_industrial'],
peak_load_agricultural=row['peak_load_agricultural'],
peak_load=(row['peak_load_residential'] +
row['peak_load_retail'] +
row['peak_load_industrial'] +
row['peak_load_agricultural']),
sector_count_residential=int(row['sector_count_residential']),
sector_count_retail=int(row['sector_count_retail']),
sector_count_industrial=int(row['sector_count_industrial']),
sector_count_agricultural=int(row['sector_count_agricultural']),
sector_consumption_residential=row[
'sector_consumption_residential'],
sector_consumption_retail=row['sector_consumption_retail'],
sector_consumption_industrial=row[
'sector_consumption_industrial'],
sector_consumption_agricultural=row[
'sector_consumption_agricultural'])
# be aware, lv_grid takes grid district's geom!
lv_grid = LVGridDing0(network=self,
grid_district=lv_grid_district,
id_db=id,
geo_data=wkt_loads(row['geom']),
v_level=lv_nominal_voltage)
# create LV station
lv_station = LVStationDing0(
id_db=id,
grid=lv_grid,
lv_load_area=lv_load_area,
geo_data=wkt_loads(lv_stations.loc[id, 'geom'])
if id in lv_stations.index.values
else lv_load_area.geo_centre,
peak_load=lv_grid_district.peak_load)
# assign created objects
# note: creation of LV grid is done separately,
# see NetworkDing0.build_lv_grids()
lv_grid.add_station(lv_station)
lv_grid_district.lv_grid = lv_grid
lv_load_area.add_lv_grid_district(lv_grid_district)
|
python
|
{
"resource": ""
}
|
q14614
|
NetworkDing0.import_mv_grid_districts
|
train
|
def import_mv_grid_districts(self, session, mv_grid_districts_no=None):
""" Imports MV Grid Districts, HV-MV stations, Load Areas, LV Grid Districts
and MV-LV stations, instantiates and initiates objects.
Parameters
----------
session : sqlalchemy.orm.session.Session
Database session
mv_grid_districts_no : :any:`list` of :obj:`int`
List of MV grid_districts/stations to be imported (if empty,
all grid_districts & stations are imported)
See Also
--------
build_mv_grid_district : used to instantiate MV grid_district objects
import_lv_load_areas : used to import load_areas for every single MV grid_district
ding0.core.structure.regions.MVGridDistrictDing0.add_peak_demand : used to summarize peak loads of underlying load_areas
"""
# check arguments
if not all(isinstance(_, int) for _ in mv_grid_districts_no):
raise TypeError('`mv_grid_districts` has to be a list of integers.')
# get srid settings from config
try:
srid = str(int(cfg_ding0.get('geo', 'srid')))
except OSError:
logger.exception('cannot open config file.')
# build SQL query
grid_districts = session.query(self.orm['orm_mv_grid_districts'].subst_id,
func.ST_AsText(func.ST_Transform(
self.orm['orm_mv_grid_districts'].geom, srid)). \
label('poly_geom'),
func.ST_AsText(func.ST_Transform(
self.orm['orm_mv_stations'].point, srid)). \
label('subs_geom')).\
join(self.orm['orm_mv_stations'], self.orm['orm_mv_grid_districts'].subst_id ==
self.orm['orm_mv_stations'].subst_id).\
filter(self.orm['orm_mv_grid_districts'].subst_id.in_(mv_grid_districts_no)). \
filter(self.orm['version_condition_mvgd']). \
filter(self.orm['version_condition_mv_stations']). \
distinct()
# read MV data from db
mv_data = pd.read_sql_query(grid_districts.statement,
session.bind,
index_col='subst_id')
# iterate over grid_district/station datasets and initiate objects
for poly_id, row in mv_data.iterrows():
subst_id = poly_id
region_geo_data = wkt_loads(row['poly_geom'])
# transform `region_geo_data` to epsg 3035
# to achieve correct area calculation of mv_grid_district
station_geo_data = wkt_loads(row['subs_geom'])
# projection = partial(
# pyproj.transform,
# pyproj.Proj(init='epsg:4326'), # source coordinate system
# pyproj.Proj(init='epsg:3035')) # destination coordinate system
#
# region_geo_data = transform(projection, region_geo_data)
mv_grid_district = self.build_mv_grid_district(poly_id,
subst_id,
region_geo_data,
station_geo_data)
# import all lv_stations within mv_grid_district
lv_stations = self.import_lv_stations(session)
# import all lv_grid_districts within mv_grid_district
lv_grid_districts = self.import_lv_grid_districts(session, lv_stations)
# import load areas
self.import_lv_load_areas(session,
mv_grid_district,
lv_grid_districts,
lv_stations)
# add sum of peak loads of underlying lv grid_districts to mv_grid_district
mv_grid_district.add_peak_demand()
logger.info('=====> MV Grid Districts imported')
|
python
|
{
"resource": ""
}
|
q14615
|
NetworkDing0.import_config
|
train
|
def import_config(self):
""" Loads parameters from config files
Returns
-------
:obj:`dict`
Dict of config parameters as loaded from the config files
"""
# load parameters from configs
cfg_ding0.load_config('config_db_tables.cfg')
cfg_ding0.load_config('config_calc.cfg')
cfg_ding0.load_config('config_files.cfg')
cfg_ding0.load_config('config_misc.cfg')
cfg_dict = cfg_ding0.cfg._sections
return cfg_dict
|
python
|
{
"resource": ""
}
|
q14616
|
NetworkDing0.import_pf_config
|
train
|
def import_pf_config(self):
""" Creates power flow config class and imports config from file
Returns
-------
PFConfigDing0
PFConfigDing0 object
"""
scenario = cfg_ding0.get("powerflow", "test_grid_stability_scenario")
start_hour = int(cfg_ding0.get("powerflow", "start_hour"))
end_hour = int(cfg_ding0.get("powerflow", "end_hour"))
start_time = datetime(1970, 1, 1, 00, 00, 0)
resolution = cfg_ding0.get("powerflow", "resolution")
srid = str(int(cfg_ding0.get('geo', 'srid')))
return PFConfigDing0(scenarios=[scenario],
timestep_start=start_time,
timesteps_count=end_hour-start_hour,
srid=srid,
resolution=resolution)
|
python
|
{
"resource": ""
}
|
q14617
|
NetworkDing0.mv_routing
|
train
|
def mv_routing(self, debug=False, animation=False):
""" Performs routing on all MV grids.
Parameters
----------
debug: bool, defaults to False
If True, information is printed while routing
animation: bool, defaults to False
If True, images of route modification steps are exported during routing process. A new animation object is created.
See Also
--------
ding0.core.network.grids.MVGridDing0.routing : for details on MVGridDing0 objects routing
ding0.tools.animation.AnimationDing0 : for details on animation function.
"""
if animation:
anim = AnimationDing0()
else:
anim = None
for grid_district in self.mv_grid_districts():
grid_district.mv_grid.routing(debug=debug, anim=anim)
logger.info('=====> MV Routing (Routing, Connection of Satellites & '
'Stations) performed')
|
python
|
{
"resource": ""
}
|
q14618
|
NetworkDing0.build_lv_grids
|
train
|
def build_lv_grids(self):
""" Builds LV grids for every non-aggregated LA in every MV grid
district using model grids.
"""
for mv_grid_district in self.mv_grid_districts():
for load_area in mv_grid_district.lv_load_areas():
if not load_area.is_aggregated:
for lv_grid_district in load_area.lv_grid_districts():
lv_grid_district.lv_grid.build_grid()
else:
logger.info(
'{} is of type aggregated. No grid is created.'.format(repr(load_area)))
logger.info('=====> LV model grids created')
|
python
|
{
"resource": ""
}
|
q14619
|
NetworkDing0.control_circuit_breakers
|
train
|
def control_circuit_breakers(self, mode=None):
""" Opens or closes all circuit breakers of all MV grids.
Args
----
mode: str
Set mode='open' to open, mode='close' to close
"""
for grid_district in self.mv_grid_districts():
if mode == 'open':
grid_district.mv_grid.open_circuit_breakers()
elif mode == 'close':
grid_district.mv_grid.close_circuit_breakers()
else:
raise ValueError('\'mode\' is invalid.')
if mode == 'open':
logger.info('=====> MV Circuit Breakers opened')
elif mode == 'close':
logger.info('=====> MV Circuit Breakers closed')
|
python
|
{
"resource": ""
}
|
q14620
|
NetworkDing0.metadata
|
train
|
def metadata(self, run_id=None):
"""Provide metadata on a Ding0 run
Parameters
----------
run_id: str, (defaults to current date)
Distinguish multiple versions of Ding0 data by a `run_id`. If not
set it defaults to current date in the format YYYYMMDDhhmmss
Returns
-------
dict
Metadata
"""
# Get latest version and/or git commit hash
try:
version = subprocess.check_output(
["git", "describe", "--tags", "--always"]).decode('utf8')
except (subprocess.CalledProcessError, OSError):
version = None
# Collect names of database table used to run Ding0 and data version
if self.config['input_data_source']['input_data'] == 'versioned':
data_version = self.config['versioned']['version']
database_tables = self.config['versioned']
elif self.config['input_data_source']['input_data'] == 'model_draft':
data_version = 'model_draft'
database_tables = self.config['model_draft']
else:
data_version = 'unknown'
database_tables = 'unknown'
# Collect assumptions
assumptions = {}
assumptions.update(self.config['assumptions'])
assumptions.update(self.config['mv_connect'])
assumptions.update(self.config['mv_routing'])
assumptions.update(self.config['mv_routing_tech_constraints'])
# Determine run_id if not set
if not run_id:
run_id = datetime.now().strftime("%Y%m%d%H%M%S")
# Set instance attribute run_id
if not self._run_id:
self._run_id = run_id
# Assign data to dict
metadata = dict(
version=version,
mv_grid_districts=[int(_.id_db) for _ in self._mv_grid_districts],
database_tables=database_tables,
data_version=data_version,
assumptions=assumptions,
run_id=self._run_id
)
return metadata
|
python
|
{
"resource": ""
}
|
q14621
|
create_dir
|
train
|
def create_dir(dirpath):
"""Create directory and report about it
Parameters
----------
dirpath : str
Directory including path
"""
if not os.path.isdir(dirpath):
os.mkdir(dirpath)
print("We create a directory for you and your Ding0 data: {}".format(
dirpath))
|
python
|
{
"resource": ""
}
|
q14622
|
get_default_home_dir
|
train
|
def get_default_home_dir():
"""
Return default home directory of Ding0
Returns
-------
:any:`str`
Default home directory including its path
"""
ding0_dir = str(cfg_ding0.get('config',
'config_dir'))
return os.path.join(os.path.expanduser('~'), ding0_dir)
|
python
|
{
"resource": ""
}
|
q14623
|
SavingsSolution.is_complete
|
train
|
def is_complete(self):
"""Returns True if this is a complete solution, i.e, all nodes are allocated
Todo
----
TO BE REVIEWED
Returns
-------
bool
True if this is a complete solution.
"""
allocated = all(
[node.route_allocation() is not None for node in list(self._nodes.values()) if node.name() != self._problem.depot().name()]
)
valid_routes = len(self._routes) == 1 # workaround: use only one route (otherwise the process stops when the max. number of vehicles is reached)
return allocated and valid_routes
|
python
|
{
"resource": ""
}
|
q14624
|
SavingsSolution.process
|
train
|
def process(self, pair):
# TODO: check docstring
"""Processes a pair of nodes into the current solution
MUST CREATE A NEW INSTANCE, NOT CHANGE ANY INSTANCE ATTRIBUTES
Returns a new instance (deep copy) of self object
Args
----
pair : tuple of Node
Pair of nodes to be merged into the current solution
Returns
-------
tuple
New solution (a deep copy of self) and a bool flagging whether the merge succeeded
"""
a, b = pair
new_solution = self.clone()
i, j = new_solution.get_pair((a, b))
route_i = i.route_allocation()
route_j = j.route_allocation()
inserted = False
if ((route_i is not None and route_j is not None) and (route_i != route_j)):
if route_i._nodes.index(i) == 0 and route_j._nodes.index(j) == len(route_j._nodes) - 1:
if route_j.can_allocate(route_i._nodes):
route_j.allocate(route_i._nodes)
if i.route_allocation() != j.route_allocation():
raise Exception('route allocation mismatch after merge')
inserted = True
elif route_j._nodes.index(j) == 0 and route_i._nodes.index(i) == len(route_i._nodes) - 1:
if route_i.can_allocate(route_j._nodes):
route_i.allocate(route_j._nodes)
if i.route_allocation() != j.route_allocation():
raise Exception('route allocation mismatch after merge (j)')
inserted = True
new_solution._routes = [route for route in new_solution._routes if route._nodes]
return new_solution, inserted
|
python
|
{
"resource": ""
}
|
q14625
|
SavingsSolution.can_process
|
train
|
def can_process(self, pairs):
"""Returns True if this solution can process `pairs`
Parameters
----------
pairs: :any:`list` of pairs of Route
List of pairs
Returns
-------
bool
True if this solution can process `pairs`.
"""
i, j = pairs
# At least one of the nodes is not yet allocated to a route
if i.route_allocation() is None or j.route_allocation() is None:
return True
if self._allocated == len(list(self._problem.nodes())) - 1: # All nodes in a route
return False
return False
|
python
|
{
"resource": ""
}
|
q14626
|
ClarkeWrightSolver.compute_savings_list
|
train
|
def compute_savings_list(self, graph):
"""Compute Clarke and Wright savings list
A savings list is a matrix containing the saving amount S between i and j.
S is calculated by S = d(0,i) + d(0,j) - d(i,j) (CLARKE; WRIGHT, 1964)
Args
----
graph: :networkx:`NetworkX Graph Obj< >`
A NetworkX graph is used.
Returns
-------
:any:`list` of pairs of `Node`
List of node pairs sorted by their savings
"""
savings_list = {}
for i, j in graph.edges():
# t = (i, j)
if repr(i) < repr(j):
t = (i, j)
else:
t = (j, i)
if i == graph.depot() or j == graph.depot():
continue
savings_list[t] = graph.distance(graph.depot(), i) + graph.distance(graph.depot(), j) - graph.distance(i, j)
sorted_savings_list = sorted(list(savings_list.items()), key=operator.itemgetter(1), reverse=True)
return [nodes for nodes, saving in sorted_savings_list]
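# Worked example of the savings formula (hypothetical distances): with
# d(0,i) = 4, d(0,j) = 5 and d(i,j) = 3, serving i and j on a single route
# saves S = 4 + 5 - 3 = 6 compared to two separate depot round trips.
_s_example = 4 + 5 - 3  # = 6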
|
python
|
{
"resource": ""
}
|
q14627
|
ClarkeWrightSolver.solve
|
train
|
def solve(self, graph, timeout, debug=False, anim=None):
"""Solves the CVRP problem using Clarke and Wright Savings methods
Parameters
----------
graph: :networkx:`NetworkX Graph Obj< >`
A NetworkX graph is used.
timeout: int
max processing time in seconds
debug: bool, defaults to False
If True, information is printed while routing
anim: AnimationDing0
Returns
-------
SavingsSolution
A solution
"""
savings_list = self.compute_savings_list(graph)
solution = SavingsSolution(graph)
start = time.time()
for i, j in savings_list[:]:
if solution.is_complete():
break
if solution.can_process((i, j)):
solution, inserted = solution.process((i, j))
if inserted:
savings_list.remove((i, j))
if anim:
solution.draw_network(anim)
if time.time() - start > timeout:
break
return solution
|
python
|
{
"resource": ""
}
|
q14628
|
export_to_dir
|
train
|
def export_to_dir(network, export_dir):
"""
Exports PyPSA network as CSV files to directory
Args:
network: pypsa.Network
export_dir: str
Sub-directory in output/debug/grid/ where csv Files of PyPSA network are exported to.
"""
package_path = ding0.__path__[0]
network.export_to_csv_folder(os.path.join(package_path,
'output',
'debug',
'grid',
export_dir))
|
python
|
{
"resource": ""
}
|
q14629
|
edges_to_dict_of_dataframes
|
train
|
def edges_to_dict_of_dataframes(grid, edges):
"""
Export edges to DataFrame
Parameters
----------
grid: ding0.Network
edges: list
Edges of Ding0.Network graph
Returns
-------
edges_dict: dict
"""
omega = 2 * pi * 50
srid = int(cfg_ding0.get('geo', 'srid'))
lines = {'line_id': [], 'bus0': [], 'bus1': [], 'x': [], 'r': [],
's_nom': [], 'length': [], 'cables': [], 'geom': [],
'grid_id': []}
# iterate over edges and add them one by one
for edge in edges:
line_name = '_'.join(['MV',
str(grid.id_db),
'lin',
str(edge['branch'].id_db)])
# TODO: find the real cause why L, C, I_max_th are sometimes of type Series
if (isinstance(edge['branch'].type['L'], Series) or
isinstance(edge['branch'].type['C'], Series)):
x = omega * edge['branch'].type['L'].values[0] * 1e-3
else:
x = omega * edge['branch'].type['L'] * 1e-3
if isinstance(edge['branch'].type['R'], Series):
r = edge['branch'].type['R'].values[0]
else:
r = edge['branch'].type['R']
if (isinstance(edge['branch'].type['I_max_th'], Series) or
isinstance(edge['branch'].type['U_n'], Series)):
s_nom = sqrt(3) * edge['branch'].type['I_max_th'].values[0] * \
edge['branch'].type['U_n'].values[0]
else:
s_nom = sqrt(3) * edge['branch'].type['I_max_th'] * \
edge['branch'].type['U_n']
# get lengths of line
l = edge['branch'].length / 1e3
lines['line_id'].append(line_name)
lines['bus0'].append(edge['adj_nodes'][0].pypsa_id)
lines['bus1'].append(edge['adj_nodes'][1].pypsa_id)
lines['x'].append(x * l)
lines['r'].append(r * l)
lines['s_nom'].append(s_nom)
lines['length'].append(l)
lines['cables'].append(3)
lines['geom'].append(from_shape(
LineString([edge['adj_nodes'][0].geo_data,
edge['adj_nodes'][1].geo_data]),
srid=srid))
lines['grid_id'].append(grid.id_db)
return {'Line': DataFrame(lines).set_index('line_id')}
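# Illustrative parameter conversion as performed above (hypothetical cable
# type: L' = 0.36 mH/km, R' = 0.1 Ohm/km, I_max_th = 365 A, U_n = 20 kV;
# pi and sqrt are already imported by this module):
_x_per_km = 2 * pi * 50 * 0.36 * 1e-3  # ≈ 0.113 Ohm/km
_s_nom_example = sqrt(3) * 365 * 20    # ≈ 12644 kVA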
|
python
|
{
"resource": ""
}
|
q14630
|
run_powerflow_onthefly
|
train
|
def run_powerflow_onthefly(components, components_data, grid, export_pypsa_dir=None, debug=False):
"""
Run powerflow to test grid stability
Two cases are defined to be tested here:
i) load case
ii) feed-in case
Parameters
----------
components: dict of pandas.DataFrame
components_data: dict of pandas.DataFrame
export_pypsa_dir: str
Sub-directory in output/debug/grid/ where csv Files of PyPSA network are exported to.
Export is omitted if argument is empty.
"""
scenario = cfg_ding0.get("powerflow", "test_grid_stability_scenario")
start_hour = cfg_ding0.get("powerflow", "start_hour")
end_hour = cfg_ding0.get("powerflow", "end_hour")
# choose temp_id
temp_id_set = 1
timesteps = 2
start_time = datetime(1970, 1, 1, 00, 00, 0)
resolution = 'H'
# inspect grid data for integrity
if debug:
data_integrity(components, components_data)
# define investigated time range
timerange = DatetimeIndex(freq=resolution,
periods=timesteps,
start=start_time)
# TODO: Instead of hard coding PF config, values from class PFConfigDing0 can be used here.
# create PyPSA powerflow problem
network, snapshots = create_powerflow_problem(timerange, components)
# import pq-sets
for key in ['Load', 'Generator']:
for attr in ['p_set', 'q_set']:
# catch MV grid districts without generators
if not components_data[key].empty:
series = transform_timeseries4pypsa(components_data[key][
attr].to_frame(),
timerange,
column=attr)
import_series_from_dataframe(network,
series,
key,
attr)
series = transform_timeseries4pypsa(components_data['Bus']
['v_mag_pu_set'].to_frame(),
timerange,
column='v_mag_pu_set')
import_series_from_dataframe(network,
series,
'Bus',
'v_mag_pu_set')
# add coordinates to network nodes and make ready for map plotting
# network = add_coordinates(network)
# start powerflow calculations
network.pf(snapshots)
# # make a line loading plot
# # TODO: make this optional
# plot_line_loading(network, timestep=0,
# filename='Line_loading_load_case.png')
# plot_line_loading(network, timestep=1,
# filename='Line_loading_feed-in_case.png')
# process results
bus_data, line_data = process_pf_results(network)
# assign results data to graph
assign_bus_results(grid, bus_data)
assign_line_results(grid, line_data)
# export network if directory is specified
if export_pypsa_dir:
export_to_dir(network, export_dir=export_pypsa_dir)
|
python
|
{
"resource": ""
}
|
q14631
|
data_integrity
|
train
|
def data_integrity(components, components_data):
"""
Check grid data for integrity
Parameters
----------
components: dict
Grid components
components_data: dict
Grid component data (such as p,q and v set points)
"""
data_check = {}
for comp in ['Bus', 'Load']: # list(components_data.keys()):
data_check[comp] = {}
data_check[comp]['length_diff'] = len(components[comp]) - len(
components_data[comp])
# print short report to user and exit program if data is inconsistent
for comp in list(data_check.keys()):
if data_check[comp]['length_diff'] != 0:
logger.exception("{comp} data is invalid. You supplied {no_comp} {comp} "
"objects and {no_data} datasets. Check you grid data "
"and try again".format(comp=comp,
no_comp=len(components[comp]),
no_data=len(components_data[comp])))
sys.exit(1)
|
python
|
{
"resource": ""
}
|
q14632
|
parallel_run
|
train
|
def parallel_run(districts_list, n_of_processes, n_of_districts, run_id,
base_path=None):
'''Organize parallel runs of ding0.
The function takes all districts in a list and divides them into
n_of_processes parallel processes. For each process, the assigned districts
are given to the function process_runs() with the argument n_of_districts
Parameters
----------
districts_list: list of int
List with all districts to be run.
n_of_processes: int
Number of processes to run in parallel
n_of_districts: int
Number of districts to be run in each cluster given as argument to
process_runs()
run_id: str
Identifier for a run of Ding0. For example it is used to create a
subdirectory of os.path.join(`base_path`, 'results')
base_path : str
Base path for ding0 data (input, results and logs).
Default is `None` which sets it to :code:`~/.ding0` (may deviate on
windows systems).
Specify your own but keep in mind that it requires a particular
structure of subdirectories.
See Also
--------
ding0_runs
'''
# define base path
if base_path is None:
base_path = BASEPATH
if not os.path.exists(os.path.join(base_path, run_id)):
os.makedirs(os.path.join(base_path, run_id))
start = time.time()
#######################################################################
# Define an output queue
output_info = mp.Queue()
#######################################################################
# Setup a list of processes that we want to run
max_dist = len(districts_list)
chunk_size = floor(max_dist / n_of_processes)
if chunk_size == 0:
chunk_size = 1
chunks = [districts_list[x:x + chunk_size] for x in range(0, len(districts_list), chunk_size)]
processes = []
for chunk in chunks:
mv_districts = chunk
processes.append(mp.Process(target=process_runs,
args=(mv_districts, n_of_districts,
output_info, run_id, base_path)))
#######################################################################
# Run processes
for p in processes:
p.start()
# Collect output_info from processes
output = [output_info.get() for p in processes]
output = list(itertools.chain.from_iterable(output))
# Exit the completed processes
for p in processes:
p.join()
#######################################################################
print('Elapsed time for', str(max_dist),
'MV grid districts (seconds): {}'.format(time.time() - start))
return output
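# Sketch of the chunking scheme above with plain numbers: 7 districts on
# 3 processes give a chunk size of floor(7 / 3) = 2; note that the
# remainder spawns a fourth process.
_districts = [1, 2, 3, 4, 5, 6, 7]
_size = floor(len(_districts) / 3) or 1
_chunks = [_districts[x:x + _size] for x in range(0, len(_districts), _size)]
# -> [[1, 2], [3, 4], [5, 6], [7]]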
|
python
|
{
"resource": ""
}
|
q14633
|
process_metadata
|
train
|
def process_metadata(meta):
"""
Merge metadata of run on multiple grid districts
Parameters
----------
meta: list of dict
Metadata of run of each MV grid district
Returns
-------
dict
Single metadata dict including merged metadata
"""
mvgds = []
metadata = meta[0]
for mvgd in meta:
if isinstance(mvgd['mv_grid_districts'], list):
mvgds.extend(mvgd['mv_grid_districts'])
else:
mvgds.append(mvgd['mv_grid_districts'])
metadata['mv_grid_districts'] = mvgds
return metadata
|
python
|
{
"resource": ""
}
|
q14634
|
reinforce_grid
|
train
|
def reinforce_grid(grid, mode):
#TODO: finish docstring
""" Evaluates grid reinforcement needs and performs measures
Grid reinforcement according to methods described in [VNSRP]_ supplemented
by [DENA]_.
Parameters
----------
grid: GridDing0
Grid instance
mode: str
One of 'MV' or 'LV'
Notes
-----
Currently only MV branch reinforcement is implemented. HV-MV stations are not
reinforced since not required for status-quo scenario.
References
----------
.. [DENA] Deutsche Energie-Agentur GmbH (dena), "dena-Verteilnetzstudie. Ausbau- und Innovationsbedarf der
Stromverteilnetze in Deutschland bis 2030.", 2012
.. [VNSRP] Ackermann, T., Untsch, S., Koch, M., & Rothfuchs, H. (2014).
Verteilnetzstudie Rheinland-Pfalz. Hg. v. Ministerium für
Wirtschaft, Klimaschutz, Energie und Landesplanung Rheinland-Pfalz
(MWKEL). energynautics GmbH.
"""
# kind of grid to be evaluated (MV or LV)
if mode == 'MV':
crit_branches, crit_stations = check_load(grid, mode)
# STEP 1: reinforce branches
# do reinforcement
reinforce_branches_current(grid, crit_branches)
# if branches or stations have been reinforced: run PF again to check for voltage issues
if crit_branches or crit_stations:
grid.network.run_powerflow(session=None, method='onthefly')
crit_nodes = check_voltage(grid, mode)
crit_nodes_count_prev_step = len(crit_nodes)
# as long as there are voltage issues, do reinforcement
while crit_nodes:
# determine all branches on the way from HV-MV substation to crit. nodes
crit_branches_v = grid.find_and_union_paths(grid.station(), crit_nodes)
# do reinforcement
reinforce_branches_voltage(grid, crit_branches_v)
# run PF
grid.network.run_powerflow(session=None, method='onthefly')
crit_nodes = check_voltage(grid, mode)
# if there are critical nodes left but no larger cable available, stop reinforcement
if len(crit_nodes) == crit_nodes_count_prev_step:
logger.warning('==> There are {0} branches that cannot be '
'reinforced (no appropriate cable '
'available).'.format(
len(grid.find_and_union_paths(grid.station(),
crit_nodes))))
break
crit_nodes_count_prev_step = len(crit_nodes)
if not crit_nodes:
logger.info('==> All voltage issues in {mode} grid could be '
'solved using reinforcement.'.format(mode=mode))
# STEP 2: reinforce HV-MV station
# NOTE: HV-MV station reinforcement is not required for status-quo
# scenario since HV-MV trafos already sufficient for load+generation
# case as done in MVStationDing0.choose_transformers()
elif mode == 'LV':
# get overloaded branches
# overloading issues
critical_branches, critical_stations = get_critical_line_loading(grid)
# reinforce overloaded lines by increasing size
unresolved = reinforce_lv_branches_overloading(grid, critical_branches)
logger.info(
"Out of {crit_branches} with overloading {unresolved} remain "
"with unresolved issues due to line overloading. "
"LV grid: {grid}".format(
crit_branches=len(critical_branches),
unresolved=len(unresolved),
grid=grid))
# reinforce substations
extend_substation(grid, critical_stations, mode)
# get node with over-voltage
crit_nodes = get_critical_voltage_at_nodes(grid) #over-voltage issues
crit_nodes_count_prev_step = len(crit_nodes)
logger.info('{cnt_crit_nodes} nodes in {grid} have voltage issues'.format(
cnt_crit_nodes=crit_nodes_count_prev_step,
grid=grid))
# as long as there are voltage issues, do reinforcement
while crit_nodes:
# determine all branches on the way from HV-MV substation to crit. nodes
crit_branches_v = grid.find_and_union_paths(
grid.station(),
[_['node'] for _ in crit_nodes])
# do reinforcement
reinforce_branches_voltage(grid, crit_branches_v, mode)
# get node with over-voltage
crit_nodes = get_critical_voltage_at_nodes(grid)
# if there are critical nodes left but no larger cable available, stop reinforcement
if len(crit_nodes) == crit_nodes_count_prev_step:
logger.warning('==> There are {0} branches that cannot be '
'reinforced (no appropriate cable '
'available).'.format(
len(crit_branches_v)))
break
crit_nodes_count_prev_step = len(crit_nodes)
if not crit_nodes:
logger.info('==> All voltage issues in {mode} grid could be '
'solved using reinforcement.'.format(mode=mode))
# reinforcement of LV stations on voltage issues
crit_stations_voltage = [_ for _ in crit_nodes
if isinstance(_['node'], LVStationDing0)]
if crit_stations_voltage:
extend_substation_voltage(crit_stations_voltage, grid_level='LV')
|
python
|
{
"resource": ""
}
|
q14635
|
example_stats
|
train
|
def example_stats(filename):
"""
Obtain statistics from created grid topology
Prints some statistical numbers and produces exemplary figures
"""
nd = results.load_nd_from_pickle(filename=filename)
nodes_df, edges_df = nd.to_dataframe()
# get statistical numbers about grid
stats = results.calculate_mvgd_stats(nd)
# plot distribution of load/generation of subjacent LV grids
stations = nodes_df[nodes_df['type'] == 'LV Station']
f, axarr = plt.subplots(2, sharex=True)
f.suptitle("Peak load (top) / peak generation capacity (bottom) at LV "
"substations in kW")
stations['peak_load'].hist(bins=20, alpha=0.5, ax=axarr[0])
axarr[0].set_title("Peak load in kW")
stations['generation_capacity'].hist(bins=20, alpha=0.5, ax=axarr[1])
axarr[1].set_title("Peak generation capacity in kW")
plt.show()
# Introduction of report
print("You are analyzing MV grid district {mvgd}\n".format(
mvgd=int(stats.index.values)))
# print all the calculated stats
# this isn't a particularly beautiful format but it is
# information rich
with option_context('display.max_rows', None,
'display.max_columns', None,
'display.max_colwidth', -1):
print(stats.T)
|
python
|
{
"resource": ""
}
|
q14636
|
MVStationDing0.peak_generation
|
train
|
def peak_generation(self, mode):
"""Calculates cumulative peak generation of generators connected to underlying grids
This is done instantaneously using bottom-up approach.
Parameters
----------
mode: str
determines which generators are included::
'MV': Only generation capacities of MV level are considered.
'MVLV': Generation capacities of MV and LV are considered
(= cumulative generation capacities in entire MVGD).
Returns
-------
float
Cumulative peak generation
"""
if mode == 'MV':
return sum([_.capacity for _ in self.grid.generators()])
elif mode == 'MVLV':
# calc MV geno capacities
cum_mv_peak_generation = sum([_.capacity for _ in self.grid.generators()])
# calc LV geno capacities
cum_lv_peak_generation = 0
for load_area in self.grid.grid_district.lv_load_areas():
cum_lv_peak_generation += load_area.peak_generation
return cum_mv_peak_generation + cum_lv_peak_generation
else:
raise ValueError('parameter \'mode\' is invalid!')
|
python
|
{
"resource": ""
}
|
q14637
|
MVStationDing0.set_operation_voltage_level
|
train
|
def set_operation_voltage_level(self):
"""Set operation voltage level
"""
mv_station_v_level_operation = float(cfg_ding0.get('mv_routing_tech_constraints',
'mv_station_v_level_operation'))
self.v_level_operation = mv_station_v_level_operation * self.grid.v_level
|
python
|
{
"resource": ""
}
|
q14638
|
MVStationDing0.select_transformers
|
train
|
def select_transformers(self):
""" Selects appropriate transformers for the HV-MV substation.
The transformers are chosen according to max. of load case and feedin-case
considering load factors.
The HV-MV transformer with the next higher available nominal apparent power is
chosen. If one trafo is not sufficient, multiple trafos are used. Additionally,
in a second step a redundant trafo is installed with max. capacity of the
selected trafos of the first step according to general planning principles for
MV distribution grids (n-1).
Notes
-----
Parametrization of transformers is based on [#]_.
Potential HV-MV transformers are chosen according to [#]_.
References
----------
.. [#] Deutsche Energie-Agentur GmbH (dena), "dena-Verteilnetzstudie.
Ausbau- und Innovationsbedarf der Stromverteilnetze in Deutschland
bis 2030.", 2012
.. [#] X. Tao, "Automatisierte Grundsatzplanung von
Mittelspannungsnetzen", Dissertation, 2006
"""
# get power factor for loads and generators
cos_phi_load = cfg_ding0.get('assumptions', 'cos_phi_load')
cos_phi_feedin = cfg_ding0.get('assumptions', 'cos_phi_gen')
# get trafo load factors
load_factor_mv_trans_lc_normal = float(cfg_ding0.get('assumptions',
'load_factor_mv_trans_lc_normal'))
load_factor_mv_trans_fc_normal = float(cfg_ding0.get('assumptions',
'load_factor_mv_trans_fc_normal'))
# get equipment parameters of MV transformers
trafo_parameters = self.grid.network.static_data['MV_trafos']
# get peak load and peak generation
cum_peak_load = self.peak_load / cos_phi_load
cum_peak_generation = self.peak_generation(mode='MVLV') / cos_phi_feedin
# check if load or generation is greater respecting corresponding load factor
if (cum_peak_load / load_factor_mv_trans_lc_normal) > \
(cum_peak_generation / load_factor_mv_trans_fc_normal):
# use peak load and load factor from load case
load_factor_mv_trans = load_factor_mv_trans_lc_normal
residual_apparent_power = cum_peak_load
else:
# use peak generation and load factor for feedin case
load_factor_mv_trans = load_factor_mv_trans_fc_normal
residual_apparent_power = cum_peak_generation
# determine number and size of required transformers
# get max. trafo
transformer_max = trafo_parameters.iloc[trafo_parameters['S_nom'].idxmax()]
while residual_apparent_power > 0:
if residual_apparent_power > load_factor_mv_trans * transformer_max['S_nom']:
transformer = transformer_max
else:
# choose trafo
transformer = trafo_parameters.iloc[
trafo_parameters[trafo_parameters['S_nom'] * load_factor_mv_trans >
residual_apparent_power]['S_nom'].idxmin()]
# add transformer on determined size with according parameters
self.add_transformer(TransformerDing0(**{'grid': self.grid,
'v_level': self.grid.v_level,
's_max_longterm': transformer['S_nom']}))
# calc residual load
residual_apparent_power -= (load_factor_mv_trans *
transformer['S_nom'])
# if no transformer was selected (no load in grid district), use smallest one
if len(self._transformers) == 0:
transformer = trafo_parameters.iloc[trafo_parameters['S_nom'].idxmin()]
self.add_transformer(
TransformerDing0(grid=self.grid,
v_level=self.grid.v_level,
s_max_longterm=transformer['S_nom']))
# add redundant transformer of the size of the largest transformer
s_max_max = max((o.s_max_a for o in self._transformers))
self.add_transformer(TransformerDing0(**{'grid': self.grid,
'v_level': self.grid.v_level,
's_max_longterm': s_max_max}))
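# Worked example of the sizing loop above (hypothetical catalogue of 20,
# 31.5 and 40 MVA units, load factor 0.6): a residual demand of 50 MVA
# takes two 40-MVA units (each covering 0.6 * 40 = 24 MVA) and one 20-MVA
# unit for the remaining 2 MVA; the n-1 step then adds one more 40-MVA unit.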
|
python
|
{
"resource": ""
}
|
q14639
|
create_results_dirs
|
train
|
def create_results_dirs(base_path):
"""Create base path dir and subdirectories
Parameters
----------
base_path : str
The base path has subdirectories for raw and processed results
"""
if not os.path.exists(base_path):
print("Creating directory {} for results data.".format(base_path))
os.mkdir(base_path)
if not os.path.exists(os.path.join(base_path, 'results')):
os.mkdir(os.path.join(base_path, 'results'))
if not os.path.exists(os.path.join(base_path, 'plots')):
os.mkdir(os.path.join(base_path, 'plots'))
if not os.path.exists(os.path.join(base_path, 'info')):
os.mkdir(os.path.join(base_path, 'info'))
if not os.path.exists(os.path.join(base_path, 'log')):
os.mkdir(os.path.join(base_path, 'log'))
|
python
|
{
"resource": ""
}
|
q14640
|
run_multiple_grid_districts
|
train
|
def run_multiple_grid_districts(mv_grid_districts, run_id, failsafe=False,
base_path=None):
"""
Perform ding0 run on given grid districts
Parameters
----------
mv_grid_districts : list
Integers describing grid districts
run_id: str
Identifier for a run of Ding0. For example it is used to create a
subdirectory of os.path.join(`base_path`, 'results')
failsafe : bool
Setting to True enables failsafe mode where corrupt grid districts
(mostly due to data issues) are reported and skipped. Report is to be
found in the log dir under :code:`~/.ding0` . Default is False.
base_path : str
Base path for ding0 data (input, results and logs).
Default is `None` which sets it to :code:`~/.ding0` (may deviate on
windows systems).
Specify your own but keep in mind that it requires a particular
structure of subdirectories.
Returns
-------
msg : str
Traceback of error computing corrupt MV grid district
    .. TODO: this is only true if the try-except environment is moved into this
        function and traceback return is implemented
Notes
-----
Consider that a large amount of MV grid districts may take hours or up to
days to compute. A computational run for a single grid district may consume
around 30 secs.
"""
start = time.time()
# define base path
if base_path is None:
base_path = BASEPATH
# database connection/ session
engine = db.connection(section='oedb')
session = sessionmaker(bind=engine)()
corrupt_grid_districts = pd.DataFrame(columns=['id', 'message'])
for mvgd in mv_grid_districts:
# instantiate ding0 network object
nd = NetworkDing0(name='network', run_id=run_id)
if not os.path.exists(os.path.join(base_path, "grids")):
os.mkdir(os.path.join(base_path, "grids"))
if not failsafe:
# run DING0 on selected MV Grid District
msg = nd.run_ding0(session=session,
mv_grid_districts_no=[mvgd])
# save results
results.save_nd_to_pickle(nd, os.path.join(base_path, "grids"))
else:
# try to perform ding0 run on grid district
try:
msg = nd.run_ding0(session=session,
mv_grid_districts_no=[mvgd])
# if not successful, put grid district to report
if msg:
corrupt_grid_districts = corrupt_grid_districts.append(
pd.Series({'id': mvgd,
'message': msg[0]}),
ignore_index=True)
# if successful, save results
else:
results.save_nd_to_pickle(nd, os.path.join(base_path,
"grids"))
except Exception as e:
corrupt_grid_districts = corrupt_grid_districts.append(
pd.Series({'id': mvgd,
'message': e}),
ignore_index=True)
continue
# Merge metadata of multiple runs
if 'metadata' not in locals():
metadata = nd.metadata
else:
if isinstance(mvgd, list):
metadata['mv_grid_districts'].extend(mvgd)
else:
metadata['mv_grid_districts'].append(mvgd)
# Save metadata to disk
with open(os.path.join(base_path, "grids", 'Ding0_{}.meta'.format(run_id)),
'w') as f:
json.dump(metadata, f)
# report on unsuccessful runs
corrupt_grid_districts.to_csv(
os.path.join(
base_path,
"grids",
'corrupt_mv_grid_districts.txt'),
index=False,
float_format='%.0f')
print('Elapsed time for', str(len(mv_grid_districts)),
'MV grid districts (seconds): {}'.format(time.time() - start))
return msg
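# Usage sketch (illustrative addition, not part of the source; requires access
# to the oedb database and a configured ding0 environment; district IDs and
# run_id are arbitrary example values):
# run_multiple_grid_districts([3545, 3546], run_id='20180101000000',
#                             failsafe=True)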
|
python
|
{
"resource": ""
}
|
q14641
|
_parse_nodes_section
|
train
|
def _parse_nodes_section(f, current_section, nodes):
"""Parse TSPLIB NODE_COORD_SECTION or DEMAND_SECTION from file descript f
Returns a dict containing the node as key
"""
section = {}
dimensions = None
if current_section == 'NODE_COORD_SECTION':
        dimensions = 3  # i: (x, y)
elif current_section == 'DEMAND_SECTION':
dimensions = 2 # i: q
else:
raise ParseException('Invalid section {}'.format(current_section))
n = 0
for line in f:
line = strip(line)
# Check dimensions
        definitions = re.split(r'\s+', line)  # '\s+' avoids zero-width splits on Python 3.7+
if len(definitions) != dimensions:
raise ParseException('Invalid dimensions from section {}. Expected: {}'.format(current_section, dimensions))
node = int(definitions[0])
values = [int(v) for v in definitions[1:]]
if len(values) == 1:
values = values[0]
section[node] = values
n = n + 1
if n == nodes:
break
# Assert all nodes were read
if n != nodes:
raise ParseException('Missing {} nodes definition from section {}'.format(nodes - n, current_section))
return section
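# Usage sketch (illustrative addition, not part of the source): parse a
# two-node DEMAND_SECTION from an in-memory file object.
import io
demo_section = _parse_nodes_section(io.StringIO("1 0\n2 30\n"),
                                    'DEMAND_SECTION', 2)
# demo_section == {1: 0, 2: 30}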
|
python
|
{
"resource": ""
}
|
q14642
|
_parse_edge_weight
|
train
|
def _parse_edge_weight(f, nodes):
"""Parse TSPLIB EDGE_WEIGHT_SECTION from file f
Supports only FULL_MATRIX for now
"""
matrix = []
n = 0
    regex = re.compile(r'\s+')
    for line in f:
        line = strip(line)
        row = regex.split(line)
matrix.append(row)
n = n + 1
if n == nodes:
break
if n != nodes:
raise ParseException('Missing {} nodes definition from section EDGE_WEIGHT_SECTION'.format(nodes - n))
return matrix
|
python
|
{
"resource": ""
}
|
q14643
|
_post_process_specs
|
train
|
def _post_process_specs(specs):
"""Post-process specs after pure parsing
    Casts values that are expected to be numbers into integers
Args
----
    specs : dict
        Parsed TSPLIB specification
Notes
-----
Modifies the specs object
"""
integer_specs = ['DIMENSION', 'CAPACITY']
for s in integer_specs:
specs[s] = int(specs[s])
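# Usage sketch (illustrative addition, not part of the source): numeric specs
# of a parsed TSPLIB header are cast in place.
demo_specs = {'NAME': 'demo', 'DIMENSION': '5', 'CAPACITY': '100'}
_post_process_specs(demo_specs)
# demo_specs['DIMENSION'] == 5 and demo_specs['CAPACITY'] == 100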
|
python
|
{
"resource": ""
}
|
q14644
|
_create_node_matrix_from_coord_section
|
train
|
def _create_node_matrix_from_coord_section(specs):
"""Transformed parsed data from NODE_COORD_SECTION into an upper triangular matrix
Calculates distances between nodes
'MATRIX' key added to `specs`
"""
distances = specs['NODE_COORD_SECTION']
specs['MATRIX'] = {}
for i in distances:
origin = tuple(distances[i])
specs['MATRIX'][i] = {}
for j in specs['NODE_COORD_SECTION']:
destination = tuple(distances[j])
distance = calculate_euc_distance(origin, destination)
#
# Upper triangular matrix
# if i > j, ij = 0
#
#if i > j:
# continue
specs['MATRIX'][i][j] = distance
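# Usage sketch (illustrative addition, not part of the source; relies on the
# module's calculate_euc_distance() helper): build the distance matrix for two
# nodes given their coordinates.
demo_specs = {'NODE_COORD_SECTION': {1: [0, 0], 2: [3, 4]}}
_create_node_matrix_from_coord_section(demo_specs)
# demo_specs['MATRIX'][1][2] holds the distance between (0, 0) and (3, 4)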
|
python
|
{
"resource": ""
}
|
q14645
|
_create_node_matrix_from_full_matrix
|
train
|
def _create_node_matrix_from_full_matrix(specs):
"""Transform parsed data from EDGE_WEIGHT_SECTION into an upper triangular matrix
'MATRIX' key added to `specs`
"""
old_matrix = specs['EDGE_WEIGHT_SECTION']
nodes = specs['DIMENSION']
specs['MATRIX'] = {}
for i in range(nodes):
specs['MATRIX'][i + 1] = {}
for j in range(nodes):
if i > j:
continue
specs['MATRIX'][i + 1][j + 1] = int(old_matrix[i][j])
|
python
|
{
"resource": ""
}
|
q14646
|
_parse_tsplib
|
train
|
def _parse_tsplib(f):
"""Parses a TSPLIB file descriptor and returns a dict containing the problem definition"""
line = ''
specs = {}
used_specs = ['NAME', 'COMMENT', 'DIMENSION', 'CAPACITY', 'TYPE', 'EDGE_WEIGHT_TYPE']
used_data = ['DEMAND_SECTION', 'DEPOT_SECTION']
# Parse specs part
for line in f:
line = strip(line)
        # Order is arbitrary; test every known spec keyword
s = None
for s in used_specs:
if line.startswith(s):
specs[s] = line.split('{} :'.format(s))[-1].strip() # get value data part
break
if s == 'EDGE_WEIGHT_TYPE' and s in specs and specs[s] == 'EXPLICIT':
used_specs.append('EDGE_WEIGHT_FORMAT')
# All specs read
if len(specs) == len(used_specs):
break
if len(specs) != len(used_specs):
missing_specs = set(used_specs).symmetric_difference(set(specs))
raise ParseException('Error parsing TSPLIB data: specs {} missing'.format(missing_specs))
if specs['EDGE_WEIGHT_TYPE'] == 'EUC_2D':
used_data.append('NODE_COORD_SECTION')
elif specs['EDGE_WEIGHT_FORMAT'] == 'FULL_MATRIX':
used_data.append('EDGE_WEIGHT_SECTION')
else:
raise ParseException('EDGE_WEIGHT_TYPE or EDGE_WEIGHT_FORMAT not supported')
_post_process_specs(specs)
# Parse data part
for line in f:
line = strip(line)
for d in used_data:
if line.startswith(d):
if d == 'DEPOT_SECTION':
specs[d] = _parse_depot_section(f)
elif d in ['NODE_COORD_SECTION', 'DEMAND_SECTION']:
specs[d] = _parse_nodes_section(f, d, specs['DIMENSION'])
elif d == 'EDGE_WEIGHT_SECTION':
specs[d] = _parse_edge_weight(f, specs['DIMENSION'])
if len(specs) == len(used_specs) + len(used_data):
break
if len(specs) != len(used_specs) + len(used_data):
missing_specs = set(specs).symmetric_difference(set(used_specs).union(set(used_data)))
raise ParseException('Error parsing TSPLIB data: specs {} missing'.format(missing_specs))
_post_process_data(specs)
return specs
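# Usage sketch (illustrative addition, not part of the source; the file name
# is an assumption): parse a CVRP instance given in TSPLIB format.
# with open('demo.vrp') as vrp_file:
#     problem = _parse_tsplib(vrp_file)
# problem then holds e.g. 'MATRIX', 'DEMAND_SECTION' and 'CAPACITY' entries.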
|
python
|
{
"resource": ""
}
|
q14647
|
MVGridDing0.add_circuit_breaker
|
train
|
def add_circuit_breaker(self, circ_breaker):
"""Creates circuit breaker object and ...
Args
----
circ_breaker: CircuitBreakerDing0
Description #TODO
"""
if circ_breaker not in self._circuit_breakers and isinstance(circ_breaker, CircuitBreakerDing0):
self._circuit_breakers.append(circ_breaker)
self.graph_add_node(circ_breaker)
|
python
|
{
"resource": ""
}
|
q14648
|
MVGridDing0.add_station
|
train
|
def add_station(self, mv_station, force=False):
"""Adds MV station if not already existing
Args
----
mv_station: MVStationDing0
        MV station to be set
force: bool
If True, MV Station is set even though it's not empty (override)
"""
if not isinstance(mv_station, MVStationDing0):
raise Exception('Given MV station is not a MVStationDing0 object.')
if self._station is None:
self._station = mv_station
self.graph_add_node(mv_station)
else:
if force:
self._station = mv_station
else:
raise Exception('MV Station already set, use argument `force=True` to override.')
|
python
|
{
"resource": ""
}
|
q14649
|
MVGridDing0.remove_cable_distributor
|
train
|
def remove_cable_distributor(self, cable_dist):
"""Removes a cable distributor from _cable_distributors if existing"""
if cable_dist in self.cable_distributors() and isinstance(cable_dist,
MVCableDistributorDing0):
# remove from array and graph
self._cable_distributors.remove(cable_dist)
if self._graph.has_node(cable_dist):
self._graph.remove_node(cable_dist)
|
python
|
{
"resource": ""
}
|
q14650
|
MVGridDing0.add_ring
|
train
|
def add_ring(self, ring):
"""Adds a ring to _rings if not already existing"""
if ring not in self._rings and isinstance(ring, RingDing0):
self._rings.append(ring)
|
python
|
{
"resource": ""
}
|
q14651
|
MVGridDing0.rings_full_data
|
train
|
def rings_full_data(self):
""" Returns a generator for iterating over each ring
Yields
------
    For each ring, a tuple composed of the ring object, the list of its branches and the list of its nodes
Notes
-----
Circuit breakers must be closed to find rings, this is done automatically.
"""
#close circuit breakers
for circ_breaker in self.circuit_breakers():
if not circ_breaker.status == 'closed':
circ_breaker.close()
logger.info('Circuit breakers were closed in order to find MV '
'rings')
#find True rings (cycles from station through breaker and back to station)
for ring_nodes in nx.cycle_basis(self._graph, root=self._station):
edges_ring = []
for node in ring_nodes:
for edge in self.graph_branches_from_node(node):
nodes_in_the_branch = self.graph_nodes_from_branch(edge[1]['branch'])
if (nodes_in_the_branch[0] in ring_nodes and
nodes_in_the_branch[1] in ring_nodes
):
if not edge[1]['branch'] in edges_ring:
edges_ring.append(edge[1]['branch'])
yield (edges_ring[0].ring,edges_ring,ring_nodes)
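# Usage sketch (illustrative addition, not part of the source; `mv_grid` is
# assumed to be a fully built MVGridDing0 instance):
# for ring, ring_edges, ring_nodes in mv_grid.rings_full_data():
#     print(repr(ring), len(ring_edges), len(ring_nodes))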
|
python
|
{
"resource": ""
}
|
q14652
|
MVGridDing0.routing
|
train
|
def routing(self, debug=False, anim=None):
""" Performs routing on Load Area centres to build MV grid with ring topology.
Args
----
debug: bool, defaults to False
If True, information is printed while routing
anim: type, defaults to None
Descr #TODO
"""
# do the routing
self._graph = mv_routing.solve(graph=self._graph,
debug=debug,
anim=anim)
logger.info('==> MV Routing for {} done'.format(repr(self)))
# connect satellites (step 1, with restrictions like max. string length, max peak load per string)
self._graph = mv_connect.mv_connect_satellites(mv_grid=self,
graph=self._graph,
mode='normal',
debug=debug)
logger.info('==> MV Sat1 for {} done'.format(repr(self)))
# connect satellites to closest line/station on a MV ring that have not been connected in step 1
self._graph = mv_connect.mv_connect_satellites(mv_grid=self,
graph=self._graph,
mode='isolated',
debug=debug)
logger.info('==> MV Sat2 for {} done'.format(repr(self)))
# connect stations
self._graph = mv_connect.mv_connect_stations(mv_grid_district=self.grid_district,
graph=self._graph,
debug=debug)
logger.info('==> MV Stations for {} done'.format(repr(self)))
|
python
|
{
"resource": ""
}
|
q14653
|
MVGridDing0.set_voltage_level
|
train
|
def set_voltage_level(self, mode='distance'):
""" Sets voltage level of MV grid according to load density of MV Grid District or max.
distance between station and Load Area.
Parameters
----------
mode: str
method to determine voltage level
* 'load_density': Decision on voltage level is determined by load density
of the considered region. Urban areas (load density of
>= 1 MW/km2 according to [#]_) usually got a voltage of
10 kV whereas rural areas mostly use 20 kV.
* 'distance' (default): Decision on voltage level is determined by the max.
distance between Grid District's HV-MV station and Load
Areas (LA's centre is used). According to [#]_ a value of
                              1 kV/km can be assumed. The `voltage_per_km_threshold`
defines the distance threshold for distinction.
(default in config = (20km+10km)/2 = 15km)
References
----------
.. [#] Falk Schaller et al., "Modellierung realitätsnaher zukünftiger Referenznetze im Verteilnetzsektor zur
Überprüfung der Elektroenergiequalität", Internationaler ETG-Kongress Würzburg, 2011
.. [#] Klaus Heuck et al., "Elektrische Energieversorgung", Vieweg+Teubner, Wiesbaden, 2007
"""
if mode == 'load_density':
# get power factor for loads
cos_phi_load = cfg_ding0.get('assumptions', 'cos_phi_load')
# get load density
load_density_threshold = float(cfg_ding0.get('assumptions',
'load_density_threshold'))
# transform MVGD's area to epsg 3035
# to achieve correct area calculation
projection = partial(
pyproj.transform,
pyproj.Proj(init='epsg:4326'), # source coordinate system
pyproj.Proj(init='epsg:3035')) # destination coordinate system
# calculate load density
kw2mw = 1e-3
sqm2sqkm = 1e6
load_density = ((self.grid_district.peak_load * kw2mw / cos_phi_load) /
(transform(projection, self.grid_district.geo_data).area / sqm2sqkm)) # unit MVA/km^2
# identify voltage level
if load_density < load_density_threshold:
self.v_level = 20
elif load_density >= load_density_threshold:
self.v_level = 10
else:
raise ValueError('load_density is invalid!')
elif mode == 'distance':
# get threshold for 20/10kV disambiguation
voltage_per_km_threshold = float(cfg_ding0.get('assumptions',
'voltage_per_km_threshold'))
# initial distance
dist_max = 0
for node in self.graph_nodes_sorted():
if isinstance(node, LVLoadAreaCentreDing0):
# calc distance from MV-LV station to LA centre
dist_node = calc_geo_dist_vincenty(self.station(), node) / 1e3
if dist_node > dist_max:
dist_max = dist_node
# max. occurring distance to a Load Area exceeds threshold => grid operates at 20kV
if dist_max >= voltage_per_km_threshold:
self.v_level = 20
            # if not: grid operates at 10kV
else:
self.v_level = 10
else:
raise ValueError('parameter \'mode\' is invalid!')
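# Usage sketch (illustrative addition, not part of the source; `mv_grid` is
# assumed to be an MVGridDing0 with grid district data loaded):
# mv_grid.set_voltage_level(mode='load_density')
# assert mv_grid.v_level in (10, 20)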
|
python
|
{
"resource": ""
}
|
q14654
|
MVGridDing0.set_default_branch_type
|
train
|
def set_default_branch_type(self, debug=False):
""" Determines default branch type according to grid district's peak load and standard equipment.
Args
----
debug: bool, defaults to False
If True, information is printed during process
Returns
-------
:pandas:`pandas.Series<series>`
default branch type: pandas Series object. If no appropriate type is found, return largest possible one.
:pandas:`pandas.Series<series>`
default branch type max: pandas Series object. Largest available line/cable type
Notes
-----
Parameter values for cables and lines are taken from [#]_, [#]_ and [#]_.
Lines are chosen to have 60 % load relative to their nominal capacity according to [#]_.
Decision on usage of overhead lines vs. cables is determined by load density of the considered region. Urban
areas usually are equipped with underground cables whereas rural areas often have overhead lines as MV
distribution system [#]_.
References
----------
.. [#] Klaus Heuck et al., "Elektrische Energieversorgung", Vieweg+Teubner, Wiesbaden, 2007
.. [#] René Flosdorff et al., "Elektrische Energieverteilung", Vieweg+Teubner, 2005
.. [#] Südkabel GmbH, "Einadrige VPE-isolierte Mittelspannungskabel",
http://www.suedkabel.de/cms/upload/pdf/Garnituren/Einadrige_VPE-isolierte_Mittelspannungskabel.pdf, 2017
.. [#] Deutsche Energie-Agentur GmbH (dena), "dena-Verteilnetzstudie. Ausbau- und Innovationsbedarf der
Stromverteilnetze in Deutschland bis 2030.", 2012
.. [#] Tao, X., "Automatisierte Grundsatzplanung von
Mittelspannungsnetzen", Dissertation, RWTH Aachen, 2007
"""
# decide whether cable or line is used (initially for entire grid) and set grid's attribute
if self.v_level == 20:
self.default_branch_kind = 'line'
elif self.v_level == 10:
self.default_branch_kind = 'cable'
# get power factor for loads
cos_phi_load = cfg_ding0.get('assumptions', 'cos_phi_load')
# get max. count of half rings per MV grid district
mv_half_ring_count_max = int(cfg_ding0.get('mv_routing_tech_constraints',
'mv_half_ring_count_max'))
#mv_half_ring_count_max=20
# load cable/line assumptions, file_names and parameter
if self.default_branch_kind == 'line':
load_factor_normal = float(cfg_ding0.get('assumptions',
'load_factor_mv_line_lc_normal'))
branch_parameters = self.network.static_data['MV_overhead_lines']
# load cables as well to use it within settlements
branch_parameters_settle = self.network.static_data['MV_cables']
# select types with appropriate voltage level
branch_parameters_settle = branch_parameters_settle[branch_parameters_settle['U_n'] == self.v_level]
elif self.default_branch_kind == 'cable':
load_factor_normal = float(cfg_ding0.get('assumptions',
'load_factor_mv_cable_lc_normal'))
branch_parameters = self.network.static_data['MV_cables']
else:
raise ValueError('Grid\'s default_branch_kind is invalid, could not set branch parameters.')
# select appropriate branch params according to voltage level, sorted ascending by max. current
# use <240mm2 only (ca. 420A) for initial rings and for disambiguation of agg. LA
branch_parameters = branch_parameters[branch_parameters['U_n'] == self.v_level]
branch_parameters = branch_parameters[branch_parameters['reinforce_only'] == 0].sort_values('I_max_th')
# get largest line/cable type
branch_type_max = branch_parameters.loc[branch_parameters['I_max_th'].idxmax()]
# set aggregation flag using largest available line/cable
self.set_nodes_aggregation_flag(branch_type_max['I_max_th'] * load_factor_normal)
# calc peak current sum (= "virtual" current) of whole grid (I = S / sqrt(3) / U) excluding load areas of type
# satellite and aggregated
peak_current_sum = ((self.grid_district.peak_load -
self.grid_district.peak_load_satellites -
self.grid_district.peak_load_aggregated) /
cos_phi_load /
(3**0.5) / self.v_level) # units: kVA / kV = A
branch_type_settle = branch_type_settle_max = None
# search the smallest possible line/cable for MV grid district in equipment datasets for all load areas
# excluding those of type satellite and aggregated
for idx, row in branch_parameters.iterrows():
# calc number of required rings using peak current sum of grid district,
# load factor and max. current of line/cable
half_ring_count = round(peak_current_sum / (row['I_max_th'] * load_factor_normal))
if debug:
logger.debug('=== Selection of default branch type in {} ==='.format(self))
logger.debug('Peak load= {} kVA'.format(self.grid_district.peak_load))
logger.debug('Peak current={}'.format(peak_current_sum))
logger.debug('I_max_th={}'.format(row['I_max_th']))
logger.debug('Half ring count={}'.format(half_ring_count))
# if count of half rings is below or equal max. allowed count, use current branch type as default
if half_ring_count <= mv_half_ring_count_max:
if self.default_branch_kind == 'line':
# take only cables that can handle at least the current of the line
branch_parameters_settle_filter = branch_parameters_settle[\
branch_parameters_settle['I_max_th'] - row['I_max_th'] > 0]
# get cable type with similar (but greater) I_max_th
# note: only grids with lines as default branch kind get cables in settlements
# (not required in grids with cables as default branch kind)
branch_type_settle = branch_parameters_settle_filter.loc[\
branch_parameters_settle_filter['I_max_th'].idxmin()]
return row, branch_type_max, branch_type_settle
# no equipment was found, return largest available line/cable
if debug:
logger.debug('No appropriate line/cable type could be found for '
'{}, declare some load areas as aggregated.'.format(self))
if self.default_branch_kind == 'line':
branch_type_settle_max = branch_parameters_settle.loc[branch_parameters_settle['I_max_th'].idxmax()]
return branch_type_max, branch_type_max, branch_type_settle_max
|
python
|
{
"resource": ""
}
|
q14655
|
MVGridDing0.set_nodes_aggregation_flag
|
train
|
def set_nodes_aggregation_flag(self, peak_current_branch_max):
""" Set Load Areas with too high demand to aggregated type.
Args
----
peak_current_branch_max: float
Max. allowed current for line/cable
"""
for lv_load_area in self.grid_district.lv_load_areas():
peak_current_node = (lv_load_area.peak_load / (3**0.5) / self.v_level) # units: kVA / kV = A
if peak_current_node > peak_current_branch_max:
lv_load_area.is_aggregated = True
# add peak demand for all Load Areas of aggregation type
self.grid_district.add_aggregated_peak_demand()
|
python
|
{
"resource": ""
}
|
q14656
|
MVGridDing0.export_to_pypsa
|
train
|
def export_to_pypsa(self, session, method='onthefly'):
"""Exports MVGridDing0 grid to PyPSA database tables
Peculiarities of MV grids are implemented here. Derive general export
method from this and adapt to needs of LVGridDing0
Parameters
----------
session: :sqlalchemy:`SQLAlchemy session object<orm/session_basics.html>`
Description
method: str
Specify export method::
'db': grid data will be exported to database
'onthefly': grid data will be passed to PyPSA directly (default)
Notes
-----
It has to be proven that this method works for LV grids as well!
    Ding0 treats two stationary cases of powerflow:
1) Full load: We assume no generation and loads to be set to peak load
2) Generation worst case:
"""
# definitions for temp_resolution table
temp_id = 1
timesteps = 2
start_time = datetime(1970, 1, 1, 00, 00, 0)
resolution = 'H'
nodes = self._graph.nodes()
edges = [edge for edge in list(self.graph_edges())
if (edge['adj_nodes'][0] in nodes and not isinstance(
edge['adj_nodes'][0], LVLoadAreaCentreDing0))
and (edge['adj_nodes'][1] in nodes and not isinstance(
edge['adj_nodes'][1], LVLoadAreaCentreDing0))]
if method == 'db':
# Export node objects: Busses, Loads, Generators
pypsa_io.export_nodes(self,
session,
nodes,
temp_id,
lv_transformer=False)
# Export edges
pypsa_io.export_edges(self, session, edges)
# Create table about temporal coverage of PF analysis
pypsa_io.create_temp_resolution_table(session,
timesteps=timesteps,
resolution=resolution,
start_time=start_time)
elif method == 'onthefly':
nodes_dict, components_data = pypsa_io.nodes_to_dict_of_dataframes(
self,
nodes,
lv_transformer=False)
edges_dict = pypsa_io.edges_to_dict_of_dataframes(self, edges)
components = tools.merge_two_dicts(nodes_dict, edges_dict)
return components, components_data
else:
raise ValueError('Sorry, this export method does not exist!')
|
python
|
{
"resource": ""
}
|
q14657
|
MVGridDing0.import_powerflow_results
|
train
|
def import_powerflow_results(self, session):
"""Assign results from power flow analysis to edges and nodes
Parameters
----------
session: :sqlalchemy:`SQLAlchemy session object<orm/session_basics.html>`
Description
"""
# bus data
pypsa_io.import_pfa_bus_results(session, self)
# line data
pypsa_io.import_pfa_line_results(session, self)
|
python
|
{
"resource": ""
}
|
q14658
|
LVGridDing0.add_station
|
train
|
def add_station(self, lv_station):
"""Adds a LV station to _station and grid graph if not already existing"""
if not isinstance(lv_station, LVStationDing0):
raise Exception('Given LV station is not a LVStationDing0 object.')
if self._station is None:
self._station = lv_station
self.graph_add_node(lv_station)
self.grid_district.lv_load_area.mv_grid_district.mv_grid.graph_add_node(lv_station)
|
python
|
{
"resource": ""
}
|
q14659
|
LVGridDing0.build_grid
|
train
|
def build_grid(self):
"""Create LV grid graph
"""
# add required transformers
build_grid.transformer(self)
# add branches of sectors retail/industrial and agricultural
build_grid.build_ret_ind_agr_branches(self.grid_district)
# add branches of sector residential
build_grid.build_residential_branches(self.grid_district)
|
python
|
{
"resource": ""
}
|
q14660
|
select_transformers
|
train
|
def select_transformers(grid, s_max=None):
"""Selects LV transformer according to peak load of LV grid district.
The transformers are chosen according to max. of load case and feedin-case
considering load factors and power factor.
The MV-LV transformer with the next higher available nominal apparent power is
chosen. Therefore, a max. allowed transformer loading of 100% is implicitly
assumed. If the peak load exceeds the max. power of a single available
    transformer, multiple transformers are built.
By default `peak_load` and `peak_generation` are taken from `grid` instance.
The behavior can be overridden providing `s_max` as explained in
``Arguments``.
Parameters
----------
grid: LVGridDing0
LV grid data
Arguments
---------
s_max : dict
dict containing maximum apparent power of load or generation case and
str describing the case. For example
.. code-block:: python
{
's_max': 480,
'case': 'load'
}
or
.. code-block:: python
{
's_max': 120,
'case': 'gen'
}
        s_max passed overrides `grid.grid_district.peak_load` and
        `grid.station().peak_generation`, respectively.
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
Parameters of chosen Transformer
:obj:`int`
Count of transformers
Notes
-----
The LV transformer with the next higher available nominal apparent power is
chosen. Therefore, a max. allowed transformer loading of 100% is implicitly
assumed. If the peak load exceeds the max. power of a single available
transformer, use multiple trafos.
"""
load_factor_lv_trans_lc_normal = cfg_ding0.get('assumptions',
'load_factor_lv_trans_lc_normal')
load_factor_lv_trans_fc_normal = cfg_ding0.get('assumptions',
'load_factor_lv_trans_fc_normal')
cos_phi_load = cfg_ding0.get('assumptions',
'cos_phi_load')
cos_phi_gen = cfg_ding0.get('assumptions',
'cos_phi_gen')
# get equipment parameters of LV transformers
trafo_parameters = grid.network.static_data['LV_trafos']
# determine s_max from grid object if not provided via arguments
if s_max is None:
# get maximum from peak load and peak generation
s_max_load = grid.grid_district.peak_load / cos_phi_load
s_max_gen = grid.station().peak_generation / cos_phi_gen
# check if load or generation is greater respecting corresponding load factor
if s_max_load > s_max_gen:
# use peak load and load factor from load case
load_factor_lv_trans = load_factor_lv_trans_lc_normal
s_max = s_max_load
else:
# use peak generation and load factor for feedin case
load_factor_lv_trans = load_factor_lv_trans_fc_normal
s_max = s_max_gen
else:
if s_max['case'] == 'load':
load_factor_lv_trans = load_factor_lv_trans_lc_normal
elif s_max['case'] == 'gen':
load_factor_lv_trans = load_factor_lv_trans_fc_normal
else:
logger.error('No proper \'case\' provided for argument s_max')
raise ValueError('Please provide proper \'case\' for argument '
'`s_max`.')
s_max = s_max['s_max']
# get max. trafo
transformer_max = trafo_parameters.iloc[trafo_parameters['S_nom'].idxmax()]
# peak load is smaller than max. available trafo
if s_max < (transformer_max['S_nom'] * load_factor_lv_trans ):
# choose trafo
transformer = trafo_parameters.iloc[
trafo_parameters[
trafo_parameters['S_nom'] * load_factor_lv_trans > s_max][
'S_nom'].idxmin()]
transformer_cnt = 1
# peak load is greater than max. available trafo -> use multiple trafos
else:
transformer_cnt = 2
# increase no. of trafos until peak load can be supplied
while not any(trafo_parameters['S_nom'] * load_factor_lv_trans > (
s_max / transformer_cnt)):
transformer_cnt += 1
transformer = trafo_parameters.iloc[
trafo_parameters[
trafo_parameters['S_nom'] * load_factor_lv_trans
> (s_max / transformer_cnt)]['S_nom'].idxmin()]
return transformer, transformer_cnt
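# Usage sketch (illustrative addition, not part of the source; `lv_grid` is
# assumed to be an LVGridDing0 with static equipment data loaded). Override
# the grid's peak load with an explicit 480 kVA load case as described in the
# docstring:
# trafo, cnt = select_transformers(lv_grid, s_max={'s_max': 480,
#                                                  'case': 'load'})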
|
python
|
{
"resource": ""
}
|
q14661
|
transformer
|
train
|
def transformer(grid):
""" Choose transformer and add to grid's station
Parameters
----------
grid: LVGridDing0
LV grid data
"""
# choose size and amount of transformers
transformer, transformer_cnt = select_transformers(grid)
# create transformers and add them to station of LVGD
for t in range(0, transformer_cnt):
lv_transformer = TransformerDing0(
grid=grid,
id_db=id,
v_level=0.4,
s_max_longterm=transformer['S_nom'],
r=transformer['R'],
x=transformer['X'])
# add each transformer to its station
grid._station.add_transformer(lv_transformer)
|
python
|
{
"resource": ""
}
|
q14662
|
select_grid_model_residential
|
train
|
def select_grid_model_residential(lvgd):
"""Selects typified model grid based on population
Parameters
----------
lvgd : LVGridDistrictDing0
Low-voltage grid district object
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
Selected string of typified model grid
:pandas:`pandas.DataFrame<dataframe>`
Parameters of chosen Transformer
Notes
-----
In total 196 distinct LV grid topologies are available that are chosen
by population in the LV grid district. Population is translated to
number of house branches. Each grid model fits a number of house
branches. If this number exceeds 196, still the grid topology of 196
house branches is used. The peak load of the LV grid district is
uniformly distributed across house branches.
"""
# Load properties of LV typified model grids
string_properties = lvgd.lv_grid.network.static_data['LV_model_grids_strings']
# Load relational table of apartment count and strings of model grid
apartment_string = lvgd.lv_grid.network.static_data[
'LV_model_grids_strings_per_grid']
    # load assumptions
apartment_house_branch_ratio = cfg_ding0.get("assumptions",
"apartment_house_branch_ratio")
population_per_apartment = cfg_ding0.get("assumptions",
"population_per_apartment")
# calc count of apartments to select string types
apartments = round(lvgd.population / population_per_apartment)
if apartments > 196:
apartments = 196
# select set of strings that represent one type of model grid
strings = apartment_string.loc[apartments]
selected_strings = [int(s) for s in strings[strings >= 1].index.tolist()]
# slice dataframe of string parameters
selected_strings_df = string_properties.loc[selected_strings]
    # add number of occurrences of each string to df
occurence_selector = [str(i) for i in selected_strings]
selected_strings_df['occurence'] = strings.loc[occurence_selector].tolist()
return selected_strings_df
|
python
|
{
"resource": ""
}
|
q14663
|
build_residential_branches
|
train
|
def build_residential_branches(lvgd):
"""Based on population and identified peak load data, the according grid
topology for residential sector is determined and attached to the grid graph
Parameters
----------
lvgd : LVGridDistrictDing0
Low-voltage grid district object
"""
# Choice of typified lv model grid depends on population within lv
# grid district. If no population is given, lv grid is omitted and
# load is represented by lv station's peak load
if lvgd.population > 0 \
and lvgd.peak_load_residential > 0:
model_grid = select_grid_model_residential(lvgd)
build_lv_graph_residential(lvgd, model_grid)
# no residential load but population
elif lvgd.population > 0 \
and lvgd.peak_load_residential == 0:
logger.warning(
'{} has population but no residential load. '
'No grid is created.'.format(
repr(lvgd)))
# residential load but no population
elif lvgd.population == 0 \
and lvgd.peak_load_residential > 0:
logger.warning(
'{} has no population but residential load. '
'No grid is created and thus this load is '
'missing in overall balance!'.format(
repr(lvgd)))
else:
logger.info(
'{} has got no residential load. '
'No grid is created.'.format(
repr(lvgd)))
|
python
|
{
"resource": ""
}
|
q14664
|
lv_grid_generators_bus_bar
|
train
|
def lv_grid_generators_bus_bar(nd):
"""
Calculate statistics about generators at bus bar in LV grids
Parameters
----------
nd : ding0.NetworkDing0
Network container object
Returns
-------
lv_stats : dict
Dict with keys of LV grid repr() on first level. Each of the grids has
a set of statistical information about its topology
"""
lv_stats = {}
for la in nd._mv_grid_districts[0].lv_load_areas():
for lvgd in la.lv_grid_districts():
station_neighbors = list(lvgd.lv_grid._graph[
lvgd.lv_grid._station].keys())
            # check if neighbor nodes of a station are members of the generators list
station_generators = [x for x in station_neighbors
if x in lvgd.lv_grid.generators()]
lv_stats[repr(lvgd.lv_grid._station)] = station_generators
return lv_stats
|
python
|
{
"resource": ""
}
|
q14665
|
plot_cable_length
|
train
|
def plot_cable_length(stats, plotpath):
"""
Cable length per MV grid district
"""
# cable and line kilometer distribution
f, axarr = plt.subplots(2, 2, sharex=True)
stats.hist(column=['Length of MV overhead lines'], bins=5, alpha=0.5, ax=axarr[0, 0])
stats.hist(column=['Length of MV underground cables'], bins=5, alpha=0.5, ax=axarr[0, 1])
stats.hist(column=['Length of LV overhead lines'], bins=5, alpha=0.5, ax=axarr[1, 0])
stats.hist(column=['Length of LV underground cables'], bins=5, alpha=0.5, ax=axarr[1, 1])
plt.savefig(os.path.join(plotpath,
'Histogram_cable_line_length.pdf'))
|
python
|
{
"resource": ""
}
|
q14666
|
plot_generation_over_load
|
train
|
def plot_generation_over_load(stats, plotpath):
"""
Plot of generation over load
"""
# Generation capacity vs. peak load
sns.set_context("paper", font_scale=1.1)
sns.set_style("ticks")
# reformat to MW
gen_cap_indexes = ["Gen. Cap. of MV at v_level 4",
"Gen. Cap. of MV at v_level 5",
"Gen. Cap. of LV at v_level 6",
"Gen. Cap. of LV at v_level 7"]
peak_load_index = ["LA Total LV Peak Load total"]
stats['generation_capacity'] = stats[gen_cap_indexes].sum(axis=1) / 1e3
stats['peak_load'] = stats[peak_load_index] / 1e3
sns.lmplot('generation_capacity', 'peak_load',
data=stats,
fit_reg=False,
# hue='v_nom',
# hue='Voltage level',
scatter_kws={"marker": "D",
"s": 100},
aspect=2)
plt.title('Peak load vs. generation capacity')
plt.xlabel('Generation capacity in MW')
plt.ylabel('Peak load in MW')
plt.savefig(os.path.join(plotpath,
'Scatter_generation_load.pdf'))
|
python
|
{
"resource": ""
}
|
q14667
|
concat_nd_pickles
|
train
|
def concat_nd_pickles(self, mv_grid_districts):
"""
Read multiple pickles, join nd objects and save to file
Parameters
----------
mv_grid_districts : list
Ints describing MV grid districts
"""
pickle_name = cfg_ding0.get('output', 'nd_pickle')
    # self.nd = self.read_pickles_from_files(pickle_name)
    # load the first district's pickle as base network object to merge into
    nd = pickle.load(open(os.path.join(
        self.base_path,
        'results', pickle_name.format(mv_grid_districts[0])), 'rb'))
    # TODO: instead of passing a list of mvgd's, pass list of filenames plus optionally a base_path
    for mvgd in mv_grid_districts[1:]:
        filename = os.path.join(
            self.base_path,
            'results', pickle_name.format(mvgd))
        if os.path.isfile(filename):
            mvgd_pickle = pickle.load(open(filename, 'rb'))
            if mvgd_pickle._mv_grid_districts:
                nd.add_mv_grid_district(mvgd_pickle._mv_grid_districts[0])
# save to concatenated pickle
    pickle.dump(nd,
open(os.path.join(
self.base_path,
'results',
"ding0_grids_{0}-{1}.pkl".format(
mv_grid_districts[0],
mv_grid_districts[-1])),
"wb"))
# save stats (edges and nodes data) to csv
    nodes, edges = nd.to_dataframe()
nodes.to_csv(os.path.join(
self.base_path,
'results', 'mvgd_nodes_stats_{0}-{1}.csv'.format(
mv_grid_districts[0], mv_grid_districts[-1])),
index=False)
edges.to_csv(os.path.join(
self.base_path,
'results', 'mvgd_edges_stats_{0}-{1}.csv'.format(
mv_grid_districts[0], mv_grid_districts[-1])),
index=False)
|
python
|
{
"resource": ""
}
|
q14668
|
calculate_mvgd_voltage_current_stats
|
train
|
def calculate_mvgd_voltage_current_stats(nw):
"""
MV Voltage and Current Statistics for an arbitrary network
Parameters
----------
    nw: NetworkDing0
        Network container whose MV grid(s) are studied
Returns
-------
pandas.DataFrame
nodes_df : Dataframe containing voltage statistics for every node in the MVGD
pandas.DataFrame
        lines_df : Dataframe containing current/apparent power statistics for every edge in the MVGD
"""
##############################
# close circuit breakers
nw.control_circuit_breakers(mode='close')
##############################
nodes_idx = 0
nodes_dict = {}
branches_idx = 0
branches_dict = {}
for district in nw.mv_grid_districts():
# nodes voltage
for node in district.mv_grid.graph_nodes_sorted():
nodes_idx += 1
if hasattr(node, 'voltage_res'):
Vres0 = node.voltage_res[0]
Vres1 = node.voltage_res[1]
else:
Vres0 = 'Not available'
Vres1 = 'Not available'
nodes_dict[nodes_idx] = {'MV_grid_id': district.mv_grid.id_db,
'node id': node.__repr__(),
'V_res_0': Vres0,
'V_res_1': Vres1,
'V nominal': district.mv_grid.v_level}
# branches currents
for branch in district.mv_grid.graph_edges():
branches_idx += 1
if hasattr(branch['branch'], 's_res'):
s_res0 = branch['branch'].s_res[0]
s_res1 = branch['branch'].s_res[1]
else:
s_res0 = 'Not available'
s_res1 = 'Not available'
branches_dict[branches_idx] = {
'MV_grid_id': district.mv_grid.id_db,
'branch id': branch['branch'].__repr__(), # .id_db
's_res_0': s_res0,
's_res_1': s_res1,
# 'length': branch['branch'].length / 1e3,
}
nodes_df = pd.DataFrame.from_dict(nodes_dict, orient='index')
branches_df = pd.DataFrame.from_dict(branches_dict, orient='index')
if not nodes_df.empty:
nodes_df = nodes_df.set_index('node id')
nodes_df = nodes_df.fillna(0)
nodes_df = nodes_df[sorted(nodes_df.columns.tolist())]
nodes_df.sort_index(inplace=True)
if not branches_df.empty:
branches_df = branches_df.set_index('branch id')
branches_df = branches_df.fillna(0)
branches_df = branches_df[sorted(branches_df.columns.tolist())]
branches_df.sort_index(inplace=True)
return (nodes_df, branches_df)
|
python
|
{
"resource": ""
}
|
q14669
|
calculate_lvgd_voltage_current_stats
|
train
|
def calculate_lvgd_voltage_current_stats(nw):
"""
LV Voltage and Current Statistics for an arbitrary network
Note
----
Aggregated Load Areas are excluded.
Parameters
----------
    nw: NetworkDing0
        Network container whose MV grid(s) are studied
Returns
-------
pandas.DataFrame
        nodes_df : Dataframe containing voltage statistics for every critical
            node and current statistics for every critical station, in every
            LV grid in nw.
pandas.DataFrame
lines_df : Dataframe containing current statistics for every critical
line, in every LV grid in nw.
"""
##############################
# close circuit breakers
nw.control_circuit_breakers(mode='close')
##############################
nodes_idx = 0
nodes_dict = {}
branches_idx = 0
branches_dict = {}
for mv_district in nw.mv_grid_districts():
for LA in mv_district.lv_load_areas():
if not LA.is_aggregated:
for lv_district in LA.lv_grid_districts():
# nodes voltage
crit_nodes = get_critical_voltage_at_nodes(lv_district.lv_grid)
for node in crit_nodes:
nodes_idx += 1
nodes_dict[nodes_idx] = {
'MV_grid_id': mv_district.mv_grid.id_db,
'LV_grid_id': lv_district.lv_grid.id_db,
'LA_id': LA.id_db,
'node id': node['node'].__repr__(),
'v_diff_0': node['v_diff'][0],
'v_diff_1': node['v_diff'][1],
's_max_0': 'NA',
's_max_1': 'NA',
'V nominal': lv_district.lv_grid.v_level,
}
# branches currents
critical_branches, critical_stations = get_critical_line_loading(lv_district.lv_grid)
for branch in critical_branches:
branches_idx += 1
branches_dict[branches_idx] = {
'MV_grid_id': mv_district.mv_grid.id_db,
'LV_grid_id': lv_district.lv_grid.id_db,
'LA_id': LA.id_db,
'branch id': branch['branch'].__repr__(),
's_max_0': branch['s_max'][0],
's_max_1': branch['s_max'][1],
}
# stations
for node in critical_stations:
nodes_idx += 1
nodes_dict[nodes_idx] = {
'MV_grid_id': mv_district.mv_grid.id_db,
'LV_grid_id': lv_district.lv_grid.id_db,
'LA_id': LA.id_db,
'node id': node['station'].__repr__(),
's_max_0': node['s_max'][0],
's_max_1': node['s_max'][1],
'v_diff_0': 'NA',
'v_diff_1': 'NA',
}
nodes_df = pd.DataFrame.from_dict(nodes_dict, orient='index')
branches_df = pd.DataFrame.from_dict(branches_dict, orient='index')
if not nodes_df.empty:
nodes_df = nodes_df.set_index('node id')
nodes_df = nodes_df.fillna(0)
nodes_df = nodes_df[sorted(nodes_df.columns.tolist())]
nodes_df.sort_index(inplace=True)
if not branches_df.empty:
branches_df = branches_df.set_index('branch id')
branches_df = branches_df.fillna(0)
branches_df = branches_df[sorted(branches_df.columns.tolist())]
branches_df.sort_index(inplace=True)
return nodes_df, branches_df
|
python
|
{
"resource": ""
}
|
q14670
|
init_mv_grid
|
train
|
def init_mv_grid(mv_grid_districts=[3545], filename='ding0_tests_grids_1.pkl'):
    '''Runs ding0 over the districts selected in mv_grid_districts
It also writes the result in filename. If filename = False,
then the network is not saved.
Parameters
----------
mv_grid_districts: :any:`list` of :obj:`int`
        District IDs. Defaults to [3545]
filename: str
Defaults to 'ding0_tests_grids_1.pkl'
If filename=False, then the network is not saved
Returns
-------
NetworkDing0
The created MV network.
'''
print('\n########################################')
print(' Running ding0 for district', mv_grid_districts)
# database connection/ session
engine = db.connection(section='oedb')
session = sessionmaker(bind=engine)()
# instantiate new ding0 network object
nd = NetworkDing0(name='network')
# run DINGO on selected MV Grid District
nd.run_ding0(session=session, mv_grid_districts_no=mv_grid_districts)
# export grid to file (pickle)
if filename:
print('\n########################################')
print(' Saving result in ', filename)
save_nd_to_pickle(nd, filename=filename)
print('\n########################################')
return nd
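# Usage sketch (illustrative addition, not part of the source; requires access
# to the oedb database): run a single district without pickling the result.
# nd = init_mv_grid(mv_grid_districts=[3545], filename=False)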
|
python
|
{
"resource": ""
}
|
q14671
|
process_stats
|
train
|
def process_stats(mv_districts,
n_of_districts,
source,
mode,
critical,
filename,
output):
    '''Generates stats dataframes for districts in mv_districts.
    If source=='ding0', then the districts that were run are saved per cluster
    to a pickle named filename+str(cluster[0])+'_to_'+str(cluster[-1])+'.pkl'
Parameters
----------
districts_list: list of int
List with all districts to be run.
n_of_districts: int
Number of districts to be run in each cluster
source: str
If 'pkl', pickle files are read.
If 'ding0', ding0 is run over the districts.
mode: str
If 'MV', medium voltage stats are calculated.
If 'LV', low voltage stats are calculated.
If empty, medium and low voltage stats are calculated.
critical: bool
If True, critical nodes and branches are returned
filename: str
filename prefix for saving pickles
    output: queue-like object (e.g. multiprocessing.Queue)
        Outer container where the output is put as a tuple of 6 lists::
* mv_stats: MV stats DataFrames.
If mode=='LV', then DataFrame is empty.
* lv_stats: LV stats DataFrames.
If mode=='MV', then DataFrame is empty.
* mv_crit_nodes: MV critical nodes stats DataFrames.
If mode=='LV', then DataFrame is empty.
If critical==False, then DataFrame is empty.
* mv_crit_edges: MV critical edges stats DataFrames.
If mode=='LV', then DataFrame is empty.
If critical==False, then DataFrame is empty.
* lv_crit_nodes: LV critical nodes stats DataFrames.
If mode=='MV', then DataFrame is empty.
If critical==False, then DataFrame is empty.
* lv_crit_edges: LV critical edges stats DataFrames.
If mode=='MV', then DataFrame is empty.
If critical==False, then DataFrame is empty.
'''
#######################################################################
# decide what exactly to do with MV LV
if mode == 'MV':
calc_mv = True
calc_lv = False
elif mode == 'LV':
calc_mv = False
calc_lv = True
else:
calc_mv = True
calc_lv = True
#######################################################################
clusters = [mv_districts[x:x + n_of_districts] for x in range(0, len(mv_districts), n_of_districts)]
mv_stats = []
lv_stats = []
mv_crit_nodes = []
mv_crit_edges = []
lv_crit_nodes = []
lv_crit_edges = []
#######################################################################
for cl in clusters:
nw_name = filename + str(cl[0])
if not cl[0] == cl[-1]:
nw_name = nw_name + '_to_' + str(cl[-1])
nw = NetworkDing0(name=nw_name)
if source == 'pkl':
print('\n########################################')
print(' Reading data from pickle district', cl)
print('########################################')
try:
nw = load_nd_from_pickle(nw_name + '.pkl')
except Exception:
continue
else:
# database connection/ session
engine = db.connection(section='oedb')
session = sessionmaker(bind=engine)()
print('\n########################################')
print(' Running ding0 for district', cl)
print('########################################')
try:
nw.run_ding0(session=session, mv_grid_districts_no=cl)
try:
save_nd_to_pickle(nw, filename=nw_name + '.pkl')
except Exception:
continue
except Exception:
continue
# Close database connection
if calc_mv:
stats = calculate_mvgd_stats(nw)
mv_stats.append(stats)
if calc_lv:
stats = calculate_lvgd_stats(nw)
lv_stats.append(stats)
if critical and calc_mv:
stats = calculate_mvgd_voltage_current_stats(nw)
mv_crit_nodes.append(stats[0])
mv_crit_edges.append(stats[1])
if critical and calc_lv:
stats = calculate_lvgd_voltage_current_stats(nw)
lv_crit_nodes.append(stats[0])
lv_crit_edges.append(stats[1])
#######################################################################
salida = (mv_stats, lv_stats, mv_crit_nodes, mv_crit_edges, lv_crit_nodes, lv_crit_edges)
output.put(salida)
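# Usage sketch (illustrative addition, not part of the source; the filename
# prefix is an assumption): collect MV stats for two districts read from
# pickles, using a multiprocessing queue as output container.
# from multiprocessing import Queue
# out = Queue()
# process_stats([3545, 3546], 2, source='pkl', mode='MV', critical=True,
#               filename='ding0_grids__', output=out)
# mv_stats, lv_stats, mv_crit_nodes, mv_crit_edges, *_ = out.get()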
|
python
|
{
"resource": ""
}
|
q14672
|
ding0_exemplary_plots
|
train
|
def ding0_exemplary_plots(stats, base_path=BASEPATH):
"""
Analyze multiple grid district data generated with Ding0.
Parameters
----------
stats : pandas.DataFrame
Statistics of each MV grid districts
base_path : str
Root directory of Ding0 data structure, i.e. '~/.ding0' (which is
default).
"""
# make some plot
plotpath = os.path.join(base_path, 'plots')
results.plot_cable_length(stats, plotpath)
plt.show()
results.plot_generation_over_load(stats, plotpath)
plt.show()
|
python
|
{
"resource": ""
}
|
q14673
|
nd_load_and_stats
|
train
|
def nd_load_and_stats(filenames, base_path=BASEPATH):
"""
Load multiple files from disk and generate stats
Passes the list of files assuming the ding0 data structure as default in
:code:`~/.ding0`.
Data will be concatenated and key indicators for each grid district are
returned in table and graphic format.
Parameters
----------
filenames : list of str
Provide list of files you want to analyze
base_path : str
Root directory of Ding0 data structure, i.e. '~/.ding0' (which is
default).
Returns
-------
stats : pandas.DataFrame
Statistics of each MV grid districts
"""
# load Ding0 data
nds = []
for filename in filenames:
try:
nd_load = results.load_nd_from_pickle(filename=
os.path.join(base_path,
'grids',
filename))
nds.append(nd_load)
        except Exception:
            print("File {mvgd} not found. It may have been excluded by Ding0 "
                  "or not generated yet.".format(mvgd=filename))
nd = nds[0]
for n in nds[1:]:
nd.add_mv_grid_district(n._mv_grid_districts[0])
# get statistical numbers about grid
stats = results.calculate_mvgd_stats(nd)
return stats
|
python
|
{
"resource": ""
}
|
q14674
|
extend_substation
|
train
|
def extend_substation(grid, critical_stations, grid_level):
"""
Reinforce MV or LV substation by exchanging the existing trafo and
installing a parallel one if necessary.
First, all available transformers in a `critical_stations` are extended to
maximum power. If this does not solve all present issues, additional
transformers are build.
Parameters
----------
grid: GridDing0
Ding0 grid container
critical_stations : :any:`list`
List of stations with overloading
grid_level : str
Either "LV" or "MV". Basis to select right equipment.
Notes
-----
    Currently implemented in a straightforward way for LV stations only
Returns
-------
type
#TODO: Description of return. Change type in the previous line accordingly
"""
load_factor_lv_trans_lc_normal = cfg_ding0.get(
'assumptions',
'load_factor_lv_trans_lc_normal')
load_factor_lv_trans_fc_normal = cfg_ding0.get(
'assumptions',
'load_factor_lv_trans_fc_normal')
trafo_params = grid.network._static_data['{grid_level}_trafos'.format(
grid_level=grid_level)]
trafo_s_max_max = max(trafo_params['S_nom'])
for station in critical_stations:
# determine if load or generation case and apply load factor
if station['s_max'][0] > station['s_max'][1]:
case = 'load'
lf_lv_trans_normal = load_factor_lv_trans_lc_normal
else:
case = 'gen'
lf_lv_trans_normal = load_factor_lv_trans_fc_normal
# cumulative maximum power of transformers installed
s_max_trafos = sum([_.s_max_a
for _ in station['station']._transformers])
# determine missing trafo power to solve overloading issue
s_trafo_missing = max(station['s_max']) - (
s_max_trafos * lf_lv_trans_normal)
# list of trafos with rated apparent power below `trafo_s_max_max`
extendable_trafos = [_ for _ in station['station']._transformers
if _.s_max_a < trafo_s_max_max]
# try to extend power of existing trafos
while (s_trafo_missing > 0) and extendable_trafos:
# only work with first of potentially multiple trafos
trafo = extendable_trafos[0]
trafo_s_max_a_before = trafo.s_max_a
# extend power of first trafo to next higher size available
extend_trafo_power(extendable_trafos, trafo_params)
# diminish missing trafo power by extended trafo power and update
# extendable trafos list
s_trafo_missing -= ((trafo.s_max_a * lf_lv_trans_normal) -
trafo_s_max_a_before)
extendable_trafos = [_ for _ in station['station']._transformers
if _.s_max_a < trafo_s_max_max]
        # build new trafos inside station if extending existing ones did not suffice
if s_trafo_missing > 0:
trafo_type, trafo_cnt = select_transformers(grid, s_max={
's_max': s_trafo_missing,
'case': case
})
# create transformers and add them to station of LVGD
for t in range(0, trafo_cnt):
lv_transformer = TransformerDing0(
grid=grid,
id_db=id,
v_level=0.4,
s_max_longterm=trafo_type['S_nom'],
r=trafo_type['R'],
x=trafo_type['X'])
# add each transformer to its station
grid._station.add_transformer(lv_transformer)
logger.info("{stations_cnt} have been reinforced due to overloading "
"issues.".format(stations_cnt=len(critical_stations)))
|
python
|
{
"resource": ""
}
|
q14675
|
extend_substation_voltage
|
train
|
def extend_substation_voltage(crit_stations, grid_level='LV'):
"""
Extend substation if voltage issues at the substation occur
Follows a two-step procedure:
    i) Existing transformers are extended by replacement with ones of larger
       nominal apparent power
    ii) New additional transformers are added to the substation (see 'Notes')
Parameters
----------
crit_stations : :any:`list`
List of stations with overloading or voltage issues.
grid_level : str
        Specify grid level: 'MV' or 'LV'
Notes
-----
    At maximum, 2 additional transformers of the largest size (currently 630 kVA)
    are built to resolve voltage issues at the MV-LV substation bus bar.
"""
grid = crit_stations[0]['node'].grid
trafo_params = grid.network._static_data['{grid_level}_trafos'.format(
grid_level=grid_level)]
trafo_s_max_max = max(trafo_params['S_nom'])
trafo_min_size = trafo_params.loc[trafo_params['S_nom'].idxmin(), :]
v_diff_max_fc = cfg_ding0.get('assumptions', 'lv_max_v_level_fc_diff_normal')
v_diff_max_lc = cfg_ding0.get('assumptions', 'lv_max_v_level_lc_diff_normal')
tree = nx.dfs_tree(grid._graph, grid._station)
for station in crit_stations:
v_delta = max(station['v_diff'])
        # transformers that can still be replaced by a larger type
extendable_trafos = [_ for _ in station['node']._transformers
if _.s_max_a < trafo_s_max_max]
v_delta_initially_lc = v_delta[0]
v_delta_initially_fc = v_delta[1]
new_transformers_cnt = 0
# extend existing trafo power while voltage issues exist and larger trafos
# are available
while (v_delta[0] > v_diff_max_lc) or (v_delta[1] > v_diff_max_fc):
if extendable_trafos:
# extend power of first trafo to next higher size available
extend_trafo_power(extendable_trafos, trafo_params)
elif new_transformers_cnt < 2:
# build a new transformer
lv_transformer = TransformerDing0(
grid=grid,
id_db=id,
v_level=0.4,
s_max_longterm=trafo_min_size['S_nom'],
r=trafo_min_size['R'],
x=trafo_min_size['X'])
# add each transformer to its station
grid._station.add_transformer(lv_transformer)
new_transformers_cnt += 1
# update break criteria
v_delta = get_voltage_at_bus_bar(grid, tree)
extendable_trafos = [_ for _ in station['node']._transformers
if _.s_max_a < trafo_s_max_max]
if (v_delta[0] == v_delta_initially_lc) or (
v_delta[1] == v_delta_initially_fc):
logger.warning("Extension of {station} has no effect on "
"voltage delta at bus bar. Transformation power "
"extension is halted.".format(
station=station['node']))
break
|
python
|
{
"resource": ""
}
|
q14676
|
reinforce_lv_branches_overloading
|
train
|
def reinforce_lv_branches_overloading(grid, crit_branches):
"""
Choose appropriate cable type for branches with line overloading
Parameters
----------
grid : LVGridDing0
Ding0 LV grid object
crit_branches : :any:`list`
List of critical branches incl. its line loading
Notes
-----
    If the maximum size cable is not capable of resolving the line
    overloading issue, the largest available cable type is assigned to the branch.
Returns
-------
:any:`list`
        unsolved_branches : List of branches for which no suitable cable could be found
"""
unsolved_branches = []
cable_lf = cfg_ding0.get('assumptions',
'load_factor_lv_cable_lc_normal')
cables = grid.network.static_data['LV_cables']
    # resolve overloading issues for each branch segment; iterate over a copy
    # because solved branches are removed from the list during iteration
    for branch in list(crit_branches):
I_max_branch_load = branch['s_max'][0]
I_max_branch_gen = branch['s_max'][1]
I_max_branch = max([I_max_branch_load, I_max_branch_gen])
suitable_cables = cables[(cables['I_max_th'] * cable_lf)
> I_max_branch]
if not suitable_cables.empty:
cable_type = suitable_cables.loc[suitable_cables['I_max_th'].idxmin(), :]
branch['branch'].type = cable_type
crit_branches.remove(branch)
else:
cable_type_max = cables.loc[cables['I_max_th'].idxmax(), :]
unsolved_branches.append(branch)
branch['branch'].type = cable_type_max
logger.error("No suitable cable type could be found for {branch} "
"with I_th_max = {current}. "
"Cable of type {cable} is chosen during "
"reinforcement.".format(
branch=branch['branch'],
cable=cable_type_max.name,
current=I_max_branch
))
return unsolved_branches
|
python
|
{
"resource": ""
}
|
q14677
|
extend_trafo_power
|
train
|
def extend_trafo_power(extendable_trafos, trafo_params):
"""
Extend power of first trafo in list of extendable trafos
Parameters
----------
extendable_trafos : :any:`list`
Trafos with rated power below maximum size available trafo
trafo_params : :pandas:`pandas.DataFrame<dataframe>`
Transformer parameters
"""
trafo = extendable_trafos[0]
trafo_s_max_a_before = trafo.s_max_a
trafo_nearest_larger = trafo_params.loc[
trafo_params.loc[
trafo_params['S_nom'] > trafo_s_max_a_before
].loc[
:, 'S_nom'
].idxmin(), :
]
trafo.s_max_a = trafo_nearest_larger['S_nom']
trafo.r = trafo_nearest_larger['R']
trafo.x = trafo_nearest_larger['X']
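# Usage sketch (illustrative addition, not part of the source; `station` is
# assumed to be a station object, `trafo_params` the equipment table):
# extendable = [t for t in station._transformers
#               if t.s_max_a < max(trafo_params['S_nom'])]
# if extendable:
#     extend_trafo_power(extendable, trafo_params)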
|
python
|
{
"resource": ""
}
|
q14678
|
GridDing0.graph_nodes_from_branch
|
train
|
def graph_nodes_from_branch(self, branch):
""" Returns nodes that are connected by `branch`
Args
----
branch: BranchDing0
Description #TODO
Returns
-------
(:obj:`GridDing0`, :obj:`GridDing0`)
2-tuple of nodes (Ding0 objects) #TODO:Check
"""
edges = nx.get_edge_attributes(self._graph, 'branch')
nodes = list(edges.keys())[list(edges.values()).index(branch)]
return nodes
|
python
|
{
"resource": ""
}
|
q14679
|
GridDing0.graph_branches_from_node
|
train
|
def graph_branches_from_node(self, node):
""" Returns branches that are connected to `node`
Args
----
node: GridDing0
Ding0 object (member of graph)
Returns
-------
:any:`list`
List of tuples (node in :obj:`GridDing0`, branch in :obj:`BranchDing0`) ::
(node , branch_0 ),
...,
(node , branch_N ),
"""
# TODO: This method can be replaced and speed up by using NetworkX' neighbors()
branches = []
branches_dict = self._graph.adj[node]
for branch in branches_dict.items():
branches.append(branch)
return sorted(branches, key=lambda _: repr(_))
|
python
|
{
"resource": ""
}
|
q14680
|
GridDing0.graph_edges
|
train
|
def graph_edges(self):
""" Returns a generator for iterating over graph edges
    The edge of a graph is described by the two adjacent nodes and the branch
object itself. Whereas the branch object is used to hold all relevant
power system parameters.
Yields
------
    dict
        Edge description with keys 'adj_nodes' (2-tuple of adjacent nodes)
        and 'branch' (the BranchDing0 object)
Note
----
There are generator functions for nodes (`Graph.nodes()`) and edges
(`Graph.edges()`) in NetworkX but unlike graph nodes, which can be
represented by objects, branch objects can only be accessed by using an
edge attribute ('branch' is used here)
To make access to attributes of the branch objects simpler and more
intuitive for the user, this generator yields a dictionary for each edge
that contains information about adjacent nodes and the branch object.
Note, the construction of the dictionary highly depends on the structure
of the in-going tuple (which is defined by the needs of networkX). If
this changes, the code will break.
"""
# get edges with attributes
edges = nx.get_edge_attributes(self._graph, 'branch').items()
# sort them according to connected nodes
edges_sorted = sorted(list(edges), key=lambda _: (''.join(sorted([repr(_[0][0]),repr(_[0][1])]))))
for edge in edges_sorted:
yield {'adj_nodes': edge[0], 'branch': edge[1]}
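# Usage sketch (illustrative addition, not part of the source; `grid` is
# assumed to be a GridDing0 instance): total branch length in km.
# total_length_km = sum(edge['branch'].length
#                       for edge in grid.graph_edges()) / 1e3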
|
python
|
{
"resource": ""
}
|
q14681
|
GridDing0.find_path
|
train
|
def find_path(self, node_source, node_target, type='nodes'):
"""Determines shortest path
Determines the shortest path from `node_source` to
`node_target` in _graph using networkx' shortest path
algorithm.
Args
----
node_source: GridDing0
source node, member of _graph
node_target: GridDing0
target node, member of _graph
type : str
Specify if nodes or edges should be returned. Default
is `nodes`
Returns
-------
:any:`list` of :obj:`GridDing0`
path: shortest path from `node_source` to `node_target` (list of nodes in _graph)
Notes
-----
WARNING: The shortest path is calculated using the count of hops, not the actual line lengths!
As long as the circuit breakers are open, this works fine since there's only one path. But if
they are closed, there are 2 possible paths. The result is the path with the min. count of hops,
which might have a longer total length than the other one.
See networkx' shortest_path() function for details on how the path is calculated.
"""
if (node_source in self._graph.nodes()) and (node_target in self._graph.nodes()):
path = nx.shortest_path(self._graph, node_source, node_target)
else:
raise Exception('At least one of the nodes is not a member of graph.')
if type == 'nodes':
return path
elif type == 'edges':
return [_ for _ in self._graph.edges(nbunch=path, data=True)
if (_[0] in path and _[1] in path)]
else:
raise ValueError('Please specify type as nodes or edges')
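A minimal networkx sketch illustrating the hop-count caveat from the Notes section (toy graph with hypothetical line lengths):
import networkx as nx

g = nx.Graph()
g.add_edge('s', 'a', length=10.0)
g.add_edge('a', 't', length=10.0)
g.add_edge('s', 'b', length=1.0)
g.add_edge('b', 'c', length=1.0)
g.add_edge('c', 't', length=1.0)
print(nx.shortest_path(g, 's', 't'))  # ['s', 'a', 't'] -- fewest hops, not shortest total length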
|
python
|
{
"resource": ""
}
|
q14682
|
MVGridDistrictDing0.add_lv_load_area
|
train
|
def add_lv_load_area(self, lv_load_area):
""" Adds a Load Area `lv_load_area` to _lv_load_areas if not already existing
Additionally, adds the associated centre object to MV grid's _graph as node.
Args
----
lv_load_area: LVLoadAreaDing0
instance of class LVLoadAreaDing0
"""
if lv_load_area not in self.lv_load_areas() and isinstance(lv_load_area, LVLoadAreaDing0):
self._lv_load_areas.append(lv_load_area)
self.mv_grid.graph_add_node(lv_load_area.lv_load_area_centre)
|
python
|
{
"resource": ""
}
|
q14683
|
MVGridDistrictDing0.add_lv_load_area_group
|
train
|
def add_lv_load_area_group(self, lv_load_area_group):
"""Adds a LV load_area to _lv_load_areas if not already existing.
"""
if lv_load_area_group not in self.lv_load_area_groups():
self._lv_load_area_groups.append(lv_load_area_group)
|
python
|
{
"resource": ""
}
|
q14684
|
MVGridDistrictDing0.add_aggregated_peak_demand
|
train
|
def add_aggregated_peak_demand(self):
"""Summarizes peak loads of underlying aggregated load_areas"""
peak_load_aggregated = 0
for lv_load_area in self.lv_load_areas():
if lv_load_area.is_aggregated:
peak_load_aggregated += lv_load_area.peak_load
self.peak_load_aggregated = peak_load_aggregated
|
python
|
{
"resource": ""
}
|
q14685
|
LVLoadAreaDing0.peak_generation
|
train
|
def peak_generation(self):
"""Cumulative peak generation of generators connected to LV grids of
underlying LVGDs
"""
cum_peak_generation = 0
for lv_grid_district in self._lv_grid_districts:
cum_peak_generation += lv_grid_district.lv_grid.station().peak_generation
return cum_peak_generation
|
python
|
{
"resource": ""
}
|
q14686
|
calc_geo_branches_in_polygon
|
train
|
def calc_geo_branches_in_polygon(mv_grid, polygon, mode, proj):
""" Calculate geographical branches in polygon.
For a given `mv_grid` all branches (edges in the graph of the grid) are
tested if they are in the given `polygon`. You can choose different modes
and projections for this operation.
Parameters
----------
mv_grid : MVGridDing0
MV Grid object. Edges contained in `mv_grid.graph_edges()` are taken
for the test.
polygon : :shapely:`Shapely Polygon object<polygons>`
Polygon the edges are tested against.
mode : str
Choose between 'intersects' or 'contains'.
proj : :obj:`functools.partial`
Projection function (e.g. a partial of pyproj.transform) from the
nodes' CRS to an equidistant CRS
Returns
-------
:any:`list` of :obj:`dict`
List of edge dicts as yielded by `graph_edges()`, each holding the
adjacent nodes ('adj_nodes') and the branch object ('branch')
"""
branches = []
polygon_shp = transform(proj, polygon)
for branch in mv_grid.graph_edges():
nodes = branch['adj_nodes']
branch_shp = transform(proj, LineString([nodes[0].geo_data, nodes[1].geo_data]))
# check if branches intersect with polygon if mode = 'intersects'
if mode == 'intersects':
if polygon_shp.intersects(branch_shp):
branches.append(branch)
# check if polygon contains branches if mode = 'contains'
elif mode == 'contains':
if polygon_shp.contains(branch_shp):
branches.append(branch)
# error
else:
raise ValueError('Mode is invalid!')
return branches
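A minimal shapely sketch of the two test modes (planar toy coordinates, so no projection step is needed here):
from shapely.geometry import LineString, Polygon

poly = Polygon([(0, 0), (4, 0), (4, 4), (0, 4)])
inside = LineString([(1, 1), (2, 2)])
crossing = LineString([(2, 2), (6, 2)])
print(poly.contains(inside), poly.intersects(inside))      # True True
print(poly.contains(crossing), poly.intersects(crossing))  # False True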
|
python
|
{
"resource": ""
}
|
q14687
|
calc_geo_branches_in_buffer
|
train
|
def calc_geo_branches_in_buffer(node, mv_grid, radius, radius_inc, proj):
""" Determines branches in nodes' associated graph that are at least partly
within buffer of `radius` from `node`.
If there are no nodes, the buffer is successively extended by `radius_inc`
until nodes are found.
Parameters
----------
node : LVStationDing0, GeneratorDing0, or CableDistributorDing0
origin node (e.g. LVStationDing0 object) with associated shapely object
(attribute `geo_data`) in any CRS (e.g. WGS84)
radius : float
buffer radius in m
radius_inc : float
radius increment in m
proj : int
pyproj projection object: nodes' CRS to equidistant CRS
(e.g. WGS84 -> ETRS)
Returns
-------
:any:`list` of :networkx:`NetworkX Graph Obj< >`
List of branches (NetworkX branch objects)
"""
branches = []
node_shp = transform(proj, node.geo_data)
while not branches:
buffer_zone_shp = node_shp.buffer(radius)
for branch in mv_grid.graph_edges():
nodes = branch['adj_nodes']
branch_shp = transform(proj, LineString([nodes[0].geo_data, nodes[1].geo_data]))
if buffer_zone_shp.intersects(branch_shp):
branches.append(branch)
radius += radius_inc
return branches
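A minimal shapely sketch of the expanding-buffer search (planar toy coordinates, so no projection step is needed here):
from shapely.geometry import LineString, Point

node_shp = Point(0, 0)
line = LineString([(2.5, -1), (2.5, 1)])
radius, radius_inc, hits = 1.0, 1.0, []
while not hits:
    if node_shp.buffer(radius).intersects(line):
        hits.append(line)
    radius += radius_inc
print(radius)  # 4.0 -- the buffer first reached the line at radius 3.0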
|
python
|
{
"resource": ""
}
|
q14688
|
disconnect_node
|
train
|
def disconnect_node(node, target_obj_result, graph, debug):
""" Disconnects `node` from `target_obj`
Args
----
node: LVLoadAreaCentreDing0, i.e.
Origin node - Ding0 graph object (e.g. LVLoadAreaCentreDing0)
target_obj_result: LVLoadAreaCentreDing0, i.e.
Origin node - Ding0 graph object (e.g. LVLoadAreaCentreDing0)
graph: :networkx:`NetworkX Graph Obj< >`
NetworkX graph object with nodes and newly created branches
debug: bool
If True, information is printed during process
"""
# backup kind and type of branch
branch_kind = graph.adj[node][target_obj_result]['branch'].kind
branch_type = graph.adj[node][target_obj_result]['branch'].type
branch_ring = graph.adj[node][target_obj_result]['branch'].ring
graph.remove_edge(node, target_obj_result)
# if the removed edge leaves a cable distributor with exactly two remaining
# neighbors, the distributor is superfluous: remove it and bridge the gap
if isinstance(target_obj_result, MVCableDistributorDing0):
neighbor_nodes = list(graph.neighbors(target_obj_result))
if len(neighbor_nodes) == 2:
node.grid.remove_cable_distributor(target_obj_result)
branch_length = calc_geo_dist_vincenty(neighbor_nodes[0], neighbor_nodes[1])
graph.add_edge(neighbor_nodes[0], neighbor_nodes[1], branch=BranchDing0(length=branch_length,
kind=branch_kind,
type=branch_type,
ring=branch_ring))
if debug:
logger.debug('disconnect edge {0}-{1}'.format(node, target_obj_result))
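A minimal networkx sketch of the bypass step: after the edge removal, a cable distributor left with exactly two neighbors is removed and bridged by a direct edge (strings stand in for Ding0 objects; illustrative only):
import networkx as nx

g = nx.Graph()
g.add_edge('a', 'cd', branch='a-cd')
g.add_edge('cd', 'b', branch='cd-b')
g.add_edge('cd', 'node', branch='cd-node')
g.remove_edge('node', 'cd')
neighbors = list(g.neighbors('cd'))
if len(neighbors) == 2:
    g.remove_node('cd')
    g.add_edge(neighbors[0], neighbors[1], branch='a-b')
print(sorted(g.edges()))  # [('a', 'b')]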
|
python
|
{
"resource": ""
}
|
q14689
|
mv_connect_generators
|
train
|
def mv_connect_generators(mv_grid_district, graph, debug=False):
"""Connect MV generators to MV grid
Args
----
mv_grid_district: MVGridDistrictDing0
MVGridDistrictDing0 object for which the connection process has to be
done
graph: :networkx:`NetworkX Graph Obj< >`
NetworkX graph object with nodes
debug: bool, defaults to False
If True, information is printed during process.
Returns
-------
:networkx:`NetworkX Graph Obj< >`
NetworkX graph object with nodes and newly created branches
"""
generator_buffer_radius = cfg_ding0.get('mv_connect', 'generator_buffer_radius')
generator_buffer_radius_inc = cfg_ding0.get('mv_connect', 'generator_buffer_radius_inc')
# WGS84 (conformal) to ETRS (equidistant) projection
proj1 = partial(
pyproj.transform,
pyproj.Proj(init='epsg:4326'), # source coordinate system
pyproj.Proj(init='epsg:3035')) # destination coordinate system
# ETRS (equidistant) to WGS84 (conformal) projection
proj2 = partial(
pyproj.transform,
pyproj.Proj(init='epsg:3035'), # source coordinate system
pyproj.Proj(init='epsg:4326')) # destination coordinate system
for generator in sorted(mv_grid_district.mv_grid.generators(), key=lambda x: repr(x)):
# ===== voltage level 4: generator has to be connected to MV station =====
if generator.v_level == 4:
mv_station = mv_grid_district.mv_grid.station()
branch_length = calc_geo_dist_vincenty(generator, mv_station)
# TODO: set branch type to something reasonable (to be calculated)
branch_kind = mv_grid_district.mv_grid.default_branch_kind
branch_type = mv_grid_district.mv_grid.default_branch_type
branch = BranchDing0(length=branch_length,
kind=branch_kind,
type=branch_type,
ring=None)
graph.add_edge(generator, mv_station, branch=branch)
if debug:
logger.debug('Generator {0} was connected to {1}'.format(
generator, mv_station))
# ===== voltage level 5: generator has to be connected to MV grid (next-neighbor) =====
elif generator.v_level == 5:
generator_shp = transform(proj1, generator.geo_data)
# get branches within the predefined radius `generator_buffer_radius`
branches = calc_geo_branches_in_buffer(generator,
mv_grid_district.mv_grid,
generator_buffer_radius,
generator_buffer_radius_inc, proj1)
# calc distance between generator and grid's lines -> find nearest line
conn_objects_min_stack = find_nearest_conn_objects(generator_shp,
branches,
proj1,
conn_dist_weight=1,
debug=debug,
branches_only=False)
# connect!
# go through the stack (from nearest to most far connection target object)
generator_connected = False
for dist_min_obj in conn_objects_min_stack:
# Note 1: conn_dist_ring_mod=0 to avoid re-routing of existent lines
# Note 2: In connect_node(), the default cable/line type of grid is used. This is reasonable since
# the max. allowed power of the smallest possible cable/line type (3.64 MVA for overhead
# line of type 48-AL1/8-ST1A) exceeds the max. allowed power of a generator (4.5 MVA (dena))
# (if connected separately!)
target_obj_result = connect_node(generator,
generator_shp,
mv_grid_district.mv_grid,
dist_min_obj,
proj2,
graph,
conn_dist_ring_mod=0,
debug=debug)
if target_obj_result is not None:
if debug:
logger.debug(
'Generator {0} was connected to {1}'.format(
generator, target_obj_result))
generator_connected = True
break
if not generator_connected and debug:
logger.debug(
'Generator {0} could not be connected, try to '
'increase the parameter `generator_buffer_radius` in '
'config file `config_calc.cfg` to gain more possible '
'connection points.'.format(generator))
return graph
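A minimal sketch of the projection partials used above, applied to a single point. This mirrors the pyproj 1.x-style API used in this function; it is deprecated and removed in recent pyproj releases, where Transformer.from_crs replaces it:
from functools import partial

import pyproj
from shapely.geometry import Point
from shapely.ops import transform

project = partial(
    pyproj.transform,
    pyproj.Proj(init='epsg:4326'),  # WGS84 (lon/lat)
    pyproj.Proj(init='epsg:3035'))  # ETRS89-LAEA (metres)
print(transform(project, Point(13.4, 52.5)))  # Berlin, in metre coordinates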
|
python
|
{
"resource": ""
}
|
q14690
|
solve
|
train
|
def solve(graph, debug=False, anim=None):
""" Do MV routing for given nodes in `graph`.
Data from the node objects is translated to the routing-specific format first.
Args
----
graph: :networkx:`NetworkX Graph Obj< >`
NetworkX graph object with nodes
debug: bool, defaults to False
If True, information is printed while routing
anim: AnimationDing0
AnimationDing0 object
Returns
-------
:networkx:`NetworkX Graph Obj< >`
NetworkX graph object with nodes and edges
See Also
--------
ding0.tools.animation.AnimationDing0 : for a more detailed description on anim parameter.
"""
# TODO: Implement debug mode (pass to solver) to get more information while routing (print routes, draw network, ..)
# translate DING0 graph to routing specs
specs = ding0_graph_to_routing_specs(graph)
# create routing graph using specs
RoutingGraph = Graph(specs)
timeout = 30000
# create solver objects
savings_solver = savings.ClarkeWrightSolver()
local_search_solver = local_search.LocalSearchSolver()
start = time.time()
# create initial solution using Clarke and Wright Savings methods
savings_solution = savings_solver.solve(RoutingGraph, timeout, debug, anim)
# OLD, MAY BE USED LATER:
#if not savings_solution.is_complete():
# print('=== Solution is not a complete solution! ===')
if debug:
logger.debug('ClarkeWrightSolver solution:')
util.print_solution(savings_solution)
logger.debug('Elapsed time (seconds): {}'.format(time.time() - start))
#savings_solution.draw_network()
# improve initial solution using local search
local_search_solution = local_search_solver.solve(RoutingGraph, savings_solution, timeout, debug, anim)
# this line is for debug plotting purposes:
#local_search_solution = savings_solution
if debug:
logger.debug('Local Search solution:')
util.print_solution(local_search_solution)
logger.debug('Elapsed time (seconds): {}'.format(time.time() - start))
#local_search_solution.draw_network()
return routing_solution_to_ding0_graph(graph, local_search_solution)
|
python
|
{
"resource": ""
}
|
q14691
|
cable_type
|
train
|
def cable_type(nom_power, nom_voltage, avail_cables):
"""Determine suitable type of cable for given nominal power
Based on maximum occurring current which is derived from nominal power
(either peak load or max. generation capacity) a suitable cable type is
chosen. Thus, no line overloading issues should occur.
Parameters
----------
nom_power : float
Nominal power of generators or loads connected via a cable
nom_voltage : float
Nominal voltage in kV
avail_cables : :pandas:`pandas.DataFrame<dataframe>`
Available cable types including their electrical parameters
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
Parameters of cable type
"""
I_max_load = nom_power / (3 ** 0.5 * nom_voltage)
# determine the smallest cable that can carry this current
suitable_cables = avail_cables[avail_cables['I_max_th'] > I_max_load]
if not suitable_cables.empty:
cable_type = suitable_cables.loc[suitable_cables['I_max_th'].idxmin(), :]
else:
# no single cable is sufficient -> fall back to the largest available type
cable_type = avail_cables.loc[avail_cables['I_max_th'].idxmax(), :]
return cable_type
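A minimal, self-contained sketch with an illustrative cable table (the current ratings are hypothetical, not taken from this module):
import pandas as pd

avail_cables = pd.DataFrame({
    'name': ['NA2XS2Y 3x1x150', 'NA2XS2Y 3x1x240', 'NA2XS2Y 3x1x300'],
    'I_max_th': [319.0, 417.0, 476.0]})  # A (hypothetical values)
nom_power, nom_voltage = 5000.0, 10.0  # kVA, kV -> current in A
I_max_load = nom_power / (3 ** 0.5 * nom_voltage)  # ~288.7 A
suitable = avail_cables[avail_cables['I_max_th'] > I_max_load]
print(suitable.loc[suitable['I_max_th'].idxmin(), 'name'])  # NA2XS2Y 3x1x150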
|
python
|
{
"resource": ""
}
|
q14692
|
handle_duplications
|
train
|
def handle_duplications(file_path):
""" Omits the duplications in the strings files.
Keys that appear more than once, will be joined to one appearance and the omit will be documented.
Args:
file_path (str): The path to the strings file.
"""
logging.info('Handling duplications for "%s"', file_path)
f = open_strings_file(file_path, "r+")
header_comment_key_value_tuples = extract_header_comment_key_value_tuples_from_file(f)
file_elements = []
section_file_elements = []
keys_to_objects = {}
duplicates_found = []
for header_comment, comments, key, value in header_comment_key_value_tuples:
if len(header_comment) > 0:
# New section - Appending the last section entries, sorted by comment
for elem in sorted(section_file_elements, key=lambda x: x.comments[0]):
file_elements.append(elem)
section_file_elements = []
file_elements.append(Comment(header_comment))
if key in keys_to_objects:
keys_to_objects[key].add_comments(comments)
duplicates_found.append(key)
else:
loc_obj = LocalizationEntry(comments, key, value)
keys_to_objects[key] = loc_obj
section_file_elements.append(loc_obj)
# Adding last section
for elem in sorted(section_file_elements, key=lambda x: x.comments[0]):
file_elements.append(elem)
# rewrite the file in place (Python 2 API: `unicode` serializes each element)
f.seek(0)
for element in file_elements:
f.write(unicode(element))
f.write(u"\n")
f.truncate()
f.close()
logging.info("Omitted %d duplicates (%s)" % (len(duplicates_found), ",".join(duplicates_found)))
logging.info('Finished handling duplications for "%s"', file_path)
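A minimal sketch of the per-key merge rule (plain tuples stand in for LocalizationEntry objects; illustrative only):
entries = [('greeting', 'Hello', ['screen A']),
           ('farewell', 'Bye', ['screen A']),
           ('greeting', 'Hello', ['screen B'])]
keys_to_objects, duplicates_found = {}, []
for key, value, comments in entries:
    if key in keys_to_objects:
        keys_to_objects[key][2].extend(comments)  # merge comments into the first entry
        duplicates_found.append(key)
    else:
        keys_to_objects[key] = (key, value, list(comments))
print(duplicates_found)                # ['greeting']
print(keys_to_objects['greeting'][2])  # ['screen A', 'screen B']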
|
python
|
{
"resource": ""
}
|
q14693
|
Api._copy_resources
|
train
|
def _copy_resources(self):
"""
Copy all of the resources over to the toplevel client
-return: populates self with a pointer to each ._client.Resource
"""
for resource in self._client._resources:
# set the name param, the keys now have / in them
potion_resource = self._client._resources[resource]
try:
oc_cls = _model_lookup[resource]
oc_cls._api = self
oc_cls._resource = potion_resource
setattr(self, oc_cls.__name__, oc_cls)
except KeyError: # Ignore resources we don't explicitly model
pass
|
python
|
{
"resource": ""
}
|
q14694
|
login_uname_pwd
|
train
|
def login_uname_pwd(server, api_key=None):
"""
Prompts user for username and password, gets API key from server
if not provided.
"""
username = click.prompt("Please enter your One Codex (email)")
if api_key is not None:
return username, api_key
password = click.prompt("Please enter your password (typing will be hidden)", hide_input=True)
# now get the API key
api_key = fetch_api_key_from_uname(username, password, server)
return username, api_key
|
python
|
{
"resource": ""
}
|
q14695
|
login_required
|
train
|
def login_required(fn):
"""Requires login before proceeding, but does not prompt the user to login. Decorator should
be used only on Click CLI commands.
Notes
-----
Different means of authentication will be attempted in this order:
1. An API key present in the Click context object from a previous successful authentication.
2. A bearer token (ONE_CODEX_BEARER_TOKEN) in the environment.
3. An API key (ONE_CODEX_API_KEY) in the environment.
4. An API key in the credentials file (~/.onecodex).
"""
@wraps(fn)
def login_wrapper(ctx, *args, **kwargs):
base_url = os.environ.get("ONE_CODEX_API_BASE", "https://app.onecodex.com")
api_kwargs = {"telemetry": ctx.obj["TELEMETRY"]}
api_key_prior_login = ctx.obj.get("API_KEY")
bearer_token_env = os.environ.get("ONE_CODEX_BEARER_TOKEN")
api_key_env = os.environ.get("ONE_CODEX_API_KEY")
api_key_creds_file = _login(base_url, silent=True)
if api_key_prior_login is not None:
api_kwargs["api_key"] = api_key_prior_login
elif bearer_token_env is not None:
api_kwargs["bearer_token"] = bearer_token_env
elif api_key_env is not None:
api_kwargs["api_key"] = api_key_env
elif api_key_creds_file is not None:
api_kwargs["api_key"] = api_key_creds_file
else:
click.echo(
"The command you specified requires authentication. Please login first.\n", err=True
)
ctx.exit()
ctx.obj["API"] = Api(**api_kwargs)
return fn(ctx, *args, **kwargs)
return login_wrapper
|
python
|
{
"resource": ""
}
|
q14696
|
Classifications.results
|
train
|
def results(self, json=True):
"""
Returns the complete results table for the classification.
Parameters
----------
json : bool, optional
Return result as JSON? Default True.
Returns
-------
table : dict | DataFrame
Return a JSON object with the classification results or a Pandas DataFrame
if json=False.
"""
if json is True:
return self._results()
else:
return self._table()
|
python
|
{
"resource": ""
}
|
q14697
|
Classifications.abundances
|
train
|
def abundances(self, ids=None):
"""
Query the results table to get abundance data for all or some tax ids
"""
# TODO: Consider removing this method... since it's kind of trivial
# May want to replace with something that actually gets genome-size adjusted
# abundances from the results table
if ids is None:
# get the data frame
return self.table()
else:
res = self.table()
return res[res["tax_id"].isin(ids)]
|
python
|
{
"resource": ""
}
|
q14698
|
LocalizationEntry.add_comments
|
train
|
def add_comments(self, comments):
""" Add comments to the localization entry
Args:
comments (list of str): The comments to be added to the localization entry.
"""
for comment in comments:
if comment not in self.comments and len(comment) > 0:
self.comments.append(comment)
# drop a leading empty comment; guard against an empty comment list
if self.comments and len(self.comments[0]) == 0:
self.comments.pop(0)
|
python
|
{
"resource": ""
}
|
q14699
|
merge_translations
|
train
|
def merge_translations(localization_bundle_path):
""" Merges the new translation with the old one.
The translated files are saved as '.translated' file, and are merged with old translated file.
Args:
localization_bundle_path (str): The path to the localization bundle.
"""
logging.info("Merging translations")
for lang_dir in os.listdir(localization_bundle_path):
if lang_dir == DEFAULT_LANGUAGE_DIRECTORY_NAME:
continue
for translated_path in glob.glob(os.path.join(localization_bundle_path, lang_dir, "*" + TRANSLATED_SUFFIX)):
strings_path = translated_path[:-1 * len(TRANSLATED_SUFFIX)]
localizable_path = os.path.join(localization_bundle_path,
DEFAULT_LANGUAGE_DIRECTORY_NAME,
os.path.basename(strings_path))
localization_merge_back(localizable_path, strings_path, translated_path, strings_path)
|
python
|
{
"resource": ""
}
|