code: string
signature: string
docstring: string
loss_without_docstring: float64
loss_with_docstring: float64
factor: float64
mv_station = MVStationDing0(id_db=subst_id, geo_data=station_geo_data)
mv_grid = MVGridDing0(network=self, id_db=poly_id, station=mv_station)
mv_grid_district = MVGridDistrictDing0(id_db=poly_id,
                                       mv_grid=mv_grid,
                                       geo_data=grid_district_geo_data)
mv_grid.grid_district = mv_grid_district
mv_station.grid = mv_grid

self.add_mv_grid_district(mv_grid_district)

return mv_grid_district
def build_mv_grid_district(self, poly_id, subst_id, grid_district_geo_data, station_geo_data)
Initiates a single MV grid district including its station and grid.

Parameters
----------
poly_id: int
    ID of the grid district according to the database table. Also used as ID for the created grid.
subst_id: int
    ID of the station according to the database table.
grid_district_geo_data: :shapely:`Shapely Polygon object<polygons>`
    Polygon of the grid district.
station_geo_data: :shapely:`Shapely Point object<points>`
    Point of the station.

Returns
-------
MVGridDistrictDing0
    The newly created MV grid district.
2.760585
3.077307
0.897078
# check arguments
if not all(isinstance(_, int) for _ in mv_grid_districts_no):
    raise TypeError('`mv_grid_districts` has to be a list of integers.')

# get srid settings from config
try:
    srid = str(int(cfg_ding0.get('geo', 'srid')))
except OSError:
    logger.exception('cannot open config file.')

# build SQL query
grid_districts = session.query(
    self.orm['orm_mv_grid_districts'].subst_id,
    func.ST_AsText(func.ST_Transform(
        self.orm['orm_mv_grid_districts'].geom, srid)).label('poly_geom'),
    func.ST_AsText(func.ST_Transform(
        self.orm['orm_mv_stations'].point, srid)).label('subs_geom')). \
    join(self.orm['orm_mv_stations'],
         self.orm['orm_mv_grid_districts'].subst_id ==
         self.orm['orm_mv_stations'].subst_id). \
    filter(self.orm['orm_mv_grid_districts'].subst_id.in_(
        mv_grid_districts_no)). \
    filter(self.orm['version_condition_mvgd']). \
    filter(self.orm['version_condition_mv_stations']). \
    distinct()

# read MV data from db
mv_data = pd.read_sql_query(grid_districts.statement,
                            session.bind,
                            index_col='subst_id')

# iterate over grid_district/station datasets and initiate objects
for poly_id, row in mv_data.iterrows():
    subst_id = poly_id
    region_geo_data = wkt_loads(row['poly_geom'])

    # transform `region_geo_data` to epsg 3035
    # to achieve correct area calculation of mv_grid_district
    station_geo_data = wkt_loads(row['subs_geom'])
    # projection = partial(
    #     pyproj.transform,
    #     pyproj.Proj(init='epsg:4326'),  # source coordinate system
    #     pyproj.Proj(init='epsg:3035'))  # destination coordinate system
    # region_geo_data = transform(projection, region_geo_data)

    mv_grid_district = self.build_mv_grid_district(poly_id,
                                                   subst_id,
                                                   region_geo_data,
                                                   station_geo_data)

    # import all lv_stations within mv_grid_district
    lv_stations = self.import_lv_stations(session)

    # import all lv_grid_districts within mv_grid_district
    lv_grid_districts = self.import_lv_grid_districts(session, lv_stations)

    # import load areas
    self.import_lv_load_areas(session,
                              mv_grid_district,
                              lv_grid_districts,
                              lv_stations)

    # add sum of peak loads of underlying lv grid_districts to mv_grid_district
    mv_grid_district.add_peak_demand()

logger.info('=====> MV Grid Districts imported')
def import_mv_grid_districts(self, session, mv_grid_districts_no=None)
Imports MV Grid Districts, HV-MV stations, Load Areas, LV Grid Districts and MV-LV stations; instantiates and initiates the corresponding objects.

Parameters
----------
session : sqlalchemy.orm.session.Session
    Database session
mv_grid_districts_no : :any:`list` of :obj:`int`
    IDs of MV grid districts/stations to be imported (if empty, all grid districts and stations are imported).

See Also
--------
build_mv_grid_district : used to instantiate MV grid district objects
import_lv_load_areas : used to import load areas for every single MV grid district
ding0.core.structure.regions.MVGridDistrictDing0.add_peak_demand : used to summarize peak loads of underlying load areas
3.317176
2.959706
1.120779
# get ding0s' standard CRS (SRID)
srid = str(int(cfg_ding0.get('geo', 'srid')))
# SET SRID 3035 to achieve correct area calculation of lv_grid_district
# srid = '3035'

gw2kw = 10 ** 6  # load in database is in GW -> scale to kW

# 1. filter grid districts of relevant load area
lv_grid_districs_sqla = session.query(
    self.orm['orm_lv_grid_district'].mvlv_subst_id,
    self.orm['orm_lv_grid_district'].la_id,
    self.orm['orm_lv_grid_district'].zensus_sum.label('population'),
    (self.orm['orm_lv_grid_district'].sector_peakload_residential
     * gw2kw).label('peak_load_residential'),
    (self.orm['orm_lv_grid_district'].sector_peakload_retail
     * gw2kw).label('peak_load_retail'),
    (self.orm['orm_lv_grid_district'].sector_peakload_industrial
     * gw2kw).label('peak_load_industrial'),
    (self.orm['orm_lv_grid_district'].sector_peakload_agricultural
     * gw2kw).label('peak_load_agricultural'),
    ((self.orm['orm_lv_grid_district'].sector_peakload_residential
      + self.orm['orm_lv_grid_district'].sector_peakload_retail
      + self.orm['orm_lv_grid_district'].sector_peakload_industrial
      + self.orm['orm_lv_grid_district'].sector_peakload_agricultural)
     * gw2kw).label('peak_load'),
    func.ST_AsText(func.ST_Transform(
        self.orm['orm_lv_grid_district'].geom, srid)).label('geom'),
    self.orm['orm_lv_grid_district'].sector_count_residential,
    self.orm['orm_lv_grid_district'].sector_count_retail,
    self.orm['orm_lv_grid_district'].sector_count_industrial,
    self.orm['orm_lv_grid_district'].sector_count_agricultural,
    (self.orm['orm_lv_grid_district'].sector_consumption_residential
     * gw2kw).label('sector_consumption_residential'),
    (self.orm['orm_lv_grid_district'].sector_consumption_retail
     * gw2kw).label('sector_consumption_retail'),
    (self.orm['orm_lv_grid_district'].sector_consumption_industrial
     * gw2kw).label('sector_consumption_industrial'),
    (self.orm['orm_lv_grid_district'].sector_consumption_agricultural
     * gw2kw).label('sector_consumption_agricultural'),
    self.orm['orm_lv_grid_district'].mvlv_subst_id). \
    filter(self.orm['orm_lv_grid_district'].mvlv_subst_id.in_(
        lv_stations.index.tolist())). \
    filter(self.orm['version_condition_lvgd'])

# read data from db
lv_grid_districts = pd.read_sql_query(lv_grid_districs_sqla.statement,
                                      session.bind,
                                      index_col='mvlv_subst_id')

lv_grid_districts[['sector_count_residential',
                   'sector_count_retail',
                   'sector_count_industrial',
                   'sector_count_agricultural']] = \
    lv_grid_districts[['sector_count_residential',
                       'sector_count_retail',
                       'sector_count_industrial',
                       'sector_count_agricultural']].fillna(0)

return lv_grid_districts
def import_lv_grid_districts(self, session, lv_stations)
Imports all LV grid districts within the given load area.

Parameters
----------
session : sqlalchemy.orm.session.Session
    Database session
lv_stations : :pandas:`pandas.DataFrame<dataframe>`
    Table of LV stations whose grid districts are imported.

Returns
-------
lv_grid_districts: :pandas:`pandas.DataFrame<dataframe>`
    Table of lv_grid_districts
2.37625
2.348166
1.01196
# get ding0s' standard CRS (SRID)
srid = str(int(cfg_ding0.get('geo', 'srid')))

# get list of mv grid districts
mv_grid_districts = list(self.get_mvgd_lvla_lvgd_obj_from_id()[0])

lv_stations_sqla = session.query(
    self.orm['orm_lv_stations'].mvlv_subst_id,
    self.orm['orm_lv_stations'].la_id,
    func.ST_AsText(func.ST_Transform(
        self.orm['orm_lv_stations'].geom, srid)).label('geom')). \
    filter(self.orm['orm_lv_stations'].subst_id.in_(mv_grid_districts)). \
    filter(self.orm['version_condition_mvlvst'])

# read data from db
lv_grid_stations = pd.read_sql_query(lv_stations_sqla.statement,
                                     session.bind,
                                     index_col='mvlv_subst_id')
return lv_grid_stations
def import_lv_stations(self, session)
Imports lv_stations within the given load area.

Parameters
----------
session : sqlalchemy.orm.session.Session
    Database session

Returns
-------
lv_stations: :pandas:`pandas.DataFrame<dataframe>`
    Table of lv_stations
6.561568
6.743894
0.972964
# load parameters from configs
cfg_ding0.load_config('config_db_tables.cfg')
cfg_ding0.load_config('config_calc.cfg')
cfg_ding0.load_config('config_files.cfg')
cfg_ding0.load_config('config_misc.cfg')

cfg_dict = cfg_ding0.cfg._sections

return cfg_dict
def import_config(self)
Loads parameters from config files.

Returns
-------
dict
    Dictionary of all loaded config sections and their parameters.
5.388489
5.226763
1.030942
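A minimal usage sketch of the returned config dict (the instance name `nd` is assumed for illustration; the 'geo'/'srid' section and key are the ones used elsewhere in this module):

nd = NetworkDing0(name='network')
config = nd.import_config()

# sections mirror the loaded config files, e.g. the geo section
srid = int(config['geo']['srid'])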
scenario = cfg_ding0.get("powerflow", "test_grid_stability_scenario")
start_hour = int(cfg_ding0.get("powerflow", "start_hour"))
end_hour = int(cfg_ding0.get("powerflow", "end_hour"))
start_time = datetime(1970, 1, 1, 00, 00, 0)

resolution = cfg_ding0.get("powerflow", "resolution")
srid = str(int(cfg_ding0.get('geo', 'srid')))

return PFConfigDing0(scenarios=[scenario],
                     timestep_start=start_time,
                     timesteps_count=end_hour - start_hour,
                     srid=srid,
                     resolution=resolution)
def import_pf_config(self)
Creates a power flow config class and imports config from file.

Returns
-------
PFConfigDing0
    PFConfigDing0 object
4.304978
3.779497
1.139035
msg_invalidity = []
invalid_mv_grid_districts = []

for grid_district in self.mv_grid_districts():

    # there's only one node (MV station) => grid is empty
    if len(grid_district.mv_grid._graph.nodes()) == 1:
        invalid_mv_grid_districts.append(grid_district)
        msg_invalidity.append('MV Grid District {} seems to be empty '
                              'and was removed'.format(grid_district))

    # there are only aggregated load areas
    elif all([lvla.is_aggregated for lvla in
              grid_district.lv_load_areas()]):
        invalid_mv_grid_districts.append(grid_district)
        msg_invalidity.append('MV Grid District {} contains only '
                              'aggregated Load Areas and was '
                              'removed'.format(grid_district))

for grid_district in invalid_mv_grid_districts:
    self._mv_grid_districts.remove(grid_district)

logger.warning("\n".join(msg_invalidity))
logger.info('=====> MV Grids validated')
return msg_invalidity
def validate_grid_districts(self)
Tests MV grid districts for validity concerning imported data, such as:

i) the grid contains at least one node besides the MV station (i.e. it is not empty)
ii) the grid district does not exclusively contain aggregated load areas

Invalid MV grid districts are subsequently deleted from the Network.
4.482713
4.312335
1.03951
if animation:
    anim = AnimationDing0()
else:
    anim = None

for grid_district in self.mv_grid_districts():
    grid_district.mv_grid.routing(debug=debug, anim=anim)

logger.info('=====> MV Routing (Routing, Connection of Satellites & '
            'Stations) performed')
def mv_routing(self, debug=False, animation=False)
Performs routing on all MV grids.

Parameters
----------
debug: bool, defaults to False
    If True, information is printed while routing.
animation: bool, defaults to False
    If True, images of route modification steps are exported during the routing process. A new animation object is created.

See Also
--------
ding0.core.network.grids.MVGridDing0.routing : for details on MVGridDing0 object routing
ding0.tools.animation.AnimationDing0 : for details on the animation function
14.407263
9.021312
1.597025
for mv_grid_district in self.mv_grid_districts():
    for load_area in mv_grid_district.lv_load_areas():
        if not load_area.is_aggregated:
            for lv_grid_district in load_area.lv_grid_districts():
                lv_grid_district.lv_grid.build_grid()
        else:
            logger.info('{} is of type aggregated. No grid is '
                        'created.'.format(repr(load_area)))

logger.info('=====> LV model grids created')
def build_lv_grids(self)
Builds LV grids for every non-aggregated LA in every MV grid district using model grids.
5.451837
4.183856
1.303065
for mv_grid_district in self.mv_grid_districts():
    mv_grid_district.mv_grid.connect_generators(debug=debug)

    # get predefined random seed and initialize random generator
    seed = int(cfg_ding0.get('random', 'seed'))
    random.seed(a=seed)

    for load_area in mv_grid_district.lv_load_areas():
        if not load_area.is_aggregated:
            for lv_grid_district in load_area.lv_grid_districts():
                lv_grid_district.lv_grid.connect_generators(debug=debug)
                if debug:
                    lv_grid_district.lv_grid.graph_draw(mode='LV')
        else:
            logger.info('{} is of type aggregated. LV generators are not '
                        'connected to LV grids.'.format(repr(load_area)))

logger.info('=====> Generators connected')
def connect_generators(self, debug=False)
Connects generators (graph nodes) to the grid (graph) for every MV and LV Grid District.

Args
----
debug: bool, defaults to False
    If True, information is printed during the process.
6.242691
5.24765
1.189616
for grid_district in self.mv_grid_districts():
    grid_district.mv_grid.parametrize_grid(debug=debug)

logger.info('=====> MV Grids parametrized')
def mv_parametrize_grid(self, debug=False)
Performs parametrization of grid equipment of all MV grids.

Parameters
----------
debug: bool, defaults to False
    If True, information is printed during the process.

See Also
--------
ding0.core.network.grids.MVGridDing0.parametrize_grid
8.132427
8.602471
0.945359
for grid_district in self.mv_grid_districts():
    grid_district.mv_grid.set_branch_ids()

logger.info('=====> Branch IDs set')
def set_branch_ids(self)
Performs generation and setting of IDs of branches for all MV and underlying LV grids.

See Also
--------
ding0.core.network.grids.MVGridDing0.set_branch_ids
10.923929
9.974041
1.095236
for grid_district in self.mv_grid_districts():
    grid_district.mv_grid.set_circuit_breakers(debug=debug)

logger.info('=====> MV Circuit Breakers relocated')
def set_circuit_breakers(self, debug=False)
Calculates the optimal position of the existing circuit breakers and relocates them within the graph for all MV grids.

Args
----
debug: bool, defaults to False
    If True, information is printed during the process.

See Also
--------
ding0.grid.mv_grid.tools.set_circuit_breakers
11.768369
9.405073
1.251279
for grid_district in self.mv_grid_districts():
    if mode == 'open':
        grid_district.mv_grid.open_circuit_breakers()
    elif mode == 'close':
        grid_district.mv_grid.close_circuit_breakers()
    else:
        raise ValueError("'mode' is invalid.")

if mode == 'open':
    logger.info('=====> MV Circuit Breakers opened')
elif mode == 'close':
    logger.info('=====> MV Circuit Breakers closed')
def control_circuit_breakers(self, mode=None)
Opens or closes all circuit breakers of all MV grids.

Args
----
mode: str
    Set mode='open' to open or mode='close' to close all circuit breakers.
3.648632
3.033998
1.202582
if method == 'db':
    # empty existing tables
    pypsa_io.delete_powerflow_tables(session)

    for grid_district in self.mv_grid_districts():
        if export_pypsa:
            export_pypsa_dir = repr(grid_district.mv_grid)
        else:
            export_pypsa_dir = None
        grid_district.mv_grid.run_powerflow(session,
                                            method='db',
                                            export_pypsa_dir=export_pypsa_dir,
                                            debug=debug)

elif method == 'onthefly':
    for grid_district in self.mv_grid_districts():
        if export_pypsa:
            export_pypsa_dir = repr(grid_district.mv_grid)
        else:
            export_pypsa_dir = None
        grid_district.mv_grid.run_powerflow(session,
                                            method='onthefly',
                                            export_pypsa_dir=export_pypsa_dir,
                                            debug=debug)
def run_powerflow(self, session, method='onthefly', export_pypsa=False, debug=False)
Performs power flow calculation for all MV grids.

Args:
    session : sqlalchemy.orm.session.Session
        Database session
    method: str
        Specifies the export method:
        if method='db', grid data is exported to the database;
        if method='onthefly', grid data is passed to PyPSA directly (default).
    export_pypsa: bool
        If True, PyPSA networks are exported as CSV to output/debug/grid/<MV-GRID_NAME>/
    debug: bool, defaults to False
        If True, information is printed during the process.
2.267253
2.143264
1.05785
# TODO: Finish method and enable LV case

for grid_district in self.mv_grid_districts():

    # reinforce MV grid
    grid_district.mv_grid.reinforce_grid()

    # reinforce LV grids
    for lv_load_area in grid_district.lv_load_areas():
        if not lv_load_area.is_aggregated:
            for lv_grid_district in lv_load_area.lv_grid_districts():
                lv_grid_district.lv_grid.reinforce_grid()
def reinforce_grid(self)
Performs grid reinforcement measures for all MV and LV grids.
5.012911
4.414918
1.135448
# get latest version and/or git commit hash
try:
    version = subprocess.check_output(
        ["git", "describe", "--tags", "--always"]).decode('utf8')
except (subprocess.CalledProcessError, OSError):
    version = None

# collect names of database tables used to run Ding0 and data version
if self.config['input_data_source']['input_data'] == 'versioned':
    data_version = self.config['versioned']['version']
    database_tables = self.config['versioned']
elif self.config['input_data_source']['input_data'] == 'model_draft':
    data_version = 'model_draft'
    database_tables = self.config['model_draft']
else:
    data_version = 'unknown'
    database_tables = 'unknown'

# collect assumptions
assumptions = {}
assumptions.update(self.config['assumptions'])
assumptions.update(self.config['mv_connect'])
assumptions.update(self.config['mv_routing'])
assumptions.update(self.config['mv_routing_tech_constraints'])

# determine run_id if not set
if not run_id:
    run_id = datetime.now().strftime("%Y%m%d%H%M%S")

# set instance attribute run_id
if not self._run_id:
    self._run_id = run_id

# assign data to dict
metadata = dict(
    version=version,
    mv_grid_districts=[int(_.id_db) for _ in self._mv_grid_districts],
    database_tables=database_tables,
    data_version=data_version,
    assumptions=assumptions,
    run_id=self._run_id
)

return metadata
def metadata(self, run_id=None)
Provides metadata on a Ding0 run.

Parameters
----------
run_id: str (defaults to current date)
    Distinguishes multiple versions of Ding0 data by a `run_id`. If not set, it defaults to the current date in the format YYYYMMDDhhmmss.

Returns
-------
dict
    Metadata
4.070003
3.740091
1.08821
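For orientation, a sketch of the dict shape this returns. The keys come from the code above; the values below are illustrative placeholders, not real run output:

# illustrative shape only - values are placeholders
metadata = {
    'version': 'v0.1.6-12-gabc1234',     # git describe output, or None
    'mv_grid_districts': [3545, 3546],   # ids of the districts in this run
    'database_tables': {...},            # config section 'versioned' or 'model_draft'
    'data_version': 'model_draft',
    'assumptions': {...},                # merged assumption config sections
    'run_id': '20180627153000',          # YYYYMMDDhhmmss
}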
srid = str(int(cfg_ding0.get('geo', 'srid')))

# build dicts to map MV grid district and Load Area ids to related objects
mv_grid_districts_dict, \
    lv_load_areas_dict, \
    lv_grid_districts_dict, \
    lv_stations_dict = self.get_mvgd_lvla_lvgd_obj_from_id()

# import renewable generators
# build query
generators_sqla = session.query(
    self.orm['orm_re_generators'].columns.id,
    self.orm['orm_re_generators'].columns.subst_id,
    self.orm['orm_re_generators'].columns.la_id,
    self.orm['orm_re_generators'].columns.mvlv_subst_id,
    self.orm['orm_re_generators'].columns.electrical_capacity,
    self.orm['orm_re_generators'].columns.generation_type,
    self.orm['orm_re_generators'].columns.generation_subtype,
    self.orm['orm_re_generators'].columns.voltage_level,
    func.ST_AsText(func.ST_Transform(
        self.orm['orm_re_generators'].columns.rea_geom_new,
        srid)).label('geom_new'),
    func.ST_AsText(func.ST_Transform(
        self.orm['orm_re_generators'].columns.geom, srid)).label('geom')
).filter(
    self.orm['orm_re_generators'].columns.subst_id.in_(
        list(mv_grid_districts_dict))). \
    filter(self.orm['orm_re_generators'].columns.voltage_level.in_(
        [4, 5, 6, 7])). \
    filter(self.orm['version_condition_re'])

# read data from db
generators_res = pd.read_sql_query(generators_sqla.statement,
                                   session.bind,
                                   index_col='id')

generators_res.columns = ['GenCap' if c == 'electrical_capacity' else
                          'type' if c == 'generation_type' else
                          'subtype' if c == 'generation_subtype' else
                          'v_level' if c == 'voltage_level' else
                          c for c in generators_res.columns]

# import conventional (conv) generators
# build query
generators_sqla = session.query(
    self.orm['orm_conv_generators'].columns.id,
    self.orm['orm_conv_generators'].columns.subst_id,
    self.orm['orm_conv_generators'].columns.name,
    self.orm['orm_conv_generators'].columns.capacity,
    self.orm['orm_conv_generators'].columns.fuel,
    self.orm['orm_conv_generators'].columns.voltage_level,
    func.ST_AsText(func.ST_Transform(
        self.orm['orm_conv_generators'].columns.geom, srid)).label('geom')
).filter(
    self.orm['orm_conv_generators'].columns.subst_id.in_(
        list(mv_grid_districts_dict))). \
    filter(self.orm['orm_conv_generators'].columns.voltage_level.in_(
        [4, 5, 6])). \
    filter(self.orm['version_condition_conv'])

# read data from db
generators_conv = pd.read_sql_query(generators_sqla.statement,
                                    session.bind,
                                    index_col='id')

generators_conv.columns = ['GenCap' if c == 'capacity' else
                           'type' if c == 'fuel' else
                           'v_level' if c == 'voltage_level' else
                           c for c in generators_conv.columns]

generators = pd.concat([generators_conv, generators_res], axis=0)
generators = generators.fillna('other')

return generators
def list_generators(self, session)
Lists renewable (res) and conventional (conv) generators.

Args
----
session : sqlalchemy.orm.session.Session
    Database session

Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
    Table of generators with capacity ('GenCap'), type, subtype and voltage level ('v_level').
2.598487
2.537545
1.024016
# threshold: load area peak load; if peak load < threshold, the load area
# is disregarded
lv_loads_threshold = cfg_ding0.get('mv_routing', 'load_area_threshold')
# lv_loads_threshold = 0

gw2kw = 10 ** 6  # load in database is in GW -> scale to kW

# filter list for only desired MV districts
stations_list = [d.mv_grid._station.id_db for d in mv_districts]

# build SQL query
lv_load_areas_sqla = session.query(
    self.orm['orm_lv_load_areas'].id.label('id_db'),
    (self.orm['orm_lv_load_areas'].sector_peakload_residential
     * gw2kw).label('peak_load_residential'),
    (self.orm['orm_lv_load_areas'].sector_peakload_retail
     * gw2kw).label('peak_load_retail'),
    (self.orm['orm_lv_load_areas'].sector_peakload_industrial
     * gw2kw).label('peak_load_industrial'),
    (self.orm['orm_lv_load_areas'].sector_peakload_agricultural
     * gw2kw).label('peak_load_agricultural'),
    # self.orm['orm_lv_load_areas'].subst_id
). \
    filter(self.orm['orm_lv_load_areas'].subst_id.in_(stations_list)). \
    filter(((self.orm['orm_lv_load_areas'].sector_peakload_residential
             + self.orm['orm_lv_load_areas'].sector_peakload_retail
             + self.orm['orm_lv_load_areas'].sector_peakload_industrial
             + self.orm['orm_lv_load_areas'].sector_peakload_agricultural)
            * gw2kw) > lv_loads_threshold). \
    filter(self.orm['version_condition_la'])

# read data from db
lv_load_areas = pd.read_sql_query(lv_load_areas_sqla.statement,
                                  session.bind,
                                  index_col='id_db')

return lv_load_areas
def list_load_areas(self, session, mv_districts)
Lists the peak load of load areas from the database for a set of MV grid districts.

Parameters
----------
session : sqlalchemy.orm.session.Session
    Database session
mv_districts: :any:`list`
    MV districts whose load areas are listed.

Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
    Table of load areas with sectoral peak loads in kW.
3.63307
3.528257
1.029707
gw2kw = 10 ** 6  # load in database is in GW -> scale to kW

# 1. filter grid districts of relevant load area
lv_grid_districs_sqla = session.query(
    self.orm['orm_lv_grid_district'].mvlv_subst_id,
    (self.orm['orm_lv_grid_district'].sector_peakload_residential
     * gw2kw).label('peak_load_residential'),
    (self.orm['orm_lv_grid_district'].sector_peakload_retail
     * gw2kw).label('peak_load_retail'),
    (self.orm['orm_lv_grid_district'].sector_peakload_industrial
     * gw2kw).label('peak_load_industrial'),
    (self.orm['orm_lv_grid_district'].sector_peakload_agricultural
     * gw2kw).label('peak_load_agricultural'),
). \
    filter(self.orm['orm_lv_grid_district'].mvlv_subst_id.in_(
        lv_stations)). \
    filter(self.orm['version_condition_lvgd'])

# read data from db
lv_grid_districts = pd.read_sql_query(lv_grid_districs_sqla.statement,
                                      session.bind,
                                      index_col='mvlv_subst_id')

return lv_grid_districts
def list_lv_grid_districts(self, session, lv_stations)
Lists all LV grid districts for a given set of LV stations.

Parameters
----------
session : sqlalchemy.orm.session.Session
    Database session
lv_stations: :any:`list`
    Required LV stations (one LV station per LV grid district).

Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
    Table of lv_grid_districts
3.702038
3.66558
1.009946
if not os.path.isdir(dirpath):
    os.mkdir(dirpath)
    print("Created a directory for your Ding0 data: {}".format(dirpath))
def create_dir(dirpath)
Creates a directory and reports about it.

Parameters
----------
dirpath : str
    Directory including path
8.890273
10.573643
0.840796
ding0_dir = str(cfg_ding0.get('config', 'config_dir'))
return os.path.join(os.path.expanduser('~'), ding0_dir)
def get_default_home_dir()
Returns the default home directory of Ding0.

Returns
-------
:any:`str`
    Default home directory including its path
8.341842
6.478971
1.287526
create_home_dir()
create_dir(os.path.join(get_default_home_dir(), 'log'))

if log_dir is None:
    log_dir = os.path.join(get_default_home_dir(), 'log')

logger = logging.getLogger('ding0')  # use filename as name in log
logger.setLevel(loglevel)

# create a file handler
handler = logging.FileHandler(os.path.join(log_dir, 'ding0.log'))
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter(
    '%(asctime)s-%(funcName)s-%(message)s (%(levelname)s)')
handler.setFormatter(formatter)

# create a stream handler (print to prompt)
stream = logging.StreamHandler()
stream.setLevel(logging.INFO)
stream_formatter = logging.Formatter('%(message)s (%(levelname)s)')
stream.setFormatter(stream_formatter)

# add the handlers to the logger
logger.addHandler(handler)
logger.addHandler(stream)

logger.info('########## New run of Ding0 issued #############')

return logger
def setup_logger(log_dir=None, loglevel=logging.DEBUG)
Instantiates a logger.

Parameters
----------
log_dir: str
    Directory to save the log; defaults to the 'log' subdirectory of Ding0's home directory (usually ~/.ding0/log/).
loglevel:
    Level of the logger; defaults to logging.DEBUG.

Returns
-------
logging.Logger
    The configured 'ding0' logger.
2.438086
2.344968
1.039709
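A minimal usage sketch (the chosen level is just an example): DEBUG and above go to the file handler, INFO and above to the prompt.

import logging

logger = setup_logger(loglevel=logging.INFO)
logger.info('Starting a Ding0 run')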
new_solution = self.__class__(self._problem)

# clone routes
for index, r in enumerate(self._routes):
    new_route = new_solution._routes[index] = models.Route(self._problem)
    for node in r.nodes():
        # insert new node on new route
        new_node = new_solution._nodes[node.name()]
        new_route.allocate([new_node])

# remove empty routes from new solution
new_solution._routes = [route for route in new_solution._routes
                        if route._nodes]

return new_solution
def clone(self)
Returns a deep copy of self.

The function clones:

* routes
* allocation
* nodes

Returns
-------
SavingsSolution
    A clone (deep copy) of the instance itself.
4.586227
4.165761
1.100934
allocated = all(
    [node.route_allocation() is not None
     for node in list(self._nodes.values())
     if node.name() != self._problem.depot().name()]
)

# workaround: try to use only one route (otherwise the process stops once
# the number of vehicles is reached)
valid_routes = len(self._routes) == 1

return allocated and valid_routes
def is_complete(self)
Returns True if this is a complete solution, i.e., all nodes are allocated.

Todo
----
TO BE REVIEWED

Returns
-------
bool
    True if this is a complete solution.
13.221589
14.877498
0.888697
a, b = pair

new_solution = self.clone()

i, j = new_solution.get_pair((a, b))

route_i = i.route_allocation()
route_j = j.route_allocation()

inserted = False

if (route_i is not None and route_j is not None) and (route_i != route_j):
    # node i starts route_i and node j ends route_j:
    # append route_i's nodes to route_j
    if (route_i._nodes.index(i) == 0
            and route_j._nodes.index(j) == len(route_j._nodes) - 1):
        if route_j.can_allocate(route_i._nodes):
            route_j.allocate(route_i._nodes)

            if i.route_allocation() != j.route_allocation():
                raise RuntimeError('Nodes are not on the same route '
                                   'after merge')

            inserted = True
    # node j starts route_j and node i ends route_i:
    # append route_j's nodes to route_i
    elif (route_j._nodes.index(j) == 0
            and route_i._nodes.index(i) == len(route_i._nodes) - 1):
        if route_i.can_allocate(route_j._nodes):
            route_i.allocate(route_j._nodes)

            if i.route_allocation() != j.route_allocation():
                raise RuntimeError('Nodes are not on the same route '
                                   'after merge')

            inserted = True

new_solution._routes = [route for route in new_solution._routes
                        if route._nodes]

return new_solution, inserted
def process(self, pair)
Processes a pair of nodes into the current solution.

MUST CREATE A NEW INSTANCE, NOT CHANGE ANY INSTANCE ATTRIBUTES.

Returns a new instance (deep copy) of this object.

Args
----
pair : tuple of Node
    Pair of nodes to be merged into one route.

Returns
-------
tuple
    A new solution (copy of self with the pair processed) and a bool indicating whether the pair was inserted.
2.682267
2.580369
1.03949
i, j = pairs

# neither node is allocated to a route yet
if i.route_allocation() is None or j.route_allocation() is None:
    return True

if self._allocated == len(list(self._problem.nodes())) - 1:
    # all nodes are in a route
    return False

return False
def can_process(self, pairs)
Returns True if this solution can process `pairs`.

Parameters
----------
pairs: :any:`list` of pairs of Route
    List of pairs

Returns
-------
bool
    True if this solution can process `pairs`.
10.417355
11.761196
0.885739
savings_list = {}

for i, j in graph.edges():
    # store each edge under a canonical (sorted) key
    if repr(i) < repr(j):
        t = (i, j)
    else:
        t = (j, i)

    if i == graph.depot() or j == graph.depot():
        continue

    savings_list[t] = (graph.distance(graph.depot(), i)
                       + graph.distance(graph.depot(), j)
                       - graph.distance(i, j))

sorted_savings_list = sorted(list(savings_list.items()),
                             key=operator.itemgetter(1),
                             reverse=True)

return [nodes for nodes, saving in sorted_savings_list]
def compute_savings_list(self, graph)
Computes the Clarke and Wright savings list.

A savings list contains the saving amount S for each node pair (i, j), where S is calculated as S = d(0,i) + d(0,j) - d(i,j) (CLARKE; WRIGHT, 1964).

Args
----
graph: :networkx:`NetworkX Graph Obj< >`
    A NetworkX graph is used.

Returns
-------
:any:`list` of `Node`
    List of node pairs sorted by their savings.
2.599047
2.62805
0.988964
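To make the savings formula concrete, a self-contained toy calculation (plain Python, independent of the graph class above; the coordinates form a 3-4-5 triangle):

import math

# depot 0 and two customers 1, 2 at example coordinates
coords = {0: (0, 0), 1: (4, 0), 2: (4, 3)}

def d(a, b):
    return math.dist(coords[a], coords[b])

# saving for serving 1 and 2 on one route instead of two separate trips:
# S(1,2) = d(0,1) + d(0,2) - d(1,2) = 4 + 5 - 3 = 6
saving = d(0, 1) + d(0, 2) - d(1, 2)
print(saving)  # 6.0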
savings_list = self.compute_savings_list(graph)

solution = SavingsSolution(graph)

start = time.time()

for i, j in savings_list[:]:
    if solution.is_complete():
        break

    if solution.can_process((i, j)):
        solution, inserted = solution.process((i, j))
        if inserted:
            savings_list.remove((i, j))
            if anim:
                solution.draw_network(anim)

    if time.time() - start > timeout:
        break

return solution
def solve(self, graph, timeout, debug=False, anim=None)
Solves the CVRP problem using the Clarke and Wright savings method.

Parameters
----------
graph: :networkx:`NetworkX Graph Obj< >`
    A NetworkX graph is used.
timeout: int
    Max processing time in seconds
debug: bool, defaults to False
    If True, information is printed while routing
anim: AnimationDing0
    Animation object (optional)

Returns
-------
SavingsSolution
    A solution
3.858799
3.772271
1.022938
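A hedged usage sketch (the solver instance and the graph construction are assumed here, not shown in this section):

# assuming `solver` is an instance of the class defining solve() above and
# `graph` was built from a TSPLIB file via read_file()
solution = solver.solve(graph, timeout=30)
if solution.is_complete():
    print('all nodes allocated to routes')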
package_path = ding0.__path__[0]

network.export_to_csv_folder(os.path.join(package_path,
                                          'output',
                                          'debug',
                                          'grid',
                                          export_dir))
def export_to_dir(network, export_dir)
Exports a PyPSA network as CSV files to a directory.

Args:
    network: pypsa.Network
    export_dir: str
        Sub-directory in output/debug/grid/ where CSV files of the PyPSA network are exported to.
10.110662
6.222351
1.624894
omega = 2 * pi * 50

srid = int(cfg_ding0.get('geo', 'srid'))

lines = {'line_id': [], 'bus0': [], 'bus1': [], 'x': [], 'r': [],
         's_nom': [], 'length': [], 'cables': [], 'geom': [],
         'grid_id': []}

# iterate over edges and add them one by one
for edge in edges:
    line_name = '_'.join(['MV',
                          str(grid.id_db),
                          'lin',
                          str(edge['branch'].id_db)])

    # TODO: find the real cause for L, C, I_th_max being of type Series
    if (isinstance(edge['branch'].type['L'], Series) or
            isinstance(edge['branch'].type['C'], Series)):
        x = omega * edge['branch'].type['L'].values[0] * 1e-3
    else:
        x = omega * edge['branch'].type['L'] * 1e-3

    if isinstance(edge['branch'].type['R'], Series):
        r = edge['branch'].type['R'].values[0]
    else:
        r = edge['branch'].type['R']

    if (isinstance(edge['branch'].type['I_max_th'], Series) or
            isinstance(edge['branch'].type['U_n'], Series)):
        s_nom = sqrt(3) * edge['branch'].type['I_max_th'].values[0] * \
            edge['branch'].type['U_n'].values[0]
    else:
        s_nom = sqrt(3) * edge['branch'].type['I_max_th'] * \
            edge['branch'].type['U_n']

    # get length of line (km)
    l = edge['branch'].length / 1e3

    lines['line_id'].append(line_name)
    lines['bus0'].append(edge['adj_nodes'][0].pypsa_id)
    lines['bus1'].append(edge['adj_nodes'][1].pypsa_id)
    lines['x'].append(x * l)
    lines['r'].append(r * l)
    lines['s_nom'].append(s_nom)
    lines['length'].append(l)
    lines['cables'].append(3)
    lines['geom'].append(from_shape(
        LineString([edge['adj_nodes'][0].geo_data,
                    edge['adj_nodes'][1].geo_data]),
        srid=srid))
    lines['grid_id'].append(grid.id_db)

return {'Line': DataFrame(lines).set_index('line_id')}
def edges_to_dict_of_dataframes(grid, edges)
Exports edges to a DataFrame.

Parameters
----------
grid: ding0.Network
    The MV grid whose edges are exported.
edges: list
    Edges of the Ding0 network graph.

Returns
-------
dict
    Dict with a single key 'Line' mapping to a :pandas:`pandas.DataFrame<dataframe>` of line parameters, indexed by line_id.
3.161149
3.149192
1.003797
scenario = cfg_ding0.get("powerflow", "test_grid_stability_scenario")
start_hour = cfg_ding0.get("powerflow", "start_hour")
end_hour = cfg_ding0.get("powerflow", "end_hour")

# choose temp_id
temp_id_set = 1
timesteps = 2
start_time = datetime(1970, 1, 1, 00, 00, 0)
resolution = 'H'

# inspect grid data for integrity
if debug:
    data_integrity(components, components_data)

# define investigated time range
timerange = DatetimeIndex(freq=resolution,
                          periods=timesteps,
                          start=start_time)

# TODO: Instead of hard-coding the PF config, values from the class
# PFConfigDing0 could be used here.

# create PyPSA powerflow problem
network, snapshots = create_powerflow_problem(timerange, components)

# import pq-sets
for key in ['Load', 'Generator']:
    for attr in ['p_set', 'q_set']:
        # catch MV grid districts without generators
        if not components_data[key].empty:
            series = transform_timeseries4pypsa(
                components_data[key][attr].to_frame(),
                timerange,
                column=attr)
            import_series_from_dataframe(network, series, key, attr)

series = transform_timeseries4pypsa(
    components_data['Bus']['v_mag_pu_set'].to_frame(),
    timerange,
    column='v_mag_pu_set')
import_series_from_dataframe(network, series, 'Bus', 'v_mag_pu_set')

# add coordinates to network nodes and make ready for map plotting
# network = add_coordinates(network)

# start powerflow calculations
network.pf(snapshots)

# make a line loading plot
# TODO: make this optional
# plot_line_loading(network, timestep=0,
#                   filename='Line_loading_load_case.png')
# plot_line_loading(network, timestep=1,
#                   filename='Line_loading_feed-in_case.png')

# process results
bus_data, line_data = process_pf_results(network)

# assign results data to graph
assign_bus_results(grid, bus_data)
assign_line_results(grid, line_data)

# export network if directory is specified
if export_pypsa_dir:
    export_to_dir(network, export_dir=export_pypsa_dir)
def run_powerflow_onthefly(components, components_data, grid, export_pypsa_dir=None, debug=False)
Runs a power flow to test grid stability.

Two cases are defined to be tested here:

i) load case
ii) feed-in case

Parameters
----------
components: dict of :pandas:`pandas.DataFrame<dataframe>`
components_data: dict of :pandas:`pandas.DataFrame<dataframe>`
grid:
    Grid to which the power flow results are assigned.
export_pypsa_dir: str
    Sub-directory in output/debug/grid/ where CSV files of the PyPSA network are exported to. Export is omitted if the argument is empty.
debug: bool, defaults to False
    If True, grid data is checked for integrity before the calculation.
5.517355
5.210369
1.058918
data_check = {}

for comp in ['Bus', 'Load']:  # list(components_data.keys()):
    data_check[comp] = {}
    data_check[comp]['length_diff'] = len(components[comp]) - \
        len(components_data[comp])

# print short report to user and exit program if counts do not match
for comp in list(data_check.keys()):
    if data_check[comp]['length_diff'] != 0:
        logger.error("{comp} data is invalid. You supplied {no_comp} "
                     "{comp} objects and {no_data} datasets. Check your "
                     "grid data and try again".format(
                         comp=comp,
                         no_comp=len(components[comp]),
                         no_data=len(components_data[comp])))
        sys.exit(1)
def data_integrity(components, components_data)
Checks grid data for integrity.

Parameters
----------
components: dict
    Grid components
components_data: dict
    Grid component data (such as p, q and v set points)
5.014255
5.117386
0.979847
# iterate over nodes and assign voltage obtained from power flow analysis
for node in grid._graph.nodes():
    # check if node is connected to graph
    if (node not in grid.graph_isolated_nodes()
            and not isinstance(node, LVLoadAreaCentreDing0)):

        if isinstance(node, LVStationDing0):
            node.voltage_res = bus_data.loc[node.pypsa_id, 'v_mag_pu']
        elif isinstance(node, (LVStationDing0, LVLoadAreaCentreDing0)):
            if node.lv_load_area.is_aggregated:
                node.voltage_res = bus_data.loc[node.pypsa_id, 'v_mag_pu']
        elif not isinstance(node, CircuitBreakerDing0):
            node.voltage_res = bus_data.loc[node.pypsa_id, 'v_mag_pu']
        else:
            logger.warning("Object {} has been skipped while importing "
                           "results!".format(node))
def assign_bus_results(grid, bus_data)
Writes results obtained from PF to the graph.

Parameters
----------
grid: ding0.network
bus_data: :pandas:`pandas.DataFrame<dataframe>`
    DataFrame containing voltage levels obtained from the PF analysis.
6.000456
5.580734
1.075209
package_path = ding0.__path__[0]

edges = [edge for edge in grid.graph_edges()
         if (edge['adj_nodes'][0] in grid._graph.nodes()
             and not isinstance(edge['adj_nodes'][0],
                                LVLoadAreaCentreDing0))
         and (edge['adj_nodes'][1] in grid._graph.nodes()
              and not isinstance(edge['adj_nodes'][1],
                                 LVLoadAreaCentreDing0))]

decimal_places = 6

for edge in edges:
    line_id = "MV_{0}_lin_{1}".format(grid.id_db, edge['branch'].id_db)
    line = line_data.loc[line_id]

    # apparent power per time step:
    # S = sqrt(max(|p0|, |p1|)^2 + max(|q0|, |q1|)^2)
    s_res = [
        round(sqrt(
            max(abs(line['p0'][0]), abs(line['p1'][0])) ** 2 +
            max(abs(line['q0'][0]), abs(line['q1'][0])) ** 2),
            decimal_places),
        round(sqrt(
            max(abs(line['p0'][1]), abs(line['p1'][1])) ** 2 +
            max(abs(line['q0'][1]), abs(line['q1'][1])) ** 2),
            decimal_places)]

    edge['branch'].s_res = s_res
def assign_line_results(grid, line_data)
Writes results obtained from PF to the graph.

Parameters
----------
grid: ding0.network
line_data: :pandas:`pandas.DataFrame<dataframe>`
    DataFrame containing active/reactive power at nodes obtained from the PF analysis.
2.332367
2.209295
1.055707
network = Network()
network.set_snapshots(time_range_lim)
snapshots = network.snapshots

return network, snapshots
def init_pypsa_network(time_range_lim)
Instantiates a PyPSA network.

Parameters
----------
time_range_lim: Pandas DatetimeIndex
    Time range to be analyzed.

Returns
-------
network: PyPSA network object
    Contains the power flow problem.
snapshots: iterable
    Contains snapshots to be analyzed by the power flow calculation.
4.374693
4.792657
0.912791
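A short usage sketch; the two-step hourly range mirrors the hard-coded setup in run_powerflow_onthefly above:

import pandas as pd

# two hourly snapshots starting at the Unix epoch, as used above
timerange = pd.date_range(start='1970-01-01', periods=2, freq='H')
network, snapshots = init_pypsa_network(timerange)
print(len(snapshots))  # 2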
timeseries.index = [str(i) for i in timeseries.index]

if column is None:
    pypsa_timeseries = timeseries.apply(
        Series).transpose().set_index(timerange)
else:
    pypsa_timeseries = timeseries[column].apply(
        Series).transpose().set_index(timerange)

return pypsa_timeseries
def transform_timeseries4pypsa(timeseries, timerange, column=None)
Transforms pq-set timeseries to a PyPSA-compatible format.

Parameters
----------
timeseries: Pandas DataFrame
    Containing the timeseries.
timerange: Pandas DatetimeIndex
    Time range the timeseries is re-indexed to.
column: str, optional
    Column to transform; if None, the whole DataFrame is transformed.

Returns
-------
pypsa_timeseries: Pandas DataFrame
    Reformatted pq-set timeseries.
2.650036
2.755164
0.961844
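A toy example of the reshaping this performs (the component names are made up): per-component lists of snapshot values become one column per component, indexed by the snapshots.

import pandas as pd

timerange = pd.date_range('1970-01-01', periods=2, freq='H')
# one list of per-snapshot values per component, as produced upstream
ts = pd.DataFrame({'p_set': [[1.0, 2.0], [3.0, 4.0]]},
                  index=['load_a', 'load_b'])

out = transform_timeseries4pypsa(ts, timerange, column='p_set')
# out has snapshots as the index and one column per component:
#                      load_a  load_b
# 1970-01-01 00:00:00     1.0     3.0
# 1970-01-01 01:00:00     2.0     4.0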
# initialize powerflow problem
network, snapshots = init_pypsa_network(timerange)

# add components to network
for component in components.keys():
    network.import_components_from_dataframe(components[component],
                                             component)

return network, snapshots
def create_powerflow_problem(timerange, components)
Creates a PyPSA network object and fills it with data.

Parameters
----------
timerange: Pandas DatetimeIndex
    Time range to be analyzed by the PF.
components: dict
    Components to import, keyed by component type.

Returns
-------
network: PyPSA network object
    The power flow problem.
snapshots: iterable
    Snapshots to be analyzed by the power flow calculation.
5.540483
4.63675
1.194907
'''Organize parallel runs of ding0.

The function takes all districts in a list and divides them into
n_of_processes parallel processes. For each process, the assigned
districts are given to the function process_runs() with the argument
n_of_districts.

Parameters
----------
districts_list: list of int
    List with all districts to be run.
n_of_processes: int
    Number of processes to run in parallel.
n_of_districts: int
    Number of districts to be run in each cluster given as argument to
    process_runs().
run_id: str
    Identifier for a run of Ding0. For example it is used to create a
    subdirectory of os.path.join(`base_path`, 'results').
base_path : str
    Base path for ding0 data (input, results and logs).
    Default is `None` which sets it to :code:`~/.ding0`
    (may deviate on windows systems).
    Specify your own but keep in mind that it requires a particular
    structure of subdirectories.

See Also
--------
ding0_runs
'''
# define base path
if base_path is None:
    base_path = BASEPATH

if not os.path.exists(os.path.join(base_path, run_id)):
    os.makedirs(os.path.join(base_path, run_id))

start = time.time()

# define an output queue
output_info = mp.Queue()

# set up a list of processes that we want to run
max_dist = len(districts_list)
chunk_length = floor(max_dist / n_of_processes)
if chunk_length == 0:
    chunk_length = 1
chunks = [districts_list[x:x + chunk_length]
          for x in range(0, len(districts_list), chunk_length)]

processes = []
for chunk in chunks:
    mv_districts = chunk
    processes.append(mp.Process(target=process_runs,
                                args=(mv_districts,
                                      n_of_districts,
                                      output_info,
                                      run_id,
                                      base_path)))

# run processes
for p in processes:
    p.start()

# retrieve output_info from processes
output = [output_info.get() for p in processes]
output = list(itertools.chain.from_iterable(output))

# exit the completed processes
for p in processes:
    p.join()

print('Elapsed time for', str(max_dist),
      'MV grid districts (seconds): {}'.format(time.time() - start))

return output
def parallel_run(districts_list, n_of_processes, n_of_districts, run_id, base_path=None)
Organizes parallel runs of ding0.

The function takes all districts in a list and divides them into n_of_processes parallel processes. For each process, the assigned districts are given to the function process_runs() with the argument n_of_districts.

Parameters
----------
districts_list: list of int
    List with all districts to be run.
n_of_processes: int
    Number of processes to run in parallel.
n_of_districts: int
    Number of districts to be run in each cluster given as argument to process_runs().
run_id: str
    Identifier for a run of Ding0. For example it is used to create a subdirectory of os.path.join(`base_path`, 'results').
base_path : str
    Base path for ding0 data (input, results and logs). Default is `None` which sets it to :code:`~/.ding0` (may deviate on windows systems). Specify your own but keep in mind that it requires a particular structure of subdirectories.

See Also
--------
ding0_runs
4.271521
1.998627
2.137227
'''Runs a process organized by parallel_run().

The function takes all districts in mv_districts and divides them into
clusters of n_of_districts each. For each cluster, ding0 is run and the
resulting network is saved as a pickle.

Parameters
----------
mv_districts: list of int
    List with all districts to be run.
n_of_districts: int
    Number of districts in a cluster.
output_info:
    Info about how the run went.
run_id: str
    Identifier for a run of Ding0. For example it is used to create a
    subdirectory of os.path.join(`base_path`, 'results').
base_path : str
    Base path for ding0 data (input, results and logs).
    Default is `None` which sets it to :code:`~/.ding0`
    (may deviate on windows systems).
    Specify your own but keep in mind that it requires a particular
    structure of subdirectories.

See Also
--------
parallel_run
'''
# database connection/session
engine = db.connection(section='oedb')
session = sessionmaker(bind=engine)()

clusters = [mv_districts[x:x + n_of_districts]
            for x in range(0, len(mv_districts), n_of_districts)]
output_clusters = []

for cl in clusters:
    print('\n########################################')
    print('  Running ding0 for district', cl)
    print('########################################')

    nw_name = 'ding0_grids_' + str(cl[0])
    if not cl[0] == cl[-1]:
        nw_name = nw_name + '_to_' + str(cl[-1])

    nw = NetworkDing0(name=nw_name)
    try:
        msg = nw.run_ding0(session=session, mv_grid_districts_no=cl)

        if msg:
            status = 'run error'
        else:
            msg = ''
            status = 'OK'
            results.save_nd_to_pickle(nw, os.path.join(base_path, run_id))

        output_clusters.append((nw_name, status, msg, nw.metadata))
    except Exception as e:
        output_clusters.append((nw_name, 'corrupt dist', e, nw.metadata))
        continue

output_info.put(output_clusters)
def process_runs(mv_districts, n_of_districts, output_info, run_id, base_path)
Runs a process organized by parallel_run().

The function takes all districts in mv_districts and divides them into clusters of n_of_districts each. For each cluster, ding0 is run and the resulting network is saved as a pickle.

Parameters
----------
mv_districts: list of int
    List with all districts to be run.
n_of_districts: int
    Number of districts in a cluster.
output_info:
    Info about how the run went.
run_id: str
    Identifier for a run of Ding0. For example it is used to create a subdirectory of os.path.join(`base_path`, 'results').
base_path : str
    Base path for ding0 data (input, results and logs). Default is `None` which sets it to :code:`~/.ding0` (may deviate on windows systems). Specify your own but keep in mind that it requires a particular structure of subdirectories.

See Also
--------
parallel_run
6.065207
2.51138
2.415089
mvgds = []

metadata = meta[0]

for mvgd in meta:
    if isinstance(mvgd['mv_grid_districts'], list):
        mvgds.extend(mvgd['mv_grid_districts'])
    else:
        mvgds.append(mvgd['mv_grid_districts'])

metadata['mv_grid_districts'] = mvgds

return metadata
def process_metadata(meta)
Merges metadata of runs on multiple grid districts.

Parameters
----------
meta: list of dict
    Metadata of the run of each MV grid district.

Returns
-------
dict
    Single metadata dict including the merged metadata.
3.252286
2.956047
1.100215
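A small worked example of the merge; the metadata dicts are reduced to the relevant key, and the district IDs are placeholders:

meta = [
    {'run_id': '20180627153000', 'mv_grid_districts': [3545]},
    {'run_id': '20180627153000', 'mv_grid_districts': [3546, 3547]},
]

merged = process_metadata(meta)
print(merged['mv_grid_districts'])  # [3545, 3546, 3547]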
nd = results.load_nd_from_pickle(filename=filename)

nodes_df, edges_df = nd.to_dataframe()

# get statistical numbers about grid
stats = results.calculate_mvgd_stats(nd)

# plot distribution of load/generation of subjacent LV grids
stations = nodes_df[nodes_df['type'] == 'LV Station']

f, axarr = plt.subplots(2, sharex=True)
f.suptitle("Peak load (top) / peak generation capacity (bottom) at LV "
           "substations in kW")
stations['peak_load'].hist(bins=20, alpha=0.5, ax=axarr[0])
axarr[0].set_title("Peak load in kW")
stations['generation_capacity'].hist(bins=20, alpha=0.5, ax=axarr[1])
axarr[1].set_title("Peak generation capacity in kW")

plt.show()

# introduction of report
print("You are analyzing MV grid district {mvgd}\n".format(
    mvgd=int(stats.index.values)))

# print all the calculated stats; this isn't a particularly beautiful
# format but it is information rich
with option_context('display.max_rows', None,
                    'display.max_columns', None,
                    'display.max_colwidth', -1):
    print(stats.T)
def example_stats(filename)
Obtains statistics from a created grid topology.

Prints some statistical numbers and produces exemplary figures.
5.791316
5.754395
1.006416
# get path
package_path = ding0.__path__[0]
file = path.join(package_path, 'output', 'debug', 'graph1.gpickle')

if mode == 'write':
    try:
        nx.write_gpickle(graph1, file)
        print('=====> DEBUG: Graph written to', file)
    except OSError:
        raise FileNotFoundError('Could not write to file', file)

elif mode == 'compare':
    if graph2 is None:
        try:
            graph2 = nx.read_gpickle(file)
            print('=====> DEBUG: Graph read from', file)
        except OSError:
            raise FileNotFoundError('File not found:', file)

    # get data
    nodes1 = sorted(graph1.nodes(), key=lambda _: repr(_))
    nodes2 = sorted(graph2.nodes(), key=lambda _: repr(_))
    edges1 = sorted(graph1.edges(), key=lambda _: repr(_))
    edges2 = sorted(graph2.edges(), key=lambda _: repr(_))

    graphs_are_isomorphic = True

    # check nodes
    if len(nodes1) > len(nodes2):
        print('Node count in graph 1 > node count in graph 2')
        print('Difference:', [node for node in nodes1
                              if repr(node) not in repr(nodes2)])
        graphs_are_isomorphic = False
    elif len(nodes2) > len(nodes1):
        print('Node count in graph 2 > node count in graph 1')
        print('Difference:', [node for node in nodes2
                              if repr(node) not in repr(nodes1)])
        graphs_are_isomorphic = False

    # check edges
    if len(edges1) > len(edges2):
        print('Edge count in graph 1 > edge count in graph 2')
        print('Difference:',
              [edge for edge in edges1
               if (repr(edge) not in repr(edges2))
               and (repr(tuple(reversed(edge))) not in repr(edges2))])
        graphs_are_isomorphic = False
    elif len(edges2) > len(edges1):
        print('Edge count in graph 2 > edge count in graph 1')
        print('Difference:',
              [edge for edge in edges2
               if (repr(edge) not in repr(edges1))
               and (repr(tuple(reversed(edge))) not in repr(edges1))])
        graphs_are_isomorphic = False
    elif (len(edges1) == len(edges2)) and \
            (len([edge for edge in edges1
                  if (repr(edge) not in repr(edges2))
                  and (repr(tuple(reversed(edge)))
                       not in repr(edges2))]) > 0):
        print('Edge count in graph 1 = edge count in graph 2')
        print('Difference:',
              [edge for edge in edges2
               if (repr(edge) not in repr(edges1))
               and (repr(tuple(reversed(edge))) not in repr(edges1))])
        graphs_are_isomorphic = False

    if graphs_are_isomorphic:
        print('=====> DEBUG: Graphs are isomorphic')
    else:
        print('=====> DEBUG: Graphs are NOT isomorphic')

else:
    raise ValueError("Invalid value for mode, use mode='write' or "
                     "'compare'")

exit(0)
def compare_graphs(graph1, mode, graph2=None)
Compares a graph with a saved one, which is loaded via networkx's gpickle.

Parameters
----------
graph1 : networkx.graph
    First Ding0 MV graph for comparison.
graph2 : networkx.graph
    Second Ding0 MV graph for comparison. If a second graph is not provided, it will be loaded from disk with a hard-coded file name.
mode: str
    'write' or 'compare'
1.826323
1.763043
1.035893
if mode == 'MV':
    return sum([_.capacity for _ in self.grid.generators()])

elif mode == 'MVLV':
    # calc MV generator capacities
    cum_mv_peak_generation = sum(
        [_.capacity for _ in self.grid.generators()])

    # calc LV generator capacities
    cum_lv_peak_generation = 0
    for load_area in self.grid.grid_district.lv_load_areas():
        cum_lv_peak_generation += load_area.peak_generation

    return cum_mv_peak_generation + cum_lv_peak_generation

else:
    raise ValueError("parameter 'mode' is invalid!")
def peak_generation(self, mode)
Calculates the cumulative peak generation of generators connected to underlying grids.

This is done instantaneously using a bottom-up approach.

Parameters
----------
mode: str
    Determines which generators are included::

        'MV':   Only generation capacities of MV level are considered.
        'MVLV': Generation capacities of MV and LV are considered
                (= cumulative generation capacities in the entire MVGD).

Returns
-------
float
    Cumulative peak generation
4.955123
3.553045
1.394613
mv_station_v_level_operation = float(cfg_ding0.get(
    'mv_routing_tech_constraints', 'mv_station_v_level_operation'))

self.v_level_operation = mv_station_v_level_operation * self.grid.v_level
def set_operation_voltage_level(self)
Set operation voltage level
11.792414
11.723053
1.005917
return '_'.join(['MV',
                 str(self.grid.grid_district.lv_load_area.
                     mv_grid_district.mv_grid.id_db),
                 'tru',
                 str(self.id_db)])
def pypsa_id(self)
Returns the PyPSA ID string of this object, built as 'MV_<mv_grid_id>_tru_<id>'.
25.518593
24.342222
1.048326
if not os.path.exists(base_path):
    print("Creating directory {} for results data.".format(base_path))
    os.mkdir(base_path)
if not os.path.exists(os.path.join(base_path, 'results')):
    os.mkdir(os.path.join(base_path, 'results'))
if not os.path.exists(os.path.join(base_path, 'plots')):
    os.mkdir(os.path.join(base_path, 'plots'))
if not os.path.exists(os.path.join(base_path, 'info')):
    os.mkdir(os.path.join(base_path, 'info'))
if not os.path.exists(os.path.join(base_path, 'log')):
    os.mkdir(os.path.join(base_path, 'log'))
def create_results_dirs(base_path)
Creates the base path dir and its subdirectories.

Parameters
----------
base_path : str
    The base path; gets subdirectories for raw and processed results.
1.427301
1.490328
0.95771
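The repeated exists/mkdir pattern above can be collapsed; a minimal equivalent sketch using os.makedirs (a design alternative for illustration, not the module's code):

import os

def create_results_dirs_compact(base_path):
    # exist_ok makes the calls idempotent, so no exists() checks are needed
    for sub in ('', 'results', 'plots', 'info', 'log'):
        os.makedirs(os.path.join(base_path, sub), exist_ok=True)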
start = time.time()

# define base path
if base_path is None:
    base_path = BASEPATH

# database connection/session
engine = db.connection(section='oedb')
session = sessionmaker(bind=engine)()

corrupt_grid_districts = pd.DataFrame(columns=['id', 'message'])

for mvgd in mv_grid_districts:
    # instantiate ding0 network object
    nd = NetworkDing0(name='network', run_id=run_id)

    if not os.path.exists(os.path.join(base_path, "grids")):
        os.mkdir(os.path.join(base_path, "grids"))

    if not failsafe:
        # run DING0 on selected MV Grid District
        msg = nd.run_ding0(session=session,
                           mv_grid_districts_no=[mvgd])

        # save results
        results.save_nd_to_pickle(nd, os.path.join(base_path, "grids"))
    else:
        # try to perform ding0 run on grid district
        try:
            msg = nd.run_ding0(session=session,
                               mv_grid_districts_no=[mvgd])
            # if not successful, put grid district to report
            if msg:
                corrupt_grid_districts = corrupt_grid_districts.append(
                    pd.Series({'id': mvgd, 'message': msg[0]}),
                    ignore_index=True)
            # if successful, save results
            else:
                results.save_nd_to_pickle(nd,
                                          os.path.join(base_path, "grids"))
        except Exception as e:
            corrupt_grid_districts = corrupt_grid_districts.append(
                pd.Series({'id': mvgd, 'message': e}),
                ignore_index=True)
            continue

    # merge metadata of multiple runs
    if 'metadata' not in locals():
        metadata = nd.metadata
    else:
        if isinstance(mvgd, list):
            metadata['mv_grid_districts'].extend(mvgd)
        else:
            metadata['mv_grid_districts'].append(mvgd)

# save metadata to disk
with open(os.path.join(base_path, "grids",
                       'Ding0_{}.meta'.format(run_id)), 'w') as f:
    json.dump(metadata, f)

# report on unsuccessful runs
corrupt_grid_districts.to_csv(
    os.path.join(base_path, "grids", 'corrupt_mv_grid_districts.txt'),
    index=False,
    float_format='%.0f')

print('Elapsed time for', str(len(mv_grid_districts)),
      'MV grid districts (seconds): {}'.format(time.time() - start))

return msg
def run_multiple_grid_districts(mv_grid_districts, run_id, failsafe=False, base_path=None)
Performs a Ding0 run on the given grid districts.

Parameters
----------
mv_grid_districts : list
    Integers describing grid districts.
run_id: str
    Identifier for a run of Ding0. For example it is used to create a subdirectory of os.path.join(`base_path`, 'results').
failsafe : bool
    Setting to True enables failsafe mode, where corrupt grid districts (mostly due to data issues) are reported and skipped. The report is to be found in the log dir under :code:`~/.ding0`. Default is False.
base_path : str
    Base path for ding0 data (input, results and logs). Default is `None` which sets it to :code:`~/.ding0` (may deviate on windows systems). Specify your own but keep in mind that it requires a particular structure of subdirectories.

Returns
-------
msg : str
    Traceback of error computing corrupt MV grid district

    .. TODO: this is only true if the try-except environment is moved into this function and traceback return is implemented

Notes
-----
Consider that a large amount of MV grid districts may take hours or up to days to compute. A computational run for a single grid district may consume around 30 secs.
3.0826
2.897764
1.063786
package_path = ding0.__path__[0]
FILE = path.join(package_path, 'config', filename)

try:
    cfg.read(FILE)
    global _loaded
    _loaded = True
except Exception:
    logger.exception("Config file not found.")
def load_config(filename)
Reads the config file specified by `filename`.

Parameters
----------
filename : str
    Name of the config file inside Ding0's config directory.
8.396561
10.273497
0.817303
if not _loaded:
    init()

if not cfg.has_section(section):
    cfg.add_section(section)

cfg.set(section, key, value)

with open(FILE, 'w') as configfile:
    cfg.write(configfile)
def set(section, key, value)
Sets a value for a [section], key pair. If the section doesn't exist yet, it is created.

Parameters
----------
section: str
    The section.
key: str
    The key.
value: float, int, str
    The value.

See Also
--------
get
2.616455
3.979307
0.657515
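A round-trip sketch (the section and key names are invented for illustration; `get` is the counterpart referenced in See Also):

set('user_prefs', 'srid', '4326')
assert get('user_prefs', 'srid') == '4326'  # ConfigParser stores values as strings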
depots = []

for line in f:
    line = strip(line)
    if line == '-1' or line == 'EOF':  # end of section
        break
    else:
        depots.append(line)

if len(depots) != 1:
    raise ParseException('One and only one depot is supported')

return int(depots[0])
def _parse_depot_section(f)
Parses the TSPLIB DEPOT_SECTION data part from file descriptor f.

Args
----
f : file
    File descriptor

Returns
-------
int
    The ID of the single depot.
3.786804
4.251743
0.890648
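For reference, the slice of a TSPLIB file this parser consumes looks like the following minimal, hypothetical instance (the parser is handed the lines after the DEPOT_SECTION header, reads the depot ID 1 and stops at the -1 terminator):

DEPOT_SECTION
 1
 -1
EOF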
section = {}

dimensions = None
if current_section == 'NODE_COORD_SECTION':
    dimensions = 3  # i: (i, j)
elif current_section == 'DEMAND_SECTION':
    dimensions = 2  # i: q
else:
    raise ParseException('Invalid section {}'.format(current_section))

n = 0
for line in f:
    line = strip(line)

    # check dimensions
    definitions = re.split(r'\s+', line)
    if len(definitions) != dimensions:
        raise ParseException('Invalid dimensions from section {}. '
                             'Expected: {}'.format(current_section,
                                                   dimensions))

    node = int(definitions[0])
    values = [int(v) for v in definitions[1:]]
    if len(values) == 1:
        values = values[0]

    section[node] = values

    n = n + 1
    if n == nodes:
        break

# assert all nodes were read
if n != nodes:
    raise ParseException('Missing {} nodes definition from section '
                         '{}'.format(nodes - n, current_section))

return section
def _parse_nodes_section(f, current_section, nodes)
Parses TSPLIB NODE_COORD_SECTION or DEMAND_SECTION from file descriptor f.

Returns a dict with the node as key.
3.387182
3.213538
1.054035
matrix = []

n = 0
for line in f:
    line = strip(line)

    regex = re.compile(r'\s+')
    row = regex.split(line)

    matrix.append(row)

    n = n + 1
    if n == nodes:
        break

if n != nodes:
    raise ParseException('Missing {} nodes definition from section '
                         'EDGE_WEIGHT_SECTION'.format(nodes - n))

return matrix
def _parse_edge_weight(f, nodes)
Parses TSPLIB EDGE_WEIGHT_SECTION from file descriptor f.

Supports only FULL_MATRIX for now.
4.646675
3.805953
1.220897
x1, y1 = a
x2, y2 = b

return int(round(math.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)))
def calculate_euc_distance(a, b)
Calculates the Euclidean distance between two points a and b.

Args
----
a : (:obj:`float`, :obj:`float`)
    Two-dimensional tuple (x1, y1)
b : (:obj:`float`, :obj:`float`)
    Two-dimensional tuple (x2, y2)

Returns
-------
int
    The distance, rounded to an integer.
2.653337
4.000877
0.663189
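A quick sanity check of the rounding behavior (the values are chosen to form a 3-4-5 triangle):

print(calculate_euc_distance((0, 0), (3, 4)))  # 5
print(calculate_euc_distance((0, 0), (1, 1)))  # sqrt(2) ~ 1.414 -> rounds to 1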
integer_specs = ['DIMENSION', 'CAPACITY']

for s in integer_specs:
    specs[s] = int(specs[s])
def _post_process_specs(specs)
Post-processes specs after pure parsing.

Casts values that are expected to be numbers into integers.

Args
----
specs : dict
    Problem specification parsed from a TSPLIB file.

Notes
-----
Modifies the specs object in place.
7.534002
8.838264
0.85243
distances = specs['NODE_COORD_SECTION']
specs['MATRIX'] = {}
for i in distances:
    origin = tuple(distances[i])
    specs['MATRIX'][i] = {}
    for j in specs['NODE_COORD_SECTION']:
        destination = tuple(distances[j])
        distance = calculate_euc_distance(origin, destination)
        # Upper triangular matrix: if i > j, ij = 0
        # if i > j:
        #     continue
        specs['MATRIX'][i][j] = distance
def _create_node_matrix_from_coord_section(specs)
Transforms parsed data from NODE_COORD_SECTION into a distance matrix (the upper-triangular filter is commented out, so currently the full matrix is built) Calculates distances between nodes 'MATRIX' key added to `specs`
5.075359
4.411198
1.150562
old_matrix = specs['EDGE_WEIGHT_SECTION']
nodes = specs['DIMENSION']
specs['MATRIX'] = {}
for i in range(nodes):
    specs['MATRIX'][i + 1] = {}
    for j in range(nodes):
        if i > j:
            continue
        specs['MATRIX'][i + 1][j + 1] = int(old_matrix[i][j])
def _create_node_matrix_from_full_matrix(specs)
Transforms parsed data from EDGE_WEIGHT_SECTION into an upper triangular matrix; 'MATRIX' key is added to `specs`
3.501353
2.697866
1.297823
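The resulting structure for a 2-node FULL_MATRIX input, spelled out (illustrative values):

specs = {'DIMENSION': 2, 'EDGE_WEIGHT_SECTION': [['0', '7'], ['7', '0']]}
_create_node_matrix_from_full_matrix(specs)
# specs['MATRIX'] == {1: {1: 0, 2: 7}, 2: {2: 0}}  -- upper triangle only,
# since entries with i > j are skipped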
line = ''
specs = {}

used_specs = ['NAME', 'COMMENT', 'DIMENSION', 'CAPACITY', 'TYPE',
              'EDGE_WEIGHT_TYPE']
used_data = ['DEMAND_SECTION', 'DEPOT_SECTION']

# Parse specs part
for line in f:
    line = strip(line)

    # Arbitrary sort, so we test everything out
    s = None
    for s in used_specs:
        if line.startswith(s):
            specs[s] = line.split('{} :'.format(s))[-1].strip()  # get value data part
            break

    if s == 'EDGE_WEIGHT_TYPE' and s in specs and specs[s] == 'EXPLICIT':
        used_specs.append('EDGE_WEIGHT_FORMAT')

    # All specs read
    if len(specs) == len(used_specs):
        break

if len(specs) != len(used_specs):
    missing_specs = set(used_specs).symmetric_difference(set(specs))
    raise ParseException('Error parsing TSPLIB data: specs {} missing'.format(missing_specs))

if specs['EDGE_WEIGHT_TYPE'] == 'EUC_2D':
    used_data.append('NODE_COORD_SECTION')
elif specs['EDGE_WEIGHT_FORMAT'] == 'FULL_MATRIX':
    used_data.append('EDGE_WEIGHT_SECTION')
else:
    raise ParseException('EDGE_WEIGHT_TYPE or EDGE_WEIGHT_FORMAT not supported')

_post_process_specs(specs)

# Parse data part
for line in f:
    line = strip(line)

    for d in used_data:
        if line.startswith(d):
            if d == 'DEPOT_SECTION':
                specs[d] = _parse_depot_section(f)
            elif d in ['NODE_COORD_SECTION', 'DEMAND_SECTION']:
                specs[d] = _parse_nodes_section(f, d, specs['DIMENSION'])
            elif d == 'EDGE_WEIGHT_SECTION':
                specs[d] = _parse_edge_weight(f, specs['DIMENSION'])

    if len(specs) == len(used_specs) + len(used_data):
        break

if len(specs) != len(used_specs) + len(used_data):
    missing_specs = set(specs).symmetric_difference(set(used_specs).union(set(used_data)))
    raise ParseException('Error parsing TSPLIB data: specs {} missing'.format(missing_specs))

_post_process_data(specs)

return specs
def _parse_tsplib(f)
Parses a TSPLIB file descriptor and returns a dict containing the problem definition
2.533666
2.496788
1.01477
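For reference, a minimal CVRP instance the parser above accepts (EUC_2D variant, illustrative values); wrapping it in io.StringIO yields a suitable file descriptor:

SAMPLE = """NAME : toy
COMMENT : three customers
TYPE : CVRP
DIMENSION : 4
EDGE_WEIGHT_TYPE : EUC_2D
CAPACITY : 30
NODE_COORD_SECTION
1 0 0
2 0 10
3 10 10
4 10 0
DEMAND_SECTION
1 0
2 10
3 10
4 10
DEPOT_SECTION
1
-1
EOF
"""
# import io; _parse_tsplib(io.StringIO(SAMPLE)) returns the specs dict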
sanitized_filename = sanitize(filename)
f = open(sanitized_filename)
specs = None

try:
    specs = _parse_tsplib(f)
except ParseException:
    raise
finally:
    # 'finally' is executed even when we re-raise exceptions
    f.close()

if specs['TYPE'] != 'CVRP':
    raise Exception('Not a CVRP TSPLIB problem. Found: {}'.format(specs['TYPE']))

# additional params for graph/network (temporary)
specs['VOLTAGE'] = 20000
specs['CABLETYPE'] = 1

# return (Graph(specs), specs)
return Graph(specs)
def read_file(filename)
Reads a TSPLIB file and returns the problem data. Args ---- filename: str Returns ------- Graph Graph object built from the problem specs.
7.990669
7.624333
1.048048
if circ_breaker not in self._circuit_breakers and isinstance(circ_breaker, CircuitBreakerDing0):
    self._circuit_breakers.append(circ_breaker)
    self.graph_add_node(circ_breaker)
def add_circuit_breaker(self, circ_breaker)
Adds a circuit breaker to _circuit_breakers and grid graph if not already existing Args ---- circ_breaker: CircuitBreakerDing0 Circuit breaker to be added
4.181474
2.696841
1.550508
if not isinstance(mv_station, MVStationDing0):
    raise Exception('Given MV station is not a MVStationDing0 object.')
if self._station is None:
    self._station = mv_station
    self.graph_add_node(mv_station)
else:
    if force:
        self._station = mv_station
    else:
        raise Exception('MV Station already set, use argument `force=True` to override.')
def add_station(self, mv_station, force=False)
Adds MV station if not already existing Args ---- mv_station: MVStationDing0 Description #TODO force: bool If True, MV Station is set even though it's not empty (override)
4.289696
3.075347
1.394866
if lv_load not in self._loads and isinstance(lv_load, MVLoadDing0):
    self._loads.append(lv_load)
    self.graph_add_node(lv_load)
def add_load(self, lv_load)
Adds a MV load to _loads and grid graph if not already existing Args ---- lv_load : MVLoadDing0 Load to be added
7.816561
6.581873
1.187589
if cable_dist not in self.cable_distributors() and isinstance(cable_dist, MVCableDistributorDing0):
    # add to array and graph
    self._cable_distributors.append(cable_dist)
    self.graph_add_node(cable_dist)
def add_cable_distributor(self, cable_dist)
Adds a cable distributor to _cable_distributors if not already existing Args ---- cable_dist : MVCableDistributorDing0 Cable distributor to be added
6.527631
7.259503
0.899184
if cable_dist in self.cable_distributors() and isinstance(cable_dist, MVCableDistributorDing0):
    # remove from array and graph
    self._cable_distributors.remove(cable_dist)
    if self._graph.has_node(cable_dist):
        self._graph.remove_node(cable_dist)
def remove_cable_distributor(self, cable_dist)
Removes a cable distributor from _cable_distributors if existing
4.846809
4.848012
0.999752
if ring not in self._rings and isinstance(ring, RingDing0):
    self._rings.append(ring)
def add_ring(self, ring)
Adds a ring to _rings if not already existing
8.030116
5.779467
1.389422
for circ_breaker in self.circuit_breakers():
    if circ_breaker.status == 'open':
        circ_breaker.close()
        logger.info('Circuit breakers were closed in order to find MV rings')

for ring in nx.cycle_basis(self._graph, root=self._station):
    if not include_root_node:
        ring.remove(self._station)

    if include_satellites:
        satellites = []
        for ring_node in ring:
            # determine all branches diverging from each ring node
            satellites.append(
                self.graph_nodes_from_subtree(
                    ring_node,
                    include_root_node=include_root_node
                )
            )
        # return ring and satellite nodes (flattened list of lists)
        yield ring + [_ for sublist in satellites for _ in sublist]
    else:
        yield ring
def rings_nodes(self, include_root_node=False, include_satellites=False)
Returns a generator for iterating over rings (=routes of MVGrid's graph) Args ---- include_root_node: bool, defaults to False If True, the root node is included in the list of ring nodes. include_satellites: bool, defaults to False If True, the satellite nodes (nodes that diverge from ring nodes) are included in the list of ring nodes. Yields ------ :any:`list` of :obj:`GridDing0` List with nodes of each ring of _graph in- or excluding root node (HV/MV station) (arg `include_root_node`), format:: [ ring_m_node_1, ..., ring_m_node_n ] Notes ----- Circuit breakers must be closed to find rings, this is done automatically.
5.1806
4.008151
1.292516
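How nx.cycle_basis underpins the ring detection above, shown on a toy graph (illustrative plain integers, not ding0 objects): a six-node ring with one satellite branch diverging at node 3:

import networkx as nx

g = nx.Graph()
g.add_edges_from([(0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 0)])  # ring
g.add_edge(3, 6)  # satellite diverging from ring node 3

rings = nx.cycle_basis(g, root=0)
# rings == [[0, 1, 2, 3, 4, 5]] (node order may differ); node 6 is not part
# of the cycle and only shows up when include_satellites=True is used.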
# close circuit breakers
for circ_breaker in self.circuit_breakers():
    if not circ_breaker.status == 'closed':
        circ_breaker.close()
        logger.info('Circuit breakers were closed in order to find MV rings')

# find true rings (cycles from station through breaker and back to station)
for ring_nodes in nx.cycle_basis(self._graph, root=self._station):
    edges_ring = []
    for node in ring_nodes:
        for edge in self.graph_branches_from_node(node):
            nodes_in_the_branch = self.graph_nodes_from_branch(edge[1]['branch'])
            if (nodes_in_the_branch[0] in ring_nodes and
                    nodes_in_the_branch[1] in ring_nodes):
                if edge[1]['branch'] not in edges_ring:
                    edges_ring.append(edge[1]['branch'])
    yield (edges_ring[0].ring, edges_ring, ring_nodes)
def rings_full_data(self)
Returns a generator for iterating over each ring Yields ------ For each ring, a tuple composed of ring ID, list of edges, list of nodes Notes ----- Circuit breakers must be closed to find rings, this is done automatically.
5.083287
4.362052
1.165343
if node_source in self._graph.nodes():
    # get all nodes that are member of a ring
    node_ring = []
    for ring in self.rings_nodes(include_root_node=include_root_node):
        if node_source in ring:
            node_ring = ring
            break

    # result set
    nodes_subtree = set()

    # get nodes from subtree
    if node_source in node_ring:
        for path in nx.shortest_path(self._graph, node_source).values():
            if len(path) > 1:
                if (path[1] not in node_ring) and (path[1] is not self.station()):
                    nodes_subtree.update(path[1:len(path)])
    else:
        raise ValueError(node_source, 'is not member of ring.')
else:
    raise ValueError(node_source, 'is not member of graph.')

return list(nodes_subtree)
def graph_nodes_from_subtree(self, node_source, include_root_node=False)
Finds all nodes of a tree that is connected to `node_source` and are (except `node_source`) not part of the ring of `node_source` (traversal of graph from `node_source` excluding nodes along ring). Example ------- A given graph with ring (edges) 0-1-2-3-4-5-0 and a tree starting at node (`node_source`) 3 with edges 3-6-7, 3-6-8-9 will return [6,7,8,9] Args ---- node_source: GridDing0 source node (Ding0 object), member of _graph include_root_node: bool, defaults to False If True, the root node is included in the list of ring nodes. Returns ------- :any:`list` of :obj:`GridDing0` List of nodes (Ding0 objects)
3.154605
3.191215
0.988528
# MV grid:
ctr = 1
for branch in self.graph_edges():
    branch['branch'].id_db = self.grid_district.id_db * 10**4 + ctr
    ctr += 1

# LV grid:
for lv_load_area in self.grid_district.lv_load_areas():
    for lv_grid_district in lv_load_area.lv_grid_districts():
        ctr = 1
        for branch in lv_grid_district.lv_grid.graph_edges():
            branch['branch'].id_db = lv_grid_district.id_db * 10**7 + ctr
            ctr += 1
def set_branch_ids(self)
Generates and sets ids of branches for MV and underlying LV grids. While IDs of imported objects can be derived from dataset's ID, branches are created within DING0 and need unique IDs (e.g. for PF calculation).
4.5618
3.860542
1.181648
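The ID scheme above, spelled out with made-up district IDs: the grid district ID occupies the upper digits, the branch counter the lower ones:

mv_grid_district_id = 3545
lv_grid_district_id = 3545001

first_mv_branch_id = mv_grid_district_id * 10**4 + 1   # 35450001
first_lv_branch_id = lv_grid_district_id * 10**7 + 1   # 35450010000001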
# do the routing
self._graph = mv_routing.solve(graph=self._graph, debug=debug, anim=anim)
logger.info('==> MV Routing for {} done'.format(repr(self)))

# connect satellites (step 1, with restrictions like max. string length, max. peak load per string)
self._graph = mv_connect.mv_connect_satellites(mv_grid=self, graph=self._graph, mode='normal', debug=debug)
logger.info('==> MV Sat1 for {} done'.format(repr(self)))

# connect satellites to closest line/station on a MV ring that have not been connected in step 1
self._graph = mv_connect.mv_connect_satellites(mv_grid=self, graph=self._graph, mode='isolated', debug=debug)
logger.info('==> MV Sat2 for {} done'.format(repr(self)))

# connect stations
self._graph = mv_connect.mv_connect_stations(mv_grid_district=self.grid_district, graph=self._graph, debug=debug)
logger.info('==> MV Stations for {} done'.format(repr(self)))
def routing(self, debug=False, anim=None)
Performs routing on Load Area centres to build MV grid with ring topology. Args ---- debug: bool, defaults to False If True, information is printed while routing anim: type, defaults to None Descr #TODO
4.796125
4.545145
1.055219
self._graph = mv_connect.mv_connect_generators(self.grid_district, self._graph, debug)
def connect_generators(self, debug=False)
Connects MV generators (graph nodes) to grid (graph) Args ---- debug: bool, defaults to False If True, information is printed during process
25.06254
14.329145
1.74906
# TODO: Add more detailed description

# set grid's voltage level
self.set_voltage_level()

# set MV station's voltage level
self._station.set_operation_voltage_level()

# set default branch types (normal, aggregated areas and within settlements)
self.default_branch_type, \
self.default_branch_type_aggregated, \
self.default_branch_type_settle = self.set_default_branch_type(debug)

# set default branch kinds
self.default_branch_kind_aggregated = self.default_branch_kind
self.default_branch_kind_settle = 'cable'

# choose appropriate transformers for each HV/MV sub-station
self._station.select_transformers()
def parametrize_grid(self, debug=False)
Performs Parametrization of grid equipment: i) Sets voltage level of MV grid, ii) Operation voltage level and transformer of HV/MV station, iii) Default branch types (normal, aggregated, settlement) Args ---- debug: bool, defaults to False If True, information is printed during process. Notes ----- It is assumed that only cables are used within settlements.
7.621422
4.71875
1.615136
if mode == 'load_density':
    # get power factor for loads
    cos_phi_load = cfg_ding0.get('assumptions', 'cos_phi_load')

    # get load density
    load_density_threshold = float(cfg_ding0.get('assumptions', 'load_density_threshold'))

    # transform MVGD's area to epsg 3035
    # to achieve correct area calculation
    projection = partial(
        pyproj.transform,
        pyproj.Proj(init='epsg:4326'),  # source coordinate system
        pyproj.Proj(init='epsg:3035'))  # destination coordinate system

    # calculate load density
    kw2mw = 1e-3
    sqm2sqkm = 1e6
    load_density = ((self.grid_district.peak_load * kw2mw / cos_phi_load) /
                    (transform(projection, self.grid_district.geo_data).area / sqm2sqkm))  # unit MVA/km^2

    # identify voltage level
    if load_density < load_density_threshold:
        self.v_level = 20
    elif load_density >= load_density_threshold:
        self.v_level = 10
    else:
        raise ValueError('load_density is invalid!')

elif mode == 'distance':
    # get threshold for 20/10kV disambiguation
    voltage_per_km_threshold = float(cfg_ding0.get('assumptions', 'voltage_per_km_threshold'))

    # initial distance
    dist_max = 0

    for node in self.graph_nodes_sorted():
        if isinstance(node, LVLoadAreaCentreDing0):
            # calc distance from MV-LV station to LA centre
            dist_node = calc_geo_dist_vincenty(self.station(), node) / 1e3
            if dist_node > dist_max:
                dist_max = dist_node

    # max. occurring distance to a Load Area exceeds threshold => grid operates at 20kV
    if dist_max >= voltage_per_km_threshold:
        self.v_level = 20
    # no: grid operates at 10kV
    else:
        self.v_level = 10

else:
    raise ValueError("parameter 'mode' is invalid!")
def set_voltage_level(self, mode='distance')
Sets voltage level of MV grid according to load density of MV Grid District or max. distance between station and Load Area. Parameters ---------- mode: str method to determine voltage level * 'load_density': Decision on voltage level is determined by load density of the considered region. Urban areas (load density of >= 1 MW/km2 according to [#]_) usually got a voltage of 10 kV whereas rural areas mostly use 20 kV. * 'distance' (default): Decision on voltage level is determined by the max. distance between Grid District's HV-MV station and Load Areas (LA's centre is used). According to [#]_ a value of 1 kV/km can be assumed. The `voltage_per_km_threshold` defines the distance threshold for distinction. (default in config = (20km+10km)/2 = 15km) References ---------- .. [#] Falk Schaller et al., "Modellierung realitätsnaher zukünftiger Referenznetze im Verteilnetzsektor zur Überprüfung der Elektroenergiequalität", Internationaler ETG-Kongress Würzburg, 2011 .. [#] Klaus Heuck et al., "Elektrische Energieversorgung", Vieweg+Teubner, Wiesbaden, 2007
5.283126
4.366649
1.209881
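A worked example of the 'load_density' decision above, with illustrative numbers (the real threshold comes from the config):

peak_load_kw = 12000.0   # peak load of the MV grid district in kW
cos_phi_load = 0.97
area_sqm = 8.0e6         # projected (EPSG:3035) district area in m^2

load_density = (peak_load_kw * 1e-3 / cos_phi_load) / (area_sqm / 1e6)
# ~1.55 MVA/km^2; at a threshold of 1 MVA/km^2 this district counts as
# urban and is parametrized as a 10 kV grid, otherwise as 20 kV.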
for lv_load_area in self.grid_district.lv_load_areas():
    peak_current_node = (lv_load_area.peak_load / (3**0.5) / self.v_level)  # units: kVA / kV = A
    if peak_current_node > peak_current_branch_max:
        lv_load_area.is_aggregated = True

# add peak demand for all Load Areas of aggregation type
self.grid_district.add_aggregated_peak_demand()
def set_nodes_aggregation_flag(self, peak_current_branch_max)
Set Load Areas with too high demand to aggregated type. Args ---- peak_current_branch_max: float Max. allowed current for line/cable
7.558287
6.904039
1.094763
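A worked check of the aggregation criterion above, with made-up values:

peak_load_kva = 18000.0  # Load Area peak load in kVA
v_level = 20             # MV grid voltage level in kV

peak_current = peak_load_kva / 3**0.5 / v_level  # ~519.6 A
# With e.g. peak_current_branch_max = 400 A this Load Area would be
# flagged as aggregated.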
# definitions for temp_resolution table
temp_id = 1
timesteps = 2
start_time = datetime(1970, 1, 1, 0, 0, 0)
resolution = 'H'

nodes = self._graph.nodes()

edges = [edge for edge in list(self.graph_edges())
         if (edge['adj_nodes'][0] in nodes and
             not isinstance(edge['adj_nodes'][0], LVLoadAreaCentreDing0)) and
            (edge['adj_nodes'][1] in nodes and
             not isinstance(edge['adj_nodes'][1], LVLoadAreaCentreDing0))]

if method == 'db':
    # Export node objects: Busses, Loads, Generators
    pypsa_io.export_nodes(self, session, nodes, temp_id, lv_transformer=False)

    # Export edges
    pypsa_io.export_edges(self, session, edges)

    # Create table about temporal coverage of PF analysis
    pypsa_io.create_temp_resolution_table(session,
                                          timesteps=timesteps,
                                          resolution=resolution,
                                          start_time=start_time)
elif method == 'onthefly':
    nodes_dict, components_data = pypsa_io.nodes_to_dict_of_dataframes(
        self, nodes, lv_transformer=False)
    edges_dict = pypsa_io.edges_to_dict_of_dataframes(self, edges)

    components = tools.merge_two_dicts(nodes_dict, edges_dict)

    return components, components_data
else:
    raise ValueError('Sorry, this export method does not exist!')
def export_to_pypsa(self, session, method='onthefly')
Exports MVGridDing0 grid to PyPSA database tables Peculiarities of MV grids are implemented here. Derive general export method from this and adapt to needs of LVGridDing0 Parameters ---------- session: :sqlalchemy:`SQLAlchemy session object<orm/session_basics.html>` Description method: str Specify export method:: 'db': grid data will be exported to database 'onthefly': grid data will be passed to PyPSA directly (default) Notes ----- It has to be proven that this method works for LV grids as well! Ding0 treats two stationary case of powerflow: 1) Full load: We assume no generation and loads to be set to peak load 2) Generation worst case:
4.694155
4.332776
1.083406
if method == 'db':
    raise NotImplementedError("Please use 'onthefly'.")
elif method == 'onthefly':
    components, components_data = self.export_to_pypsa(session, method)
    pypsa_io.run_powerflow_onthefly(components,
                                    components_data,
                                    self,
                                    export_pypsa_dir=export_pypsa_dir,
                                    debug=debug)
def run_powerflow(self, session, export_pypsa_dir=None, method='onthefly', debug=False)
Performs power flow calculation for all MV grids Args ---- session: :sqlalchemy:`SQLAlchemy session object<orm/session_basics.html>` Description #TODO export_pypsa_dir: str Sub-directory in output/debug/grid/ where csv Files of PyPSA network are exported to. Export is omitted if argument is empty. method: str Specify export method:: 'db': grid data will be exported to database 'onthefly': grid data will be passed to PyPSA directly (default) debug: bool, defaults to False If True, information is printed during process Notes ----- It has to be proven that this method works for LV grids as well! Ding0 treats two stationary case of powerflow: 1) Full load: We assume no generation and loads to be set to peak load 2) Generation worst case:
3.579873
3.92795
0.911385
# bus data
pypsa_io.import_pfa_bus_results(session, self)

# line data
pypsa_io.import_pfa_line_results(session, self)
def import_powerflow_results(self, session)
Assign results from power flow analysis to edges and nodes Parameters ---------- session: :sqlalchemy:`SQLAlchemy session object<orm/session_basics.html>` Description
4.311501
5.483427
0.786278
if not isinstance(lv_station, LVStationDing0):
    raise Exception('Given LV station is not a LVStationDing0 object.')
if self._station is None:
    self._station = lv_station
    self.graph_add_node(lv_station)
    self.grid_district.lv_load_area.mv_grid_district.mv_grid.graph_add_node(lv_station)
def add_station(self, lv_station)
Adds a LV station to _station and grid graph if not already existing
6.749997
5.560253
1.213973
for load in self._loads:
    if (sector == 'res') and (load.string_id is not None):
        yield load
    elif (sector == 'ria') and (load.string_id is None):
        yield load
def loads_sector(self, sector='res')
Returns a generator for iterating over grid's sectoral loads Parameters ---------- sector: String possible values:: 'res' (residential), 'ria' (retail, industrial, agricultural) Yields ------- LVLoadDing0 Generator for iterating over loads of the type specified in `sector`.
5.45798
3.721359
1.466663
if lv_load not in self._loads and isinstance(lv_load, LVLoadDing0):
    self._loads.append(lv_load)
    self.graph_add_node(lv_load)
def add_load(self, lv_load)
Adds a LV load to _loads and grid graph if not already existing Parameters ---------- lv_load : LVLoadDing0 Load to be added
7.107799
6.108965
1.163503
if lv_cable_dist not in self._cable_distributors and isinstance(lv_cable_dist, LVCableDistributorDing0):
    self._cable_distributors.append(lv_cable_dist)
    self.graph_add_node(lv_cable_dist)
def add_cable_dist(self, lv_cable_dist)
Adds a LV cable distributor to _cable_distributors and grid graph if not already existing Parameters ---------- lv_cable_dist : LVCableDistributorDing0 Cable distributor to be added
4.452084
4.33451
1.027125
# add required transformers
build_grid.transformer(self)

# add branches of sectors retail/industrial and agricultural
build_grid.build_ret_ind_agr_branches(self.grid_district)

# add branches of sector residential
build_grid.build_residential_branches(self.grid_district)
def build_grid(self)
Create LV grid graph
8.938308
8.556623
1.044607
self._graph = lv_connect.lv_connect_generators(self.grid_district, self._graph, debug)
def connect_generators(self, debug=False)
Connects LV generators (graph nodes) to grid (graph) Args ---- debug: bool, defaults to False If True, information is printed during process
29.024153
12.264546
2.366508
new_route = self.__class__(self._problem)
for node in self.nodes():
    # insert a copy of each node into the new route
    new_node = node.__class__(node._name, node._demand)
    new_route.allocate([new_node])
return new_route
def clone(self)
Returns a deep copy of self Function clones: * allocation * nodes Returns ------- type Deep copy of self
8.899753
7.02233
1.26735
cost = 0
depot = self._problem.depot()
last = depot
for i in self._nodes:
    a, b = last, i
    if a.name() > b.name():
        a, b = b, a
    cost = cost + self._problem.distance(a, b)
    last = i
cost = cost + self._problem.distance(depot, last)
return cost
def length(self)
Returns the route length (cost) Returns ------- int Route length (cost).
3.532665
3.814966
0.926002
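Why the a/b swap above matters: the distance matrix stores each edge only once (upper triangle), so lookups must order the endpoints canonically. A minimal sketch with plain comparable names:

def canonical(a, b):
    # order endpoints so the upper-triangle entry is hit
    return (b, a) if a > b else (a, b)

assert canonical(2, 1) == (1, 2)
assert canonical(1, 2) == (1, 2)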
cost = 0
for n1, n2 in zip(nodelist[:-1], nodelist[1:]):
    cost += self._problem.distance(n1, n2)
return cost
def length_from_nodelist(self, nodelist)
Returns the route length (cost) from the first to the last node in nodelist
3.09729
2.634732
1.175562
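The zip pairing above walks consecutive node pairs; an equivalent illustration:

nodelist = ['depot', 'a', 'b']
pairs = list(zip(nodelist[:-1], nodelist[1:]))
# pairs == [('depot', 'a'), ('a', 'b')]; summing distance(n1, n2) over
# these pairs gives the open-path length of the nodelist.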
# TODO: check docstring

# clone route and nodes
new_route = self.clone()
new_nodes = [node.clone() for node in nodes]

if pos is None:
    pos = len(self._nodes)
new_route._nodes = new_route._nodes[:pos] + new_nodes + new_route._nodes[pos:]
new_route._demand = sum([node.demand() for node in new_route._nodes])

if new_route.tech_constraints_satisfied():
    return True
return False
def can_allocate(self, nodes, pos=None)
Returns True if this route can allocate nodes in `nodes` list Parameters ---------- nodes : type Desc pos : type, defaults to None Desc Returns ------- bool True if this route can allocate nodes in `nodes` list
4.2825
3.922938
1.091656
# TODO: check docstring
nodes_demand = 0
for node in list(nodes):
    if node._allocation:
        node._allocation.deallocate([node])
    node._allocation = self
    nodes_demand = nodes_demand + node.demand()
    if append:
        self._nodes.append(node)
    else:
        self._nodes.insert(0, node)
self._demand = self._demand + nodes_demand
def allocate(self, nodes, append=True)
Allocates all nodes from `nodes` list in this route Parameters ---------- nodes : type Desc append : bool, defaults to True Desc
4.010721
4.670451
0.858744
# TODO: check docstring
nodes_demand = 0
for node in nodes:
    self._nodes.remove(node)
    node._allocation = None
    nodes_demand = nodes_demand + node.demand()
self._demand = self._demand - nodes_demand
if self._demand < 0:
    raise Exception('Trying to deallocate more than previously allocated')
def deallocate(self, nodes)
Deallocates all nodes from `nodes` list from this route Parameters ---------- nodes : type Desc
4.814584
6.039943
0.797124
# TODO: check docstring
node_list = []
nodes_demand = 0
for node in list(nodes):
    if node._allocation:
        node._allocation.deallocate([node])
    node_list.append(node)
    node._allocation = self
    nodes_demand = nodes_demand + node.demand()
self._nodes = self._nodes[:pos] + node_list + self._nodes[pos:]
self._demand += nodes_demand
def insert(self, nodes, pos)
Inserts all nodes from `nodes` list into this route at position `pos` Parameters ---------- nodes : type Desc pos : type Desc
4.68025
4.446447
1.052582
# TODO: check docstring
return self._nodes.index(node) != 0 and self._nodes.index(node) != len(self._nodes) - 1
def is_interior(self, node)
Returns True if node is interior to the route, i.e., not adjacent to the depot Parameters ---------- node : type Desc Returns ------- bool True if node is interior to the route
4.963338
6.586158
0.753601
# TODO: check docstring
return self._nodes.index(node) == len(self._nodes) - 1
def last(self, node)
Returns True if node is the last node in the route Parameters ---------- node : type Desc Returns ------- bool True if node is the last node in the route
8.441444
9.829933
0.858749
# TODO: add references (Tao)

# set init value
demand_diff_min = 10e6

# check possible positions in route
for ctr in range(len(self._nodes)):
    # split route and calc demand difference
    route_demand_part1 = sum([node.demand() for node in self._nodes[0:ctr]])
    route_demand_part2 = sum([node.demand() for node in self._nodes[ctr:len(self._nodes)]])
    demand_diff = abs(route_demand_part1 - route_demand_part2)

    if demand_diff < demand_diff_min:
        demand_diff_min = demand_diff
        position = ctr

if debug:
    logger.debug('sum 1={}'.format(
        sum([node.demand() for node in self._nodes[0:position]])))
    logger.debug('sum 2={}'.format(
        sum([node.demand() for node in self._nodes[position:len(self._nodes)]])))
    logger.debug('Position of circuit breaker: {0}-{1} (sumdiff={2})'.format(
        self._nodes[position - 1], self._nodes[position], demand_diff_min))

return position
def calc_circuit_breaker_position(self, debug=False)
Calculates the optimal position of a circuit breaker on route. Parameters ---------- debug: bool, defaults to False If True, prints process information. Returns ------- int position of circuit breaker on route (index of last node on 1st half-ring preceding the circuit breaker) Notes ----- According to planning principles of MV grids, a MV ring is run as two strings (half-rings) separated by a circuit breaker which is open at normal operation. Assuming a ring (route which is connected to the root node at either sides), the optimal position of a circuit breaker is defined as the position (virtual cable) between two nodes where the conveyed current is minimal on the route. Instead of the peak current, the peak load is used here (assuming a constant voltage). The circuit breakers are used here for checking tech. constraints only and will be re-located after connection of satellites and stations in ding0.grid.mv_grid.tools.set_circuit_breakers References ---------- See Also -------- ding0.grid.mv_grid.tools.set_circuit_breakers
3.432492
3.479695
0.986435
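The demand-balancing rule above, reduced to plain numbers (made-up demands along a route):

demands = [4, 3, 5, 2]
best = min(
    range(len(demands)),
    key=lambda ctr: abs(sum(demands[:ctr]) - sum(demands[ctr:])),
)
# best == 2: splitting into [4, 3] and [5, 2] gives |7 - 7| = 0, the
# most balanced half-rings.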
# TODO: check docstring
new_node = self.__class__(self._name, self._demand)
return new_node
def clone(self)
Returns a copy of self: a new node with the same name and demand (the allocation is not copied) Returns ------- type Copy of self
15.253808
16.777441
0.909186
# TODO: check docstring
for i in sorted(self._matrix.keys(), key=lambda x: x.name()):
    for j in sorted(self._matrix[i].keys(), key=lambda x: x.name()):
        if i != j:
            yield (i, j)
def edges(self)
Returns a generator for iterating over edges Yields ------ type Generator for iterating over edges.
3.874885
4.352102
0.890348
# TODO: check docstring
a, b = i, j
if a.name() > b.name():
    a, b = b, a
return self._matrix[self._nodes[a.name()]][self._nodes[b.name()]]
def distance(self, i, j)
Returns the distance between node i and node j Parameters ---------- i : type Descr j : type Desc Returns ------- float Distance between node i and node j.
4.965466
5.854031
0.848213
# choose size and amount of transformers
transformer, transformer_cnt = select_transformers(grid)

# create transformers and add them to station of LVGD
for t in range(0, transformer_cnt):
    lv_transformer = TransformerDing0(
        grid=grid,
        id_db=id,
        v_level=0.4,
        s_max_longterm=transformer['S_nom'],
        r=transformer['R'],
        x=transformer['X'])

    # add each transformer to its station
    grid._station.add_transformer(lv_transformer)
def transformer(grid)
Choose transformer and add to grid's station Parameters ---------- grid: LVGridDing0 LV grid data
11.321226
8.393698
1.348777
# Choose retail/industrial and agricultural grid model
model_params_ria = {}

if ((lvgd.sector_count_retail + lvgd.sector_count_industrial > 0) or
        (lvgd.peak_load_retail + lvgd.peak_load_industrial > 0)):
    model_params_ria['retail/industrial'] = select_grid_model_ria(lvgd, 'retail/industrial')
else:
    model_params_ria['retail/industrial'] = None

if ((lvgd.sector_count_agricultural > 0) or
        (lvgd.peak_load_agricultural > 0)):
    model_params_ria['agricultural'] = select_grid_model_ria(lvgd, 'agricultural')
else:
    model_params_ria['agricultural'] = None

return model_params_ria
def grid_model_params_ria(lvgd)
Determine grid model parameters for LV grids of sectors retail/industrial and agricultural Parameters ---------- lvgd : LVGridDistrictDing0 Low-voltage grid district object Returns ------- :obj:`dict` Structural description of (parts of) LV grid topology
2.308316
2.101559
1.098382
# Load properties of LV typified model grids
string_properties = lvgd.lv_grid.network.static_data['LV_model_grids_strings']

# Load relational table of apartment count and strings of model grid
apartment_string = lvgd.lv_grid.network.static_data['LV_model_grids_strings_per_grid']

# load assumptions
apartment_house_branch_ratio = cfg_ding0.get("assumptions", "apartment_house_branch_ratio")
population_per_apartment = cfg_ding0.get("assumptions", "population_per_apartment")

# calc count of apartments to select string types
apartments = round(lvgd.population / population_per_apartment)

if apartments > 196:
    apartments = 196

# select set of strings that represent one type of model grid
strings = apartment_string.loc[apartments]
selected_strings = [int(s) for s in strings[strings >= 1].index.tolist()]

# slice dataframe of string parameters
selected_strings_df = string_properties.loc[selected_strings]

# add number of occurrences of each branch to df
occurence_selector = [str(i) for i in selected_strings]
selected_strings_df['occurence'] = strings.loc[occurence_selector].tolist()

return selected_strings_df
def select_grid_model_residential(lvgd)
Selects typified model grid based on population Parameters ---------- lvgd : LVGridDistrictDing0 Low-voltage grid district object Returns ------- :pandas:`pandas.DataFrame<dataframe>` Selected string of typified model grid :pandas:`pandas.DataFrame<dataframe>` Parameters of chosen Transformer Notes ----- In total 196 distinct LV grid topologies are available that are chosen by population in the LV grid district. Population is translated to number of house branches. Each grid model fits a number of house branches. If this number exceeds 196, still the grid topology of 196 house branches is used. The peak load of the LV grid district is uniformly distributed across house branches.
6.848846
5.543379
1.2355
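The population-to-model mapping above, with illustrative numbers (`population_per_apartment` is an assumed config value):

population = 880
population_per_apartment = 2.3

apartments = round(population / population_per_apartment)  # 383
apartments = min(apartments, 196)  # capped at the largest model grid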
# Choice of typified lv model grid depends on population within lv
# grid district. If no population is given, lv grid is omitted and
# load is represented by lv station's peak load
if lvgd.population > 0 and lvgd.peak_load_residential > 0:
    model_grid = select_grid_model_residential(lvgd)
    build_lv_graph_residential(lvgd, model_grid)

# no residential load but population
elif lvgd.population > 0 and lvgd.peak_load_residential == 0:
    logger.warning('{} has population but no residential load. '
                   'No grid is created.'.format(repr(lvgd)))

# residential load but no population
elif lvgd.population == 0 and lvgd.peak_load_residential > 0:
    logger.warning('{} has no population but residential load. '
                   'No grid is created and thus this load is '
                   'missing in overall balance!'.format(repr(lvgd)))

else:
    logger.info('{} has got no residential load. '
                'No grid is created.'.format(repr(lvgd)))
def build_residential_branches(lvgd)
Based on population and identified peak load data, the according grid topology for residential sector is determined and attached to the grid graph Parameters ---------- lvgd : LVGridDistrictDing0 Low-voltage grid district object
4.906495
4.399063
1.11535