| _id | title | partition | text | language | meta_information |
|---|---|---|---|---|---|
q16600
|
LogInterceptorMixin.add_request_log_fields
|
train
|
def add_request_log_fields(
self, log_fields: LogFields,
call_details: Union[grpc.HandlerCallDetails,
grpc.ClientCallDetails]
):
"""Add log fields related to a request to the provided log fields
:param log_fields: log fields instance to which to add the fields
:param call_details: some information regarding the call
"""
service, method = call_details.method[1:].split("/")
log_fields.add_fields({
"system": "grpc",
"span.kind": self.KIND,
"grpc.service": service,
"grpc.method": method,
})
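    # Illustrative sketch (example value, not part of the mixin): gRPC call details
    # carry the full method path, e.g. "/package.Service/Method"; stripping the
    # leading "/" and splitting on "/" yields the service and method logged above.
    example_method_path = "/helloworld.Greeter/SayHello"  # hypothetical path
    service, method = example_method_path[1:].split("/")
    assert (service, method) == ("helloworld.Greeter", "SayHello")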
|
python
|
{
"resource": ""
}
|
q16601
|
LogInterceptorMixin.add_response_log_fields
|
train
|
def add_response_log_fields(self, log_fields: LogFields,
start_time: datetime, err: Exception):
"""Add log fields related to a response to the provided log fields
        :param log_fields: log fields instance to which to add the fields
:param start_time: start time of the request
:param err: exception raised during the handling of the request.
"""
code = "Unknown" if err is not None else "OK"
duration = (datetime.utcnow() - start_time).total_seconds() * 1000
log_fields.add_fields({
"grpc.start_time": start_time.isoformat() + "Z",
"grpc.code": code,
"duration": "{duration}ms".format(duration=duration),
})
|
python
|
{
"resource": ""
}
|
q16602
|
select_worstcase_snapshots
|
train
|
def select_worstcase_snapshots(network):
"""
Select two worst-case snapshots from time series
Two time steps in a time series represent worst-case snapshots. These are
1. Load case: refers to the point in the time series where the
(load - generation) achieves its maximum and is greater than 0.
2. Feed-in case: refers to the point in the time series where the
(load - generation) achieves its minimum and is smaller than 0.
These two points are identified based on the generation and load time
    series. If a load or feed-in case does not exist, None is returned.
Parameters
----------
network : :class:`~.grid.network.Network`
Network for which worst-case snapshots are identified.
Returns
-------
:obj:`dict`
Dictionary with keys 'load_case' and 'feedin_case'. Values are
corresponding worst-case snapshots of type
:pandas:`pandas.Timestamp<timestamp>` or None.
"""
timeseries_load_feedin_case = network.timeseries.timesteps_load_feedin_case
timestamp = {}
timestamp['load_case'] = (
timeseries_load_feedin_case.residual_load.idxmax()
if max(timeseries_load_feedin_case.residual_load) > 0 else None)
timestamp['feedin_case'] = (
timeseries_load_feedin_case.residual_load.idxmin()
if min(timeseries_load_feedin_case.residual_load) < 0 else None)
return timestamp
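# Illustrative sketch (assumed residual load values): the worst-case snapshots
# are simply the argmax/argmin of the residual load series, guarded by the sign
# checks used above.
import pandas as pd

example_residual_load = pd.Series(
    [5.0, -3.0, 8.0], index=pd.date_range('2011-01-01', periods=3, freq='H'))
load_case = (example_residual_load.idxmax()
             if example_residual_load.max() > 0 else None)
feedin_case = (example_residual_load.idxmin()
               if example_residual_load.min() < 0 else None)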
|
python
|
{
"resource": ""
}
|
q16603
|
get_residual_load_from_pypsa_network
|
train
|
def get_residual_load_from_pypsa_network(pypsa_network):
"""
Calculates residual load in MW in MV grid and underlying LV grids.
Parameters
----------
pypsa_network : :pypsa:`pypsa.Network<network>`
The `PyPSA network
<https://www.pypsa.org/doc/components.html#network>`_ container,
containing load flow results.
Returns
-------
:pandas:`pandas.Series<series>`
        Series with residual load in MW for each time step. Positive values
indicate a higher demand than generation and vice versa. Index of the
series is a :pandas:`pandas.DatetimeIndex<datetimeindex>`
"""
residual_load = \
pypsa_network.loads_t.p_set.sum(axis=1) - (
pypsa_network.generators_t.p_set.loc[
:, pypsa_network.generators_t.p_set.columns !=
'Generator_slack'].sum(axis=1) +
pypsa_network.storage_units_t.p_set.sum(axis=1))
return residual_load
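# Illustrative sketch (assumed values in MW): positive residual load means
# demand exceeds generation plus storage infeed (load case), negative values
# indicate a feed-in surplus (feed-in case).
import pandas as pd

example_load = pd.Series([10.0, 2.0])
example_generation = pd.Series([4.0, 7.0])
example_residual_load = example_load - example_generation  # [6.0, -5.0]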
|
python
|
{
"resource": ""
}
|
q16604
|
assign_load_feedin_case
|
train
|
def assign_load_feedin_case(network):
"""
For each time step evaluate whether it is a feed-in or a load case.
Feed-in and load case are identified based on the
generation and load time series and defined as follows:
1. Load case: positive (load - generation) at HV/MV substation
2. Feed-in case: negative (load - generation) at HV/MV substation
Output of this function is written to `timesteps_load_feedin_case`
attribute of the network.timeseries (see
:class:`~.grid.network.TimeSeries`).
Parameters
----------
network : :class:`~.grid.network.Network`
Network for which worst-case snapshots are identified.
Returns
--------
:pandas:`pandas.DataFrame<dataframe>`
Dataframe with information on whether time step is handled as load case
('load_case') or feed-in case ('feedin_case') for each time step in
`timeindex` attribute of network.timeseries.
Index of the dataframe is network.timeseries.timeindex. Columns of the
dataframe are 'residual_load' with (load - generation) in kW at HV/MV
substation and 'case' with 'load_case' for positive residual load and
'feedin_case' for negative residual load.
"""
if network.pypsa is not None:
residual_load = get_residual_load_from_pypsa_network(network.pypsa) * \
1e3
else:
grids = [network.mv_grid] + list(network.mv_grid.lv_grids)
gens = []
loads = []
for grid in grids:
gens.extend(grid.generators)
gens.extend(list(grid.graph.nodes_by_attribute('storage')))
loads.extend(list(grid.graph.nodes_by_attribute('load')))
generation_timeseries = pd.Series(
0, index=network.timeseries.timeindex)
for gen in gens:
generation_timeseries += gen.timeseries.p
load_timeseries = pd.Series(0, index=network.timeseries.timeindex)
for load in loads:
load_timeseries += load.timeseries.p
residual_load = load_timeseries - generation_timeseries
timeseries_load_feedin_case = residual_load.rename(
'residual_load').to_frame()
timeseries_load_feedin_case['case'] = \
timeseries_load_feedin_case.residual_load.apply(
lambda _: 'feedin_case' if _ < 0 else 'load_case')
return timeseries_load_feedin_case
|
python
|
{
"resource": ""
}
|
q16605
|
load_config
|
train
|
def load_config(filename, config_dir=None, copy_default_config=True):
"""
Loads the specified config file.
Parameters
-----------
filename : :obj:`str`
Config file name, e.g. 'config_grid.cfg'.
config_dir : :obj:`str`, optional
Path to config file. If None uses default edisgo config directory
specified in config file 'config_system.cfg' in section 'user_dirs'
by subsections 'root_dir' and 'config_dir'. Default: None.
copy_default_config : Boolean
If True copies a default config file into `config_dir` if the
specified config file does not exist. Default: True.
"""
if not config_dir:
config_file = os.path.join(get_default_config_path(), filename)
else:
config_file = os.path.join(config_dir, filename)
# config file does not exist -> copy default
if not os.path.isfile(config_file):
if copy_default_config:
logger.info('Config file {} not found, I will create a '
'default version'.format(config_file))
make_directory(config_dir)
shutil.copy(os.path.join(package_path, 'config', filename.
replace('.cfg', '_default.cfg')),
config_file)
else:
message = 'Config file {} not found.'.format(config_file)
logger.error(message)
raise FileNotFoundError(message)
if len(cfg.read(config_file)) == 0:
message = 'Config file {} not found or empty.'.format(config_file)
logger.error(message)
raise FileNotFoundError(message)
global _loaded
_loaded = True
|
python
|
{
"resource": ""
}
|
q16606
|
get_default_config_path
|
train
|
def get_default_config_path():
"""
    Returns the basic edisgo config path. If it does not yet exist, it is
    created and all default config files are copied into it.
Returns
--------
:obj:`str`
Path to default edisgo config directory specified in config file
'config_system.cfg' in section 'user_dirs' by subsections 'root_dir'
and 'config_dir'.
"""
config_dir = get('user_dirs', 'config_dir')
root_dir = get('user_dirs', 'root_dir')
root_path = os.path.join(os.path.expanduser('~'), root_dir)
config_path = os.path.join(root_path, config_dir)
# root directory does not exist
if not os.path.isdir(root_path):
# create it
logger.info('eDisGo root path {} not found, I will create it.'
.format(root_path))
make_directory(root_path)
# config directory does not exist
if not os.path.isdir(config_path):
# create it
config_path = os.path.join(root_path, config_dir)
make_directory(config_path)
# copy default config files
logger.info('eDisGo config path {} not found, I will create it.'
.format(config_path))
# copy default config files if they don't exist
internal_config_dir = os.path.join(package_path, 'config')
for file in glob(os.path.join(internal_config_dir, '*.cfg')):
filename = os.path.join(config_path,
os.path.basename(file).replace('_default', ''))
if not os.path.isfile(filename):
logger.info('I will create a default config file {} in {}'
.format(file, config_path))
shutil.copy(file, filename)
return config_path
|
python
|
{
"resource": ""
}
|
q16607
|
make_directory
|
train
|
def make_directory(directory):
"""
Makes directory if it does not exist.
Parameters
-----------
directory : :obj:`str`
Directory path
"""
if not os.path.isdir(directory):
os.mkdir(directory)
logger.info('Path {} not found, I will create it.'
.format(directory))
|
python
|
{
"resource": ""
}
|
q16608
|
mv_line_load
|
train
|
def mv_line_load(network):
"""
Checks for over-loading issues in MV grid.
Parameters
----------
network : :class:`~.grid.network.Network`
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
Dataframe containing over-loaded MV lines, their maximum relative
over-loading and the corresponding time step.
Index of the dataframe are the over-loaded lines of type
:class:`~.grid.components.Line`. Columns are 'max_rel_overload'
containing the maximum relative over-loading as float and 'time_index'
        containing the corresponding time step the over-loading occurred in as
:pandas:`pandas.Timestamp<timestamp>`.
Notes
-----
Line over-load is determined based on allowed load factors for feed-in and
load cases that are defined in the config file 'config_grid_expansion' in
section 'grid_expansion_load_factors'.
"""
crit_lines = pd.DataFrame()
crit_lines = _line_load(network, network.mv_grid, crit_lines)
if not crit_lines.empty:
logger.debug('==> {} line(s) in MV grid has/have load issues.'.format(
crit_lines.shape[0]))
else:
logger.debug('==> No line load issues in MV grid.')
return crit_lines
|
python
|
{
"resource": ""
}
|
q16609
|
lv_line_load
|
train
|
def lv_line_load(network):
"""
Checks for over-loading issues in LV grids.
Parameters
----------
network : :class:`~.grid.network.Network`
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
Dataframe containing over-loaded LV lines, their maximum relative
over-loading and the corresponding time step.
Index of the dataframe are the over-loaded lines of type
:class:`~.grid.components.Line`. Columns are 'max_rel_overload'
containing the maximum relative over-loading as float and 'time_index'
        containing the corresponding time step the over-loading occurred in as
:pandas:`pandas.Timestamp<timestamp>`.
Notes
-----
Line over-load is determined based on allowed load factors for feed-in and
load cases that are defined in the config file 'config_grid_expansion' in
section 'grid_expansion_load_factors'.
"""
crit_lines = pd.DataFrame()
for lv_grid in network.mv_grid.lv_grids:
crit_lines = _line_load(network, lv_grid, crit_lines)
if not crit_lines.empty:
logger.debug('==> {} line(s) in LV grids has/have load issues.'.format(
crit_lines.shape[0]))
else:
logger.debug('==> No line load issues in LV grids.')
return crit_lines
|
python
|
{
"resource": ""
}
|
q16610
|
_line_load
|
train
|
def _line_load(network, grid, crit_lines):
"""
Checks for over-loading issues of lines.
Parameters
----------
network : :class:`~.grid.network.Network`
grid : :class:`~.grid.grids.LVGrid` or :class:`~.grid.grids.MVGrid`
crit_lines : :pandas:`pandas.DataFrame<dataframe>`
Dataframe containing over-loaded lines, their maximum relative
over-loading and the corresponding time step.
Index of the dataframe are the over-loaded lines of type
:class:`~.grid.components.Line`. Columns are 'max_rel_overload'
containing the maximum relative over-loading as float and 'time_index'
        containing the corresponding time step the over-loading occurred in as
:pandas:`pandas.Timestamp<timestamp>`.
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
Dataframe containing over-loaded lines, their maximum relative
over-loading and the corresponding time step.
Index of the dataframe are the over-loaded lines of type
:class:`~.grid.components.Line`. Columns are 'max_rel_overload'
containing the maximum relative over-loading as float and 'time_index'
        containing the corresponding time step the over-loading occurred in as
:pandas:`pandas.Timestamp<timestamp>`.
"""
if isinstance(grid, LVGrid):
grid_level = 'lv'
else:
grid_level = 'mv'
for line in list(grid.graph.lines()):
i_line_allowed_per_case = {}
i_line_allowed_per_case['feedin_case'] = \
line['line'].type['I_max_th'] * line['line'].quantity * \
network.config['grid_expansion_load_factors'][
'{}_feedin_case_line'.format(grid_level)]
i_line_allowed_per_case['load_case'] = \
line['line'].type['I_max_th'] * line['line'].quantity * \
network.config['grid_expansion_load_factors'][
'{}_load_case_line'.format(grid_level)]
# maximum allowed line load in each time step
i_line_allowed = \
network.timeseries.timesteps_load_feedin_case.case.apply(
lambda _: i_line_allowed_per_case[_])
try:
# check if maximum current from power flow analysis exceeds
# allowed maximum current
i_line_pfa = network.results.i_res[repr(line['line'])]
if any((i_line_allowed - i_line_pfa) < 0):
# find out largest relative deviation
relative_i_res = i_line_pfa / i_line_allowed
crit_lines = crit_lines.append(pd.DataFrame(
{'max_rel_overload': relative_i_res.max(),
'time_index': relative_i_res.idxmax()},
index=[line['line']]))
except KeyError:
logger.debug('No results for line {} '.format(str(line)) +
'to check overloading.')
return crit_lines
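# Illustrative sketch (assumed currents in kA): a line is reported as critical
# when the current from the power flow analysis exceeds the allowed current in
# any time step; the overload is the largest ratio i_pfa / i_allowed.
import pandas as pd

idx = pd.date_range('2011-01-01', periods=3, freq='H')
i_allowed = pd.Series([0.4, 0.4, 0.28], index=idx)  # per-case allowed current
i_pfa = pd.Series([0.3, 0.45, 0.2], index=idx)      # power flow result
if ((i_allowed - i_pfa) < 0).any():
    rel = i_pfa / i_allowed
    max_rel_overload, time_index = rel.max(), rel.idxmax()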
|
python
|
{
"resource": ""
}
|
q16611
|
_station_load
|
train
|
def _station_load(network, station, crit_stations):
"""
Checks for over-loading of stations.
Parameters
----------
network : :class:`~.grid.network.Network`
station : :class:`~.grid.components.LVStation` or :class:`~.grid.components.MVStation`
crit_stations : :pandas:`pandas.DataFrame<dataframe>`
Dataframe containing over-loaded stations, their apparent power at
maximal over-loading and the corresponding time step.
Index of the dataframe are the over-loaded stations either of type
:class:`~.grid.components.LVStation` or
:class:`~.grid.components.MVStation`. Columns are 's_pfa'
containing the apparent power at maximal over-loading as float and
'time_index' containing the corresponding time step the over-loading
        occurred in as :pandas:`pandas.Timestamp<timestamp>`.
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
Dataframe containing over-loaded stations, their apparent power at
maximal over-loading and the corresponding time step.
Index of the dataframe are the over-loaded stations either of type
:class:`~.grid.components.LVStation` or
:class:`~.grid.components.MVStation`. Columns are 's_pfa'
containing the apparent power at maximal over-loading as float and
'time_index' containing the corresponding time step the over-loading
        occurred in as :pandas:`pandas.Timestamp<timestamp>`.
"""
if isinstance(station, LVStation):
grid_level = 'lv'
else:
grid_level = 'mv'
# maximum allowed apparent power of station for feed-in and load case
s_station = sum([_.type.S_nom for _ in station.transformers])
s_station_allowed_per_case = {}
s_station_allowed_per_case['feedin_case'] = s_station * network.config[
'grid_expansion_load_factors']['{}_feedin_case_transformer'.format(
grid_level)]
s_station_allowed_per_case['load_case'] = s_station * network.config[
'grid_expansion_load_factors']['{}_load_case_transformer'.format(
grid_level)]
# maximum allowed apparent power of station in each time step
s_station_allowed = \
network.timeseries.timesteps_load_feedin_case.case.apply(
lambda _: s_station_allowed_per_case[_])
try:
if isinstance(station, LVStation):
s_station_pfa = network.results.s_res(
station.transformers).sum(axis=1)
else:
s_station_pfa = network.results.s_res([station]).iloc[:, 0]
s_res = s_station_allowed - s_station_pfa
s_res = s_res[s_res < 0]
# check if maximum allowed apparent power of station exceeds
# apparent power from power flow analysis at any time step
if not s_res.empty:
# find out largest relative deviation
load_factor = \
network.timeseries.timesteps_load_feedin_case.case.apply(
lambda _: network.config[
'grid_expansion_load_factors'][
'{}_{}_transformer'.format(grid_level, _)])
relative_s_res = load_factor * s_res
crit_stations = crit_stations.append(pd.DataFrame(
{'s_pfa': s_station_pfa.loc[relative_s_res.idxmin()],
'time_index': relative_s_res.idxmin()},
index=[station]))
except KeyError:
logger.debug('No results for {} station to check overloading.'.format(
grid_level.upper()))
return crit_stations
|
python
|
{
"resource": ""
}
|
q16612
|
mv_voltage_deviation
|
train
|
def mv_voltage_deviation(network, voltage_levels='mv_lv'):
"""
Checks for voltage stability issues in MV grid.
Parameters
----------
network : :class:`~.grid.network.Network`
voltage_levels : :obj:`str`
Specifies which allowed voltage deviations to use. Possible options
are:
* 'mv_lv'
This is the default. The allowed voltage deviation for nodes in the
        MV grid is the same as for nodes in the LV grid. Furthermore, load and
        feed-in cases are not distinguished.
* 'mv'
Use this to handle allowed voltage deviations in the MV and LV grid
        differently. Here, load and feed-in cases are differentiated as well.
Returns
-------
:obj:`dict`
Dictionary with :class:`~.grid.grids.MVGrid` as key and a
:pandas:`pandas.DataFrame<dataframe>` with its critical nodes, sorted
descending by voltage deviation, as value.
Index of the dataframe are all nodes (of type
:class:`~.grid.components.Generator`, :class:`~.grid.components.Load`,
etc.) with over-voltage issues. Columns are 'v_mag_pu' containing the
maximum voltage deviation as float and 'time_index' containing the
        corresponding time step the over-voltage occurred in as
:pandas:`pandas.Timestamp<timestamp>`.
Notes
-----
Over-voltage is determined based on allowed voltage deviations defined in
the config file 'config_grid_expansion' in section
'grid_expansion_allowed_voltage_deviations'.
"""
crit_nodes = {}
v_dev_allowed_per_case = {}
v_dev_allowed_per_case['feedin_case_lower'] = 0.9
v_dev_allowed_per_case['load_case_upper'] = 1.1
offset = network.config[
'grid_expansion_allowed_voltage_deviations']['hv_mv_trafo_offset']
control_deviation = network.config[
'grid_expansion_allowed_voltage_deviations'][
'hv_mv_trafo_control_deviation']
if voltage_levels == 'mv_lv':
v_dev_allowed_per_case['feedin_case_upper'] = \
1 + offset + control_deviation + network.config[
'grid_expansion_allowed_voltage_deviations'][
'mv_lv_feedin_case_max_v_deviation']
v_dev_allowed_per_case['load_case_lower'] = \
1 + offset - control_deviation - network.config[
'grid_expansion_allowed_voltage_deviations'][
'mv_lv_load_case_max_v_deviation']
elif voltage_levels == 'mv':
v_dev_allowed_per_case['feedin_case_upper'] = \
1 + offset + control_deviation + network.config[
'grid_expansion_allowed_voltage_deviations'][
'mv_feedin_case_max_v_deviation']
v_dev_allowed_per_case['load_case_lower'] = \
1 + offset - control_deviation - network.config[
'grid_expansion_allowed_voltage_deviations'][
'mv_load_case_max_v_deviation']
else:
raise ValueError(
'Specified mode {} is not a valid option.'.format(voltage_levels))
    # maximum and minimum allowed voltage deviation in each time step
v_dev_allowed_upper = \
network.timeseries.timesteps_load_feedin_case.case.apply(
lambda _: v_dev_allowed_per_case['{}_upper'.format(_)])
v_dev_allowed_lower = \
network.timeseries.timesteps_load_feedin_case.case.apply(
lambda _: v_dev_allowed_per_case['{}_lower'.format(_)])
nodes = network.mv_grid.graph.nodes()
crit_nodes_grid = _voltage_deviation(
network, nodes, v_dev_allowed_upper, v_dev_allowed_lower,
voltage_level='mv')
if not crit_nodes_grid.empty:
crit_nodes[network.mv_grid] = crit_nodes_grid.sort_values(
by=['v_mag_pu'], ascending=False)
logger.debug(
'==> {} node(s) in MV grid has/have voltage issues.'.format(
crit_nodes[network.mv_grid].shape[0]))
else:
logger.debug('==> No voltage issues in MV grid.')
return crit_nodes
|
python
|
{
"resource": ""
}
|
q16613
|
check_ten_percent_voltage_deviation
|
train
|
def check_ten_percent_voltage_deviation(network):
"""
    Checks if the 10% criterion is exceeded.
Parameters
----------
network : :class:`~.grid.network.Network`
"""
v_mag_pu_pfa = network.results.v_res()
if (v_mag_pu_pfa > 1.1).any().any() or (v_mag_pu_pfa < 0.9).any().any():
message = "Maximum allowed voltage deviation of 10% exceeded."
raise ValueError(message)
|
python
|
{
"resource": ""
}
|
q16614
|
calc_geo_lines_in_buffer
|
train
|
def calc_geo_lines_in_buffer(network, node, grid, radius, radius_inc):
"""Determines lines in nodes' associated graph that are at least partly
within buffer of radius from node. If there are no lines, the buffer is
successively extended by radius_inc until lines are found.
Parameters
----------
network : :class:`~.grid.network.Network`
The eDisGo container object
node : :class:`~.grid.components.Component`
Origin node the buffer is created around (e.g. :class:`~.grid.components.Generator`).
Node must be a member of grid's graph (grid.graph)
grid : :class:`~.grid.grids.Grid`
Grid whose lines are searched
radius : :obj:`float`
Buffer radius in m
radius_inc : :obj:`float`
Buffer radius increment in m
Returns
-------
:obj:`list` of :class:`~.grid.components.Line`
Sorted (by repr()) list of lines
Notes
-----
Adapted from `Ding0 <https://github.com/openego/ding0/blob/\
21a52048f84ec341fe54e0204ac62228a9e8a32a/\
ding0/tools/geo.py#L53>`_.
"""
lines = []
while not lines:
node_shp = transform(proj2equidistant(network), node.geom)
buffer_zone_shp = node_shp.buffer(radius)
for line in grid.graph.lines():
nodes = line['adj_nodes']
branch_shp = transform(proj2equidistant(network), LineString([nodes[0].geom, nodes[1].geom]))
if buffer_zone_shp.intersects(branch_shp):
lines.append(line)
radius += radius_inc
return sorted(lines, key=lambda _: repr(_))
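# Illustrative sketch (assumed planar coordinates in m): shapely's buffer() and
# intersects() are the geometric primitives used above to find lines that lie
# at least partly within the buffer around the node.
from shapely.geometry import LineString, Point

node_shp = Point(0, 0)
buffer_zone_shp = node_shp.buffer(100)
branch_shp = LineString([(50, 0), (200, 0)])
assert buffer_zone_shp.intersects(branch_shp)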
|
python
|
{
"resource": ""
}
|
q16615
|
calc_geo_dist_vincenty
|
train
|
def calc_geo_dist_vincenty(network, node_source, node_target):
"""Calculates the geodesic distance between node_source and node_target
incorporating the detour factor in config.
Parameters
----------
network : :class:`~.grid.network.Network`
The eDisGo container object
node_source : :class:`~.grid.components.Component`
Node to connect (e.g. :class:`~.grid.components.Generator`)
node_target : :class:`~.grid.components.Component`
Target node (e.g. :class:`~.grid.components.BranchTee`)
Returns
-------
:obj:`float`
Distance in m
"""
branch_detour_factor = network.config['grid_connection'][
'branch_detour_factor']
# notice: vincenty takes (lat,lon)
branch_length = branch_detour_factor * vincenty((node_source.geom.y, node_source.geom.x),
(node_target.geom.y, node_target.geom.x)).m
# ========= BUG: LINE LENGTH=0 WHEN CONNECTING GENERATORS ===========
# When importing generators, the geom_new field is used as position. If it is empty, EnergyMap's geom
# is used and so there are a couple of generators at the same position => length of interconnecting
# line is 0. See issue #76
if branch_length == 0:
branch_length = 1
logger.debug('Geo distance is zero, check objects\' positions. '
'Distance is set to 1m')
# ===================================================================
return branch_length
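# Illustrative sketch (assumed coordinates and detour factor): the geodesic
# distance is scaled by a detour factor to approximate real line routing.
# Note: vincenty is deprecated in recent geopy releases; geodesic is the
# equivalent call and also takes (lat, lon) tuples.
from geopy.distance import geodesic

detour_factor = 1.3  # assumed value; eDisGo reads it from the config
branch_length_m = detour_factor * geodesic((52.52, 13.40), (52.53, 13.41)).m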
|
python
|
{
"resource": ""
}
|
q16616
|
get_username
|
train
|
def get_username(details, backend, response, *args, **kwargs):
"""Sets the `username` argument.
If the user exists already, use the existing username. Otherwise
generate username from the `new_uuid` using the
`helusers.utils.uuid_to_username` function.
"""
user = details.get('user')
if not user:
user_uuid = kwargs.get('uid')
if not user_uuid:
return
username = uuid_to_username(user_uuid)
else:
username = user.username
return {
'username': username
}
|
python
|
{
"resource": ""
}
|
q16617
|
SocialAccountAdapter.pre_social_login
|
train
|
def pre_social_login(self, request, sociallogin):
"""Update user based on token information."""
user = sociallogin.user
# If the user hasn't been saved yet, it will be updated
# later on in the sign-up flow.
if not user.pk:
return
data = sociallogin.account.extra_data
oidc = sociallogin.account.provider == 'helsinki_oidc'
update_user(user, data, oidc)
|
python
|
{
"resource": ""
}
|
q16618
|
AbstractUser.sync_groups_from_ad
|
train
|
def sync_groups_from_ad(self):
"""Determine which Django groups to add or remove based on AD groups."""
ad_list = ADGroupMapping.objects.values_list('ad_group', 'group')
mappings = {ad_group: group for ad_group, group in ad_list}
user_ad_groups = set(self.ad_groups.filter(groups__isnull=False).values_list(flat=True))
all_mapped_groups = set(mappings.values())
old_groups = set(self.groups.filter(id__in=all_mapped_groups).values_list(flat=True))
new_groups = set([mappings[x] for x in user_ad_groups])
groups_to_delete = old_groups - new_groups
if groups_to_delete:
self.groups.remove(*groups_to_delete)
groups_to_add = new_groups - old_groups
if groups_to_add:
self.groups.add(*groups_to_add)
|
python
|
{
"resource": ""
}
|
q16619
|
patch_jwt_settings
|
train
|
def patch_jwt_settings():
"""Patch rest_framework_jwt authentication settings from allauth"""
defaults = api_settings.defaults
defaults['JWT_PAYLOAD_GET_USER_ID_HANDLER'] = (
__name__ + '.get_user_id_from_payload_handler')
if 'allauth.socialaccount' not in settings.INSTALLED_APPS:
return
from allauth.socialaccount.models import SocialApp
try:
app = SocialApp.objects.get(provider='helsinki')
except SocialApp.DoesNotExist:
return
defaults['JWT_SECRET_KEY'] = app.secret
defaults['JWT_AUDIENCE'] = app.client_id
|
python
|
{
"resource": ""
}
|
q16620
|
uuid_to_username
|
train
|
def uuid_to_username(uuid):
"""
Convert UUID to username.
>>> uuid_to_username('00fbac99-0bab-5e66-8e84-2e567ea4d1f6')
'u-ad52zgilvnpgnduefzlh5jgr6y'
>>> uuid_to_username(UUID('00fbac99-0bab-5e66-8e84-2e567ea4d1f6'))
'u-ad52zgilvnpgnduefzlh5jgr6y'
"""
uuid_data = getattr(uuid, 'bytes', None) or UUID(uuid).bytes
b32coded = base64.b32encode(uuid_data)
return 'u-' + b32coded.decode('ascii').replace('=', '').lower()
|
python
|
{
"resource": ""
}
|
q16621
|
username_to_uuid
|
train
|
def username_to_uuid(username):
"""
Convert username to UUID.
>>> username_to_uuid('u-ad52zgilvnpgnduefzlh5jgr6y')
UUID('00fbac99-0bab-5e66-8e84-2e567ea4d1f6')
"""
if not username.startswith('u-') or len(username) != 28:
        raise ValueError('Not a UUID-based username: %r' % (username,))
decoded = base64.b32decode(username[2:].upper() + '======')
return UUID(bytes=decoded)
|
python
|
{
"resource": ""
}
|
q16622
|
oidc_to_user_data
|
train
|
def oidc_to_user_data(payload):
"""
Map OIDC claims to Django user fields.
"""
payload = payload.copy()
field_map = {
'given_name': 'first_name',
'family_name': 'last_name',
'email': 'email',
}
ret = {}
for token_attr, user_attr in field_map.items():
if token_attr not in payload:
continue
ret[user_attr] = payload.pop(token_attr)
ret.update(payload)
return ret
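# Illustrative sketch (assumed claim values): a typical OIDC payload and the
# Django user fields it maps to; claims outside field_map are passed through.
example_payload = {'given_name': 'Jane', 'family_name': 'Doe',
                   'email': 'jane@example.com', 'department_name': 'IT'}
expected = {'first_name': 'Jane', 'last_name': 'Doe',
            'email': 'jane@example.com', 'department_name': 'IT'}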
|
python
|
{
"resource": ""
}
|
q16623
|
UserAuthorization.has_api_scopes
|
train
|
def has_api_scopes(self, *api_scopes):
"""
Test if all given API scopes are authorized.
:type api_scopes: list[str]
:param api_scopes: The API scopes to test
:rtype: bool|None
:return:
True or False, if the API Token has the API scopes field set,
otherwise None
"""
if self._authorized_api_scopes is None:
return None
return all((x in self._authorized_api_scopes) for x in api_scopes)
|
python
|
{
"resource": ""
}
|
q16624
|
UserAuthorization.has_api_scope_with_prefix
|
train
|
def has_api_scope_with_prefix(self, prefix):
"""
Test if there is an API scope with the given prefix.
:rtype: bool|None
"""
if self._authorized_api_scopes is None:
return None
return any(
x == prefix or x.startswith(prefix + '.')
for x in self._authorized_api_scopes)
|
python
|
{
"resource": ""
}
|
q16625
|
Client.register_site
|
train
|
def register_site(self):
"""Function to register the site and generate a unique ID for the site
Returns:
**string:** The ID of the site (also called client id) if the registration is successful
Raises:
**OxdServerError:** If the site registration fails.
"""
if self.oxd_id:
logger.info('Client is already registered. ID: %s', self.oxd_id)
return self.oxd_id
# add required params for the command
params = {
"authorization_redirect_uri": self.authorization_redirect_uri,
"oxd_rp_programming_language": "python",
}
# add other optional params if they exist in config
for op in self.opt_params:
if self.config.get("client", op):
params[op] = self.config.get("client", op)
for olp in self.opt_list_params:
if self.config.get("client", olp):
params[olp] = self.config.get("client", olp).split(",")
logger.debug("Sending command `register_site` with params %s", params)
response = self.msgr.request("register_site", **params)
logger.debug("Received response: %s", response)
if response['status'] == 'error':
raise OxdServerError(response['data'])
self.oxd_id = response["data"]["oxd_id"]
self.config.set("oxd", "id", self.oxd_id)
logger.info("Site registration successful. Oxd ID: %s", self.oxd_id)
return self.oxd_id
|
python
|
{
"resource": ""
}
|
q16626
|
Client.get_authorization_url
|
train
|
def get_authorization_url(self, acr_values=None, prompt=None, scope=None,
custom_params=None):
"""Function to get the authorization url that can be opened in the
browser for the user to provide authorization and authentication
Parameters:
* **acr_values (list, optional):** acr values in the order of priority
        * **prompt (string, optional):** prompt=login is required if you want to force-alter the current user session (e.g., when the user is already logged in from site1, and site2 constructs an authorization request and wants to alter the current user session)
* **scope (list, optional):** scopes required, takes the one provided during site registrations by default
* **custom_params (dict, optional):** Any custom arguments that the client wishes to pass on to the OP can be passed on as extra parameters to the function
Returns:
**string:** The authorization url that the user must access for authentication and authorization
Raises:
**OxdServerError:** If the oxd throws an error for any reason.
"""
params = {"oxd_id": self.oxd_id}
if scope and isinstance(scope, list):
params["scope"] = scope
if acr_values and isinstance(acr_values, list):
params["acr_values"] = acr_values
if prompt and isinstance(prompt, str):
params["prompt"] = prompt
if custom_params:
params["custom_parameters"] = custom_params
logger.debug("Sending command `get_authorization_url` with params %s",
params)
response = self.msgr.request("get_authorization_url", **params)
logger.debug("Received response: %s", response)
if response['status'] == 'error':
raise OxdServerError(response['data'])
return response['data']['authorization_url']
|
python
|
{
"resource": ""
}
|
q16627
|
Client.get_tokens_by_code
|
train
|
def get_tokens_by_code(self, code, state):
"""Function to get access code for getting the user details from the
OP. It is called after the user authorizes by visiting the auth URL.
Parameters:
* **code (string):** code, parse from the callback URL querystring
* **state (string):** state value parsed from the callback URL
Returns:
**dict:** The tokens object with the following data structure.
Example response::
{
"access_token": "<token string>",
"expires_in": 3600,
"refresh_token": "<token string>",
"id_token": "<token string>",
"id_token_claims":
{
"iss": "https://server.example.com",
"sub": "24400320",
"aud": "s6BhdRkqt3",
"nonce": "n-0S6_WzA2Mj",
"exp": 1311281970,
"iat": 1311280970,
"at_hash": "MTIzNDU2Nzg5MDEyMzQ1Ng"
}
}
Raises:
**OxdServerError:** If oxd server throws an error OR if the params code
and scopes are of improper data type.
"""
params = dict(oxd_id=self.oxd_id, code=code, state=state)
logger.debug("Sending command `get_tokens_by_code` with params %s",
params)
response = self.msgr.request("get_tokens_by_code", **params)
logger.debug("Received response: %s", response)
if response['status'] == 'error':
raise OxdServerError(response['data'])
return response['data']
|
python
|
{
"resource": ""
}
|
q16628
|
Client.get_access_token_by_refresh_token
|
train
|
def get_access_token_by_refresh_token(self, refresh_token, scope=None):
"""Function that is used to get a new access token using refresh token
Parameters:
* **refresh_token (str):** refresh_token from get_tokens_by_code command
        * **scope (list, optional):** a list of scopes. If not specified, access is granted with the scope provided in the previous request
Returns:
**dict:** the tokens with expiry time.
Example response::
{
"access_token":"SlAV32hkKG",
"expires_in":3600,
"refresh_token":"aaAV32hkKG1"
}
"""
params = {
"oxd_id": self.oxd_id,
"refresh_token": refresh_token
}
if scope:
params['scope'] = scope
logger.debug("Sending command `get_access_token_by_refresh_token` with"
" params %s", params)
response = self.msgr.request("get_access_token_by_refresh_token",
**params)
logger.debug("Received response: %s", response)
if response['status'] == 'error':
raise OxdServerError(response['data'])
return response['data']
|
python
|
{
"resource": ""
}
|
q16629
|
Client.get_user_info
|
train
|
def get_user_info(self, access_token):
"""Function to get the information about the user using the access code
obtained from the OP
Note:
Refer to the /.well-known/openid-configuration URL of your OP for
the complete list of the claims for different scopes.
Parameters:
* **access_token (string):** access token from the get_tokens_by_code function
Returns:
        **dict:** The user data claims that are returned by the OP, in the format shown below.
Example response::
{
"sub": ["248289761001"],
"name": ["Jane Doe"],
"given_name": ["Jane"],
"family_name": ["Doe"],
"preferred_username": ["j.doe"],
"email": ["janedoe@example.com"],
"picture": ["http://example.com/janedoe/me.jpg"]
}
Raises:
**OxdServerError:** If the param access_token is empty OR if the oxd
Server returns an error.
"""
        params = dict(oxd_id=self.oxd_id, access_token=access_token)
logger.debug("Sending command `get_user_info` with params %s",
params)
response = self.msgr.request("get_user_info", **params)
logger.debug("Received response: %s", response)
if response['status'] == 'error':
raise OxdServerError(response['data'])
return response['data']['claims']
|
python
|
{
"resource": ""
}
|
q16630
|
Client.get_logout_uri
|
train
|
def get_logout_uri(self, id_token_hint=None, post_logout_redirect_uri=None,
state=None, session_state=None):
"""Function to logout the user.
Parameters:
* **id_token_hint (string, optional):** oxd server will use last used ID Token, if not provided
        * **post_logout_redirect_uri (string, optional):** URI to redirect to; this URI overrides the value given in the site config
* **state (string, optional):** site state
* **session_state (string, optional):** session state
Returns:
**string:** The URI to which the user must be directed in order to
perform the logout
"""
params = {"oxd_id": self.oxd_id}
if id_token_hint:
params["id_token_hint"] = id_token_hint
if post_logout_redirect_uri:
params["post_logout_redirect_uri"] = post_logout_redirect_uri
if state:
params["state"] = state
if session_state:
params["session_state"] = session_state
logger.debug("Sending command `get_logout_uri` with params %s", params)
response = self.msgr.request("get_logout_uri", **params)
logger.debug("Received response: %s", response)
if response['status'] == 'error':
raise OxdServerError(response['data'])
return response['data']['uri']
|
python
|
{
"resource": ""
}
|
q16631
|
Client.update_site
|
train
|
def update_site(self, client_secret_expires_at=None):
"""Function to update the site's information with OpenID Provider.
This should be called after changing the values in the cfg file.
Parameters:
        * **client_secret_expires_at (long, OPTIONAL):** milliseconds since 1970, can be used to extend the client lifetime
Returns:
**bool:** The status for update. True for success and False for failure
Raises:
**OxdServerError:** When the update fails and oxd server returns error
"""
params = {
"oxd_id": self.oxd_id,
"authorization_redirect_uri": self.authorization_redirect_uri
}
if client_secret_expires_at:
params["client_secret_expires_at"] = client_secret_expires_at
for param in self.opt_params:
if self.config.get("client", param):
value = self.config.get("client", param)
params[param] = value
for param in self.opt_list_params:
if self.config.get("client", param):
value = self.config.get("client", param).split(",")
params[param] = value
logger.debug("Sending `update_site` with params %s",
params)
response = self.msgr.request("update_site", **params)
logger.debug("Received response: %s", response)
if response['status'] == 'error':
raise OxdServerError(response['data'])
return True
|
python
|
{
"resource": ""
}
|
q16632
|
Client.uma_rs_protect
|
train
|
def uma_rs_protect(self, resources, overwrite=None):
"""Function to be used in a UMA Resource Server to protect resources.
Parameters:
* **resources (list):** list of resource to protect. See example at `here <https://gluu.org/docs/oxd/3.1.2/api/#uma-rs-protect-resources>`_
        * **overwrite (bool):** If True, allows existing resources to be updated
Returns:
**bool:** The status of the request.
"""
params = dict(oxd_id=self.oxd_id, resources=resources)
if overwrite:
params["overwrite"] = overwrite
logger.debug("Sending `uma_rs_protect` with params %s", params)
response = self.msgr.request("uma_rs_protect", **params)
logger.debug("Received response: %s", response)
if response['status'] == 'error':
raise OxdServerError(response['data'])
return True
|
python
|
{
"resource": ""
}
|
q16633
|
Client.uma_rs_check_access
|
train
|
def uma_rs_check_access(self, rpt, path, http_method):
"""Function to be used in a UMA Resource Server to check access.
Parameters:
* **rpt (string):** RPT or blank value if absent (not send by RP)
* **path (string):** Path of resource (e.g. for http://rs.com/phones, /phones should be passed)
* **http_method (string):** Http method of RP request (GET, POST, PUT, DELETE)
Returns:
**dict:** The access information received in the format below.
If the access is granted::
{ "access": "granted" }
If the access is denied with ticket response::
{
"access": "denied",
"www-authenticate_header": "UMA realm='example',
as_uri='https://as.example.com',
error='insufficient_scope',
ticket='016f84e8-f9b9-11e0-bd6f-0021cc6004de'",
"ticket": "016f84e8-f9b9-11e0-bd6f-0021cc6004de"
}
If the access is denied without ticket response::
{ "access": "denied" }
Raises:
``oxdpython.exceptions.InvalidRequestError`` if the resource is not
protected
"""
params = {"oxd_id": self.oxd_id,
"rpt": rpt,
"path": path,
"http_method": http_method}
logger.debug("Sending command `uma_rs_check_access` with params %s",
params)
response = self.msgr.request("uma_rs_check_access", **params)
logger.debug("Received response: %s", response)
if response['status'] == 'error':
if response['data']['error'] == 'invalid_request':
raise InvalidRequestError(response['data'])
else:
raise OxdServerError(response['data'])
return response['data']
|
python
|
{
"resource": ""
}
|
q16634
|
Client.uma_rp_get_rpt
|
train
|
def uma_rp_get_rpt(self, ticket, claim_token=None, claim_token_format=None,
pct=None, rpt=None, scope=None, state=None):
"""Function to be used by a UMA Requesting Party to get RPT token.
Parameters:
* **ticket (str, REQUIRED):** ticket
* **claim_token (str, OPTIONAL):** claim token
* **claim_token_format (str, OPTIONAL):** claim token format
* **pct (str, OPTIONAL):** pct
* **rpt (str, OPTIONAL):** rpt
* **scope (list, OPTIONAL):** scope
* **state (str, OPTIONAL):** state that is returned from `uma_rp_get_claims_gathering_url` command
Returns:
**dict:** The response from the OP.
Success response::
{
"status":"ok",
"data":{
"access_token":"SSJHBSUSSJHVhjsgvhsgvshgsv",
"token_type":"Bearer",
"pct":"c2F2ZWRjb25zZW50",
"upgraded":true
}
}
NeedInfoError response::
{
"error":"need_info",
"ticket":"ZXJyb3JfZGV0YWlscw==",
"required_claims":[
{
"claim_token_format":[
"http://openid.net/specs/openid-connect-core-1_0.html#IDToken"
],
"claim_type":"urn:oid:0.9.2342.19200300.100.1.3",
"friendly_name":"email",
"issuer":["https://example.com/idp"],
"name":"email23423453ou453"
}
],
"redirect_user":"https://as.example.com/rqp_claims?id=2346576421"
}
Raises:
**OxdServerError:** When oxd-server reports a generic internal_error
**InvalidTicketError:** When the oxd server returns a "invalid_ticket" error
"""
params = {
"oxd_id": self.oxd_id,
"ticket": ticket
}
if claim_token:
params["claim_token"] = claim_token
if claim_token_format:
params["claim_token_format"] = claim_token_format
if pct:
params["pct"] = pct
if rpt:
params["rpt"] = rpt
if scope:
params["scope"] = scope
if state:
params["state"] = state
logger.debug("Sending command `uma_rp_get_rpt` with params %s", params)
response = self.msgr.request("uma_rp_get_rpt", **params)
logger.debug("Received response: %s", response)
if response['status'] == 'ok':
return response['data']
if response['data']['error'] == 'internal_error':
raise OxdServerError(response['data'])
if response['data']['error'] == 'need_info':
return response['data']
if response['data']['error'] == 'invalid_ticket':
raise InvalidTicketError(response['data'])
|
python
|
{
"resource": ""
}
|
q16635
|
Client.uma_rp_get_claims_gathering_url
|
train
|
def uma_rp_get_claims_gathering_url(self, ticket):
"""UMA RP function to get the claims gathering URL.
Parameters:
        * **ticket (str):** ticket to pass to the auth server. In 90% of cases, this will be obtained from the 'need_info' error of get_rpt
Returns:
**string** specifying the claims gathering url
"""
params = {
'oxd_id': self.oxd_id,
'claims_redirect_uri': self.config.get('client',
'claims_redirect_uri'),
'ticket': ticket
}
logger.debug("Sending command `uma_rp_get_claims_gathering_url` with "
"params %s", params)
response = self.msgr.request("uma_rp_get_claims_gathering_url",
**params)
logger.debug("Received response: %s", response)
if response['status'] == 'error':
raise OxdServerError(response['data'])
return response['data']['url']
|
python
|
{
"resource": ""
}
|
q16636
|
Client.setup_client
|
train
|
def setup_client(self):
"""The command registers the client for communication protection. This
will be used to obtain an access token via the Get Client Token
command. The access token will be passed as a protection_access_token
parameter to other commands.
Note:
            If you are using the oxd-https-extension, you must set up the client
Returns:
**dict:** the client setup information
Example response::
{
"oxd_id":"6F9619FF-8B86-D011-B42D-00CF4FC964FF",
"op_host": "<op host>",
"client_id":"<client id>",
"client_secret":"<client secret>",
"client_registration_access_token":"<Client registration access token>",
"client_registration_client_uri":"<URI of client registration>",
"client_id_issued_at":"<client_id issued at>",
"client_secret_expires_at":"<client_secret expires at>"
}
"""
# add required params for the command
params = {
"authorization_redirect_uri": self.authorization_redirect_uri,
"oxd_rp_programming_language": "python",
}
# add other optional params if they exist in config
for op in self.opt_params:
if self.config.get("client", op):
params[op] = self.config.get("client", op)
for olp in self.opt_list_params:
if self.config.get("client", olp):
params[olp] = self.config.get("client", olp).split(",")
logger.debug("Sending command `setup_client` with params %s", params)
response = self.msgr.request("setup_client", **params)
logger.debug("Received response: %s", response)
if response['status'] == 'error':
raise OxdServerError(response['data'])
data = response["data"]
self.oxd_id = data["oxd_id"]
self.config.set("oxd", "id", data["oxd_id"])
self.config.set("client", "client_id", data["client_id"])
self.config.set("client", "client_secret", data["client_secret"])
if data["client_registration_access_token"]:
self.config.set("client", "client_registration_access_token",
data["client_registration_access_token"])
if data["client_registration_client_uri"]:
self.config.set("client", "client_registration_client_uri",
data["client_registration_client_uri"])
self.config.set("client", "client_id_issued_at",
str(data["client_id_issued_at"]))
return data
|
python
|
{
"resource": ""
}
|
q16637
|
Client.get_client_token
|
train
|
def get_client_token(self, client_id=None, client_secret=None,
op_host=None, op_discovery_path=None, scope=None,
auto_update=True):
"""Function to get the client token which can be used for protection in
all future communication. The access token received by this method is
stored in the config file and used as the `protection_access_token`
for all subsequent calls to oxd.
Parameters:
* **client_id (str, optional):** client id from OP or from previous `setup_client` call
* **client_secret (str, optional):** client secret from the OP or from `setup_client` call
* **op_host (str, optional):** OP Host URL, default is read from the site configuration file
* **op_discovery_path (str, optional):** op discovery path provided by OP
* **scope (list, optional):** scopes of access required, default values are obtained from the config file
* **auto_update(bool, optional):** automatically get a new access_token when the current one expires. If this is set to False, then the application must call `get_client_token` when the token expires to update the client with a new access token.
Returns:
**dict:** The client token and the refresh token in the form.
Example response ::
{
"access_token":"6F9619FF-8B86-D011-B42D-00CF4FC964FF",
"expires_in": 399,
"refresh_token": "fr459f",
"scope": "openid"
}
"""
# override the values from config
params = dict(client_id=client_id, client_secret=client_secret,
op_host=op_host)
if op_discovery_path:
params['op_discovery_path'] = op_discovery_path
if scope and isinstance(scope, list):
params['scope'] = scope
# If client id and secret aren't passed, then just read from the config
if not client_id:
params["client_id"] = self.config.get("client", "client_id")
if not client_secret:
params["client_secret"] = self.config.get("client",
"client_secret")
if not op_host:
params["op_host"] = self.config.get("client", "op_host")
logger.debug("Sending command `get_client_token` with params %s",
params)
response = self.msgr.request("get_client_token", **params)
logger.debug("Received response: %s", response)
if response['status'] == 'error':
raise OxdServerError(response['data'])
self.config.set("client", "protection_access_token",
response["data"]["access_token"])
self.msgr.access_token = response["data"]["access_token"]
# Setup a new timer thread to refresh the access token.
if auto_update:
interval = int(response['data']['expires_in'])
args = [client_id, client_secret, op_host, op_discovery_path,
scope, auto_update]
logger.info("Setting up a threading.Timer to get_client_token in "
"%s seconds", interval)
t = Timer(interval, self.get_client_token, args)
t.start()
return response['data']
|
python
|
{
"resource": ""
}
|
q16638
|
Client.remove_site
|
train
|
def remove_site(self):
"""Cleans up the data for the site.
Returns:
oxd_id if the process was completed without error
Raises:
OxdServerError if there was an issue with the operation
"""
params = dict(oxd_id=self.oxd_id)
logger.debug("Sending command `remove_site` with params %s",
params)
response = self.msgr.request("remove_site", **params)
logger.debug("Received response: %s", response)
if response['status'] == 'error':
raise OxdServerError(response['data'])
return response['data']['oxd_id']
|
python
|
{
"resource": ""
}
|
q16639
|
Client.introspect_rpt
|
train
|
def introspect_rpt(self, rpt):
"""Gives information about an RPT.
Parameters:
* **rpt (str, required):** rpt from uma_rp_get_rpt function
Returns:
**dict:** The information about the RPT.
Example response ::
{
"active": true,
"exp": 1256953732,
"iat": 1256912345,
"nbf": null,
"permissions": [{
"resource_id": "112210f47de98100",
"resource_scopes": [
"view",
"http://photoz.example.com/dev/actions/print"
],
"exp": 1256953732
}],
"client_id": "@6F96!19756yCF4F!C964FF",
"sub": "John Doe",
"aud": "@6F96!19756yCF4F!C964FF",
"iss": "https://idp.example.com",
"jti": null
}
Raises:
OxdServerError if there was an issue with the operation
"""
params = dict(oxd_id=self.oxd_id)
params['rpt'] = rpt
logger.debug("Sending command `introspect_rpt` with params %s",
params)
response = self.msgr.request("introspect_rpt", **params)
logger.debug("Received response: %s", response)
if response['status'] == 'error':
raise OxdServerError(response['data'])
return response['data']
|
python
|
{
"resource": ""
}
|
q16640
|
DjangoCmsMixin.get_placeholder_field_names
|
train
|
def get_placeholder_field_names(self):
"""
Returns a list with the names of all PlaceholderFields.
"""
return [field.name for field in self._meta.fields if field.get_internal_type() == 'PlaceholderField']
|
python
|
{
"resource": ""
}
|
q16641
|
logout_callback
|
train
|
def logout_callback():
"""Route called by the OpenID provider when user logs out.
Clear the cookies here.
"""
resp = make_response('Logging Out')
resp.set_cookie('sub', 'null', expires=0)
resp.set_cookie('session_id', 'null', expires=0)
return resp
|
python
|
{
"resource": ""
}
|
q16642
|
SocketMessenger.__connect
|
train
|
def __connect(self):
"""A helper function to make connection."""
try:
logger.debug("Socket connecting to %s:%s", self.host, self.port)
self.sock.connect((self.host, self.port))
except socket.error as e:
logger.exception("socket error %s", e)
logger.error("Closing socket and recreating a new one.")
self.sock.close()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect((self.host, self.port))
|
python
|
{
"resource": ""
}
|
q16643
|
SocketMessenger.send
|
train
|
def send(self, command):
"""send function sends the command to the oxd server and recieves the
response.
Parameters:
* **command (dict):** Dict representation of the JSON command string
Returns:
**response (dict):** The JSON response from the oxd Server as a dict
"""
cmd = json.dumps(command)
cmd = "{:04d}".format(len(cmd)) + cmd
msg_length = len(cmd)
# make the first time connection
if not self.firstDone:
logger.info('Initiating first time socket connection.')
self.__connect()
self.firstDone = True
# Send the message the to the server
totalsent = 0
while totalsent < msg_length:
try:
logger.debug("Sending: %s", cmd[totalsent:])
sent = self.sock.send(cmd[totalsent:])
totalsent = totalsent + sent
except socket.error as e:
logger.exception("Reconneting due to socket error. %s", e)
self.__connect()
logger.info("Reconnected to socket.")
# Check and receive the response if available
parts = []
resp_length = 0
received = 0
done = False
while not done:
part = self.sock.recv(1024)
if part == "":
logger.error("Socket connection broken, read empty.")
self.__connect()
logger.info("Reconnected to socket.")
# Find out the length of the response
if len(part) > 0 and resp_length == 0:
resp_length = int(part[0:4])
part = part[4:]
# Set Done flag
received = received + len(part)
if received >= resp_length:
done = True
parts.append(part)
response = "".join(parts)
# return the JSON as a namedtuple object
return json.loads(response)
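# Illustrative sketch (assumed command): the oxd socket protocol frames each
# JSON message with its length as a zero-padded 4-digit prefix, which is what
# send() adds before writing and strips when reading the response.
import json

cmd = json.dumps({"command": "get_authorization_url",
                  "params": {"oxd_id": "example-oxd-id"}})
framed = "{:04d}".format(len(cmd)) + cmd
assert int(framed[:4]) == len(framed) - 4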
|
python
|
{
"resource": ""
}
|
q16644
|
SocketMessenger.request
|
train
|
def request(self, command, **kwargs):
"""Function that builds the request and returns the response from
oxd-server
Parameters:
* **command (str):** The command that has to be sent to the oxd-server
* ** **kwargs:** The parameters that should accompany the request
Returns:
**dict:** the returned response from oxd-server as a dictionary
"""
payload = {
"command": command,
"params": dict()
}
for item in kwargs.keys():
payload["params"][item] = kwargs.get(item)
if self.access_token:
payload["params"]["protection_access_token"] = self.access_token
return self.send(payload)
|
python
|
{
"resource": ""
}
|
q16645
|
HttpMessenger.request
|
train
|
def request(self, command, **kwargs):
"""Function that builds the request and returns the response
Parameters:
* **command (str):** The command that has to be sent to the oxd-server
* ** **kwargs:** The parameters that should accompany the request
Returns:
**dict:** the returned response from oxd-server as a dictionary
"""
url = self.base + command.replace("_", "-")
req = urllib2.Request(url, json.dumps(kwargs))
req.add_header("User-Agent", "oxdpython/%s" % __version__)
req.add_header("Content-type", "application/json; charset=UTF-8")
# add the protection token if available
if self.access_token:
req.add_header("Authorization",
"Bearer {0}".format(self.access_token))
gcontext = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
resp = urllib2.urlopen(req, context=gcontext)
return json.load(resp)
|
python
|
{
"resource": ""
}
|
q16646
|
ChebiEntity.get_mol_filename
|
train
|
def get_mol_filename(self):
'''Returns mol filename'''
mol_filename = parsers.get_mol_filename(self.__chebi_id)
if mol_filename is None:
mol_filename = parsers.get_mol_filename(self.get_parent_id())
if mol_filename is None:
for parent_or_child_id in self.__get_all_ids():
mol_filename = \
parsers.get_mol_filename(parent_or_child_id)
if mol_filename is not None:
break
return mol_filename
|
python
|
{
"resource": ""
}
|
q16647
|
ChebiEntity.__get_all_ids
|
train
|
def __get_all_ids(self):
'''Returns all ids'''
if self.__all_ids is None:
parent_id = parsers.get_parent_id(self.__chebi_id)
self.__all_ids = parsers.get_all_ids(self.__chebi_id
if math.isnan(parent_id)
else parent_id)
if self.__all_ids is None:
self.__all_ids = []
return self.__all_ids
|
python
|
{
"resource": ""
}
|
q16648
|
Resource.set_scope
|
train
|
def set_scope(self, http_method, scope):
"""Set a scope condition for the resource for a http_method
Parameters:
* **http_method (str):** HTTP method like GET, POST, PUT, DELETE
* **scope (str, list):** the scope of access control as str if single, or as a list of strings if multiple scopes are to be set
"""
for con in self.conditions:
if http_method in con['httpMethods']:
if isinstance(scope, list):
con['scopes'] = scope
elif isinstance(scope, str) or isinstance(scope, unicode):
con['scopes'].append(scope)
return
# If not present, then create a new condition
if isinstance(scope, list):
self.conditions.append({'httpMethods': [http_method],
'scopes': scope})
elif isinstance(scope, str) or isinstance(scope, unicode):
self.conditions.append({'httpMethods': [http_method],
'scopes': [scope]})
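# Illustrative sketch (assumed scope URIs): the conditions structure that
# set_scope builds, mapping HTTP methods to the UMA scopes protecting them.
example_conditions = [
    {'httpMethods': ['GET'], 'scopes': ['https://example.com/uma/view']},
    {'httpMethods': ['POST', 'PUT'], 'scopes': ['https://example.com/uma/edit']},
]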
|
python
|
{
"resource": ""
}
|
q16649
|
ResourceSet.add
|
train
|
def add(self, path):
"""Adds a new resource with the given path to the resource set.
Parameters:
* **path (str, unicode):** path of the resource to be protected
Raises:
TypeError when the path is not a string or a unicode string
"""
if not isinstance(path, str) and not isinstance(path, unicode):
raise TypeError('The value passed for parameter path is not a str'
' or unicode')
resource = Resource(path)
self.resources[path] = resource
return resource
|
python
|
{
"resource": ""
}
|
q16650
|
search
|
train
|
def search(term, exact=False, rows=1e6):
'''Searches ChEBI via ols.'''
url = 'https://www.ebi.ac.uk/ols/api/search?ontology=chebi' + \
'&exact=' + str(exact) + '&q=' + term + \
'&rows=' + str(int(rows))
response = requests.get(url)
data = response.json()
return [ChebiEntity(doc['obo_id']) for doc in data['response']['docs']]
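# Illustrative sketch (assumed search term): the OLS search URL built above;
# issuing the request requires network access, so it is shown here unsent.
example_term, example_exact, example_rows = 'glucose', True, 50
example_url = ('https://www.ebi.ac.uk/ols/api/search?ontology=chebi'
               '&exact=' + str(example_exact) + '&q=' + example_term +
               '&rows=' + str(int(example_rows)))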
|
python
|
{
"resource": ""
}
|
q16651
|
Configurer.get
|
train
|
def get(self, section, key):
"""get function reads the config value for the requested section and
key and returns it
Parameters:
* **section (string):** the section to look for the config value either - oxd, client
* **key (string):** the key for the config value required
Returns:
            **value (string):** the function returns the value of the key in the appropriate format if found, or returns None if such a section or key could not be found
Example:
config = Configurer(location)
oxd_port = config.get('oxd', 'port') # returns the port of the oxd
"""
try:
return self.parser.get(section, key)
except (NoOptionError, NoSectionError) as e:
logger.warning("%s", e)
return None
|
python
|
{
"resource": ""
}
|
q16652
|
Configurer.set
|
train
|
def set(self, section, key, value):
"""set function sets a particular value for the specified key in the
specified section and writes it to the config file.
Parameters:
* **section (string):** the section under which the config should be saved. Only accepted values are - oxd, client
* **key (string):** the key/name of the config value
* **value (string):** the value which needs to be stored as a string
Returns:
**success (bool):** a boolean indication of whether the value was stored successfully in the file
"""
if not self.parser.has_section(section):
logger.warning("Invalid config section: %s", section)
return False
self.parser.set(section, key, value)
with open(self.config_file, 'wb') as cfile:
self.parser.write(cfile)
return True
|
python
|
{
"resource": ""
}
|
q16653
|
get_all_formulae
|
train
|
def get_all_formulae(chebi_ids):
'''Returns all formulae'''
all_formulae = [get_formulae(chebi_id) for chebi_id in chebi_ids]
return [x for sublist in all_formulae for x in sublist]
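# Worked example of the flattening idiom used above (hypothetical return
# values, shown only to illustrate the list comprehension): if
# get_formulae('CHEBI:A') gave ['C6H12O6'] and get_formulae('CHEBI:B') gave
# ['H2O', 'CO2'], then get_all_formulae(['CHEBI:A', 'CHEBI:B']) would return
# ['C6H12O6', 'H2O', 'CO2'].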
|
python
|
{
"resource": ""
}
|
q16654
|
get_all_comments
|
train
|
def get_all_comments(chebi_ids):
'''Returns all comments'''
all_comments = [get_comments(chebi_id) for chebi_id in chebi_ids]
return [x for sublist in all_comments for x in sublist]
|
python
|
{
"resource": ""
}
|
q16655
|
get_all_compound_origins
|
train
|
def get_all_compound_origins(chebi_ids):
'''Returns all compound origins'''
all_compound_origins = [get_compound_origins(chebi_id)
for chebi_id in chebi_ids]
return [x for sublist in all_compound_origins for x in sublist]
|
python
|
{
"resource": ""
}
|
q16656
|
get_all_modified_on
|
train
|
def get_all_modified_on(chebi_ids):
'''Returns all modified on'''
all_modified_ons = [get_modified_on(chebi_id) for chebi_id in chebi_ids]
all_modified_ons = [modified_on for modified_on in all_modified_ons
if modified_on is not None]
return None if len(all_modified_ons) == 0 else sorted(all_modified_ons)[-1]
|
python
|
{
"resource": ""
}
|
q16657
|
get_all_database_accessions
|
train
|
def get_all_database_accessions(chebi_ids):
'''Returns all database accessions'''
all_database_accessions = [get_database_accessions(chebi_id)
for chebi_id in chebi_ids]
return [x for sublist in all_database_accessions for x in sublist]
|
python
|
{
"resource": ""
}
|
q16658
|
get_all_names
|
train
|
def get_all_names(chebi_ids):
'''Returns all names'''
all_names = [get_names(chebi_id) for chebi_id in chebi_ids]
return [x for sublist in all_names for x in sublist]
|
python
|
{
"resource": ""
}
|
q16659
|
get_all_outgoings
|
train
|
def get_all_outgoings(chebi_ids):
'''Returns all outgoings'''
all_outgoings = [get_outgoings(chebi_id) for chebi_id in chebi_ids]
return [x for sublist in all_outgoings for x in sublist]
|
python
|
{
"resource": ""
}
|
q16660
|
get_all_incomings
|
train
|
def get_all_incomings(chebi_ids):
'''Returns all incomings'''
all_incomings = [get_incomings(chebi_id) for chebi_id in chebi_ids]
return [x for sublist in all_incomings for x in sublist]
|
python
|
{
"resource": ""
}
|
q16661
|
get_mol_filename
|
train
|
def get_mol_filename(chebi_id):
'''Returns mol file'''
mol = get_mol(chebi_id)
if mol is None:
return None
file_descriptor, mol_filename = tempfile.mkstemp(str(chebi_id) +
'_', '.mol')
mol_file = open(mol_filename, 'w')
mol_file.write(mol.get_structure())
mol_file.close()
os.close(file_descriptor)
return mol_filename
|
python
|
{
"resource": ""
}
|
q16662
|
get_file
|
train
|
def get_file(filename):
'''Downloads filename from ChEBI FTP site'''
destination = __DOWNLOAD_PARAMS['path']
filepath = os.path.join(destination, filename)
if not __is_current(filepath):
if not os.path.exists(destination):
os.makedirs(destination)
url = 'ftp://ftp.ebi.ac.uk/pub/databases/chebi/' + \
'Flat_file_tab_delimited/'
urlretrieve(urlparse.urljoin(url, filename), filepath)
urlcleanup()
if filepath.endswith('.zip'):
zfile = zipfile.ZipFile(filepath, 'r')
filepath = os.path.join(destination, zfile.namelist()[0])
zfile.extractall(destination)
elif filepath.endswith('.gz'):
unzipped_filepath = filepath[:-len('.gz')]
if os.path.exists(unzipped_filepath) \
and __is_current(unzipped_filepath):
filepath = unzipped_filepath
else:
input_file = gzip.open(filepath, 'rb')
filepath = os.path.join(destination, input_file.name[:-len('.gz')])
output_file = open(filepath, 'wb')
for line in input_file:
output_file.write(line)
input_file.close()
output_file.close()
return filepath
|
python
|
{
"resource": ""
}
|
q16663
|
__is_current
|
train
|
def __is_current(filepath):
'''Checks whether file is current'''
if not __DOWNLOAD_PARAMS['auto_update']:
return True
if not os.path.isfile(filepath):
return False
return datetime.datetime.utcfromtimestamp(os.path.getmtime(filepath)) \
> __get_last_update_time()
|
python
|
{
"resource": ""
}
|
q16664
|
__get_last_update_time
|
train
|
def __get_last_update_time():
'''Returns last FTP site update time'''
now = datetime.datetime.utcnow()
# Get the first Tuesday of the month
first_tuesday = __get_first_tuesday(now)
if first_tuesday < now:
return first_tuesday
else:
first_of_month = datetime.datetime(now.year, now.month, 1)
last_month = first_of_month + datetime.timedelta(days=-1)
return __get_first_tuesday(last_month)
|
python
|
{
"resource": ""
}
|
q16665
|
__get_first_tuesday
|
train
|
def __get_first_tuesday(this_date):
'''Get the first Tuesday of the month'''
month_range = calendar.monthrange(this_date.year, this_date.month)
first_of_month = datetime.datetime(this_date.year, this_date.month, 1)
first_tuesday_day = (calendar.TUESDAY - month_range[0]) % 7
first_tuesday = first_of_month + datetime.timedelta(days=first_tuesday_day)
return first_tuesday
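# Worked example (illustrative): 1 March 2019 fell on a Friday (monthrange
# weekday 4), so (calendar.TUESDAY - 4) % 7 == 4 and the first Tuesday
# resolves to 5 March:
#
#   __get_first_tuesday(datetime.datetime(2019, 3, 15))
#   # -> datetime.datetime(2019, 3, 5, 0, 0)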
|
python
|
{
"resource": ""
}
|
q16666
|
get_frontend_data_dict_for_cms_page
|
train
|
def get_frontend_data_dict_for_cms_page(cms_page, cms_page_title, request, editable=False):
"""
Returns the data dictionary of a CMS page that is used by the frontend.
"""
placeholders = list(cms_page.placeholders.all())
placeholder_frontend_data_dict = get_frontend_data_dict_for_placeholders(
placeholders=placeholders,
request=request,
editable=editable
)
global_placeholder_data_dict = get_global_placeholder_data(placeholder_frontend_data_dict)
data = {
'containers': placeholder_frontend_data_dict,
'meta': {
'title': cms_page_title.page_title if cms_page_title.page_title else cms_page_title.title,
'description': cms_page_title.meta_description or '',
}
}
language_links = get_language_links(cms_page=cms_page, request=request)
if language_links:
data['meta']['languages'] = language_links
if placeholder_frontend_data_dict:
data['containers'] = placeholder_frontend_data_dict
if global_placeholder_data_dict:
data['global_placeholder_data'] = global_placeholder_data_dict
post_processer = settings.DJANGOCMS_SPA_CMS_PAGE_DATA_POST_PROCESSOR
if post_processer:
func = get_function_by_path(post_processer)
data = func(cms_page=cms_page, data=data, request=request)
return data
|
python
|
{
"resource": ""
}
|
q16667
|
get_frontend_data_dict_for_placeholders
|
train
|
def get_frontend_data_dict_for_placeholders(placeholders, request, editable=False):
"""
Takes a list of placeholder instances and returns the data that is used by the frontend to render all contents.
The returned dict is grouped by placeholder slots.
"""
data_dict = {}
for placeholder in placeholders:
if placeholder:
plugins = []
# We don't use the helper method `placeholder.get_plugins()` because of the wrong order by path.
placeholder_plugins = placeholder.cmsplugin_set.filter(language=request.LANGUAGE_CODE).order_by(
settings.DJANGOCMS_SPA_PLUGIN_ORDER_FIELD)
for plugin in placeholder_plugins:
# We need the complete cascading structure of the plugins in the frontend. This is why we ignore the
# children here and add them later in the loop.
if not plugin.parent:
plugins.append(get_frontend_data_dict_for_plugin(
request=request,
plugin=plugin,
editable=editable)
)
if plugins or editable:
data_dict[placeholder.slot] = {
'type': 'cmp-%s' % placeholder.slot,
'plugins': plugins,
}
if editable:
# This is the structure of the template `cms/toolbar/placeholder.html` that is used to register
# the frontend editing.
from cms.plugin_pool import plugin_pool
plugin_types = [cls.__name__ for cls in plugin_pool.get_all_plugins(placeholder.slot, placeholder.page)]
allowed_plugins = plugin_types + plugin_pool.get_system_plugins()
data_dict[placeholder.slot]['cms'] = [
'cms-placeholder-{}'.format(placeholder.pk),
{
'type': 'placeholder',
'name': str(placeholder.get_label()),
'page_language': request.LANGUAGE_CODE,
'placeholder_id': placeholder.pk,
'plugin_language': request.LANGUAGE_CODE,
'plugin_restriction': [module for module in allowed_plugins],
'addPluginHelpTitle': 'Add plugin to placeholder {}'.format(placeholder.get_label()),
'urls': {
'add_plugin': placeholder.get_add_url(),
'copy_plugin': placeholder.get_copy_url()
}
}
]
return data_dict
|
python
|
{
"resource": ""
}
|
q16668
|
Writer.write_waypoint
|
train
|
def write_waypoint(self, latitude=None, longitude=None, description=None):
"""
Adds a waypoint to the current task declaration. The first and the
last waypoint added will be treated as takeoff and landing location,
respectively.
::
writer.write_waypoint(
latitude=(51 + 7.345 / 60.),
longitude=(6 + 24.765 / 60.),
            description='Meiersberg',
)
# -> $PFLAC,S,ADDWP,5107345N,00624765E,Meiersberg
If no ``latitude`` or ``longitude`` is passed, the fields will be
filled with zeros (i.e. unknown coordinates). This however should only
be used for takeoff and landing points.
:param latitude: latitude of the point (between -90 and 90 degrees)
:param longitude: longitude of the point (between -180 and 180 degrees)
:param description: arbitrary text description of waypoint
"""
if not description:
description = ''
latitude = self.format_latitude(latitude)
longitude = self.format_longitude(longitude)
self.write_config(
'ADDWP', '%s,%s,%s' % (latitude, longitude, description[0:50])
)
|
python
|
{
"resource": ""
}
|
q16669
|
Model.set_model_pathnames
|
train
|
def set_model_pathnames(self):
"""Define the paths associated with this model."""
self.control_path = self.expt.control_path
self.input_basepath = self.expt.lab.input_basepath
self.work_path = self.expt.work_path
self.codebase_path = self.expt.lab.codebase_path
if len(self.expt.models) > 1:
self.control_path = os.path.join(self.control_path, self.name)
self.work_path = os.path.join(self.work_path, self.name)
self.codebase_path = os.path.join(self.codebase_path, self.name)
# NOTE: Individual models may override the work subdirectories
self.work_input_path = self.work_path
self.work_restart_path = self.work_path
self.work_output_path = self.work_path
self.work_init_path = self.work_path
self.exec_prefix = self.config.get('exe_prefix', '')
self.exec_name = self.config.get('exe', self.default_exec)
if self.exec_name:
# By default os.path.join will not prepend the lab bin_path
# to an absolute path
self.exec_path = os.path.join(self.expt.lab.bin_path,
self.exec_name)
else:
self.exec_path = None
if self.exec_path:
# Make exec_name consistent for models with fully qualified path.
# In all cases it will just be the name of the executable without a
# path
self.exec_name = os.path.basename(self.exec_path)
|
python
|
{
"resource": ""
}
|
q16670
|
Model.archive
|
train
|
def archive(self):
"""Store model output to laboratory archive."""
# Traverse the model directory deleting symlinks, zero length files
# and empty directories
for path, dirs, files in os.walk(self.work_path, topdown=False):
for f_name in files:
f_path = os.path.join(path, f_name)
if os.path.islink(f_path) or os.path.getsize(f_path) == 0:
os.remove(f_path)
if len(os.listdir(path)) == 0:
os.rmdir(path)
|
python
|
{
"resource": ""
}
|
q16671
|
parse
|
train
|
def parse():
"""Parse the command line inputs and execute the subcommand."""
# Build the list of subcommand modules
modnames = [mod for (_, mod, _)
in pkgutil.iter_modules(payu.subcommands.__path__,
prefix=payu.subcommands.__name__ + '.')
if mod.endswith('_cmd')]
subcmds = [importlib.import_module(mod) for mod in modnames]
# Construct the subcommand parser
parser = argparse.ArgumentParser()
parser.add_argument('--version', action='version',
version='payu {0}'.format(payu.__version__))
subparsers = parser.add_subparsers()
for cmd in subcmds:
cmd_parser = subparsers.add_parser(cmd.title, **cmd.parameters)
cmd_parser.set_defaults(run_cmd=cmd.runcmd)
for arg in cmd.arguments:
cmd_parser.add_argument(*arg['flags'], **arg['parameters'])
# Display help if no arguments are provided
if len(sys.argv) == 1:
parser.print_help()
else:
args = vars(parser.parse_args())
run_cmd = args.pop('run_cmd')
run_cmd(**args)
|
python
|
{
"resource": ""
}
|
q16672
|
get_model_type
|
train
|
def get_model_type(model_type, config):
"""Determine and validate the active model type."""
# If no model type is given, then check the config file
if not model_type:
model_type = config.get('model')
# If there is still no model type, try the parent directory
if not model_type:
model_type = os.path.basename(os.path.abspath(os.pardir))
print('payu: warning: Assuming model is {0} based on parent directory '
'name.'.format(model_type))
if model_type not in supported_models:
print('payu: error: Unknown model {0}'.format(model_type))
sys.exit(-1)
|
python
|
{
"resource": ""
}
|
q16673
|
set_env_vars
|
train
|
def set_env_vars(init_run=None, n_runs=None, lab_path=None, dir_path=None,
reproduce=None):
"""Construct the environment variables used by payu for resubmissions."""
payu_env_vars = {}
# Setup Python dynamic library link
lib_paths = sysconfig.get_config_vars('LIBDIR')
payu_env_vars['LD_LIBRARY_PATH'] = ':'.join(lib_paths)
if 'PYTHONPATH' in os.environ:
payu_env_vars['PYTHONPATH'] = os.environ['PYTHONPATH']
# Set (or import) the path to the PAYU scripts (PAYU_PATH)
# NOTE: We may be able to use sys.path[0] here.
payu_binpath = os.environ.get('PAYU_PATH')
if not payu_binpath or not os.path.isdir(payu_binpath):
payu_binpath = os.path.dirname(sys.argv[0])
payu_env_vars['PAYU_PATH'] = payu_binpath
# Set the run counters
if init_run:
init_run = int(init_run)
assert init_run >= 0
payu_env_vars['PAYU_CURRENT_RUN'] = init_run
if n_runs:
n_runs = int(n_runs)
assert n_runs > 0
payu_env_vars['PAYU_N_RUNS'] = n_runs
# Import explicit project paths
if lab_path:
payu_env_vars['PAYU_LAB_PATH'] = os.path.normpath(lab_path)
if dir_path:
payu_env_vars['PAYU_DIR_PATH'] = os.path.normpath(dir_path)
if reproduce:
payu_env_vars['PAYU_REPRODUCE'] = reproduce
return payu_env_vars
|
python
|
{
"resource": ""
}
|
q16674
|
submit_job
|
train
|
def submit_job(pbs_script, pbs_config, pbs_vars=None):
"""Submit a userscript the scheduler."""
# Initialisation
if pbs_vars is None:
pbs_vars = {}
pbs_flags = []
pbs_queue = pbs_config.get('queue', 'normal')
pbs_flags.append('-q {queue}'.format(queue=pbs_queue))
pbs_project = pbs_config.get('project', os.environ['PROJECT'])
pbs_flags.append('-P {project}'.format(project=pbs_project))
pbs_resources = ['walltime', 'ncpus', 'mem', 'jobfs']
for res_key in pbs_resources:
res_flags = []
res_val = pbs_config.get(res_key)
if res_val:
res_flags.append('{key}={val}'.format(key=res_key, val=res_val))
if res_flags:
pbs_flags.append('-l {res}'.format(res=','.join(res_flags)))
# TODO: Need to pass lab.config_path somehow...
pbs_jobname = pbs_config.get('jobname', os.path.basename(os.getcwd()))
if pbs_jobname:
# PBSPro has a 15-character jobname limit
pbs_flags.append('-N {name}'.format(name=pbs_jobname[:15]))
pbs_priority = pbs_config.get('priority')
if pbs_priority:
pbs_flags.append('-p {priority}'.format(priority=pbs_priority))
pbs_flags.append('-l wd')
pbs_join = pbs_config.get('join', 'n')
if pbs_join not in ('oe', 'eo', 'n'):
print('payu: error: unknown qsub IO stream join setting.')
sys.exit(-1)
else:
pbs_flags.append('-j {join}'.format(join=pbs_join))
# Append environment variables to qsub command
# TODO: Support full export of environment variables: `qsub -V`
pbs_vstring = ','.join('{0}={1}'.format(k, v)
for k, v in pbs_vars.items())
pbs_flags.append('-v ' + pbs_vstring)
# Append any additional qsub flags here
pbs_flags_extend = pbs_config.get('qsub_flags')
if pbs_flags_extend:
pbs_flags.append(pbs_flags_extend)
if not os.path.isabs(pbs_script):
# NOTE: PAYU_PATH is always set if `set_env_vars` was always called.
# This is currently always true, but is not explicitly enforced.
# So this conditional check is a bit redundant.
payu_bin = pbs_vars.get('PAYU_PATH', os.path.dirname(sys.argv[0]))
pbs_script = os.path.join(payu_bin, pbs_script)
assert os.path.isfile(pbs_script)
# Set up environment modules here for PBS.
envmod.setup()
envmod.module('load', 'pbs')
# Construct job submission command
cmd = 'qsub {flags} -- {python} {script}'.format(
flags=' '.join(pbs_flags),
python=sys.executable,
script=pbs_script
)
print(cmd)
subprocess.check_call(shlex.split(cmd))
|
python
|
{
"resource": ""
}
|
q16675
|
Namcouple.substitute_timestep
|
train
|
def substitute_timestep(self, regex, timestep):
"""
Substitute a new timestep value using regex.
"""
# Make one change at a time, each change affects subsequent matches.
timestep_changed = False
while True:
matches = re.finditer(regex, self.str, re.MULTILINE | re.DOTALL)
none_updated = True
for m in matches:
if m.group(1) == timestep:
continue
else:
self.str = (self.str[:m.start(1)] + timestep +
self.str[m.end(1):])
none_updated = False
timestep_changed = True
break
if none_updated:
break
if not timestep_changed:
sys.stderr.write('WARNING: no update with {0}.\n'.format(regex))
|
python
|
{
"resource": ""
}
|
q16676
|
int_to_date
|
train
|
def int_to_date(date):
"""
Convert an int of form yyyymmdd to a python date object.
"""
year = date // 10**4
month = date % 10**4 // 10**2
day = date % 10**2
return datetime.date(year, month, day)
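# Example (illustrative):
#
#   int_to_date(20190301)   # -> datetime.date(2019, 3, 1)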
|
python
|
{
"resource": ""
}
|
q16677
|
runtime_from_date
|
train
|
def runtime_from_date(start_date, years, months, days, seconds, caltype):
"""
    Get the number of seconds from start_date to start_date plus the given
    years, months and days, with any extra seconds added on.
Ignores Feb 29 for caltype == NOLEAP.
"""
end_date = start_date + relativedelta(years=years, months=months,
days=days)
runtime = end_date - start_date
if caltype == NOLEAP:
runtime -= get_leapdays(start_date, end_date)
return int(runtime.total_seconds() + seconds)
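# Worked example (illustrative; assumes the module-level NOLEAP constant and
# the get_leapdays() helper defined elsewhere in this file): one model year
# starting on 2004-01-01 spans 366 calendar days, but the NOLEAP calendar
# drops 2004-02-29, giving 365 * 86400 seconds:
#
#   runtime_from_date(datetime.date(2004, 1, 1), 1, 0, 0, 0, NOLEAP)
#   # -> 31536000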
|
python
|
{
"resource": ""
}
|
q16678
|
date_plus_seconds
|
train
|
def date_plus_seconds(init_date, seconds, caltype):
"""
Get a new_date = date + seconds.
Ignores Feb 29 for no-leap days.
"""
end_date = init_date + datetime.timedelta(seconds=seconds)
if caltype == NOLEAP:
end_date += get_leapdays(init_date, end_date)
if end_date.month == 2 and end_date.day == 29:
end_date += datetime.timedelta(days=1)
return end_date
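# Worked example (illustrative): adding one day to 2016-02-28 under the
# NOLEAP calendar skips the leap day and lands on 1 March:
#
#   date_plus_seconds(datetime.datetime(2016, 2, 28), 86400, NOLEAP)
#   # -> datetime.datetime(2016, 3, 1, 0, 0)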
|
python
|
{
"resource": ""
}
|
q16679
|
get_leapdays
|
train
|
def get_leapdays(init_date, final_date):
"""
Find the number of leap days between arbitrary dates. Returns a
timedelta object.
FIXME: calculate this instead of iterating.
"""
curr_date = init_date
leap_days = 0
while curr_date != final_date:
if curr_date.month == 2 and curr_date.day == 29:
leap_days += 1
curr_date += datetime.timedelta(days=1)
return datetime.timedelta(days=leap_days)
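# Example (illustrative): the range [2000-01-01, 2009-01-01) contains the
# leap days of 2000, 2004 and 2008:
#
#   get_leapdays(datetime.date(2000, 1, 1), datetime.date(2009, 1, 1))
#   # -> datetime.timedelta(days=3)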
|
python
|
{
"resource": ""
}
|
q16680
|
calculate_leapdays
|
train
|
def calculate_leapdays(init_date, final_date):
"""Currently unsupported, it only works for differences in years."""
leap_days = (final_date.year - 1) // 4 - (init_date.year - 1) // 4
leap_days -= (final_date.year - 1) // 100 - (init_date.year - 1) // 100
leap_days += (final_date.year - 1) // 400 - (init_date.year - 1) // 400
# TODO: Internal date correction (e.g. init_date is 1-March or later)
return datetime.timedelta(days=leap_days)
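# Example (illustrative): for whole-year differences the closed form agrees
# with the iterative get_leapdays() above; 2000, 2004 and 2008 each
# contribute one leap day:
#
#   calculate_leapdays(datetime.date(2000, 1, 1), datetime.date(2009, 1, 1))
#   # -> datetime.timedelta(days=3)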
|
python
|
{
"resource": ""
}
|
q16681
|
Laboratory.get_default_lab_path
|
train
|
def get_default_lab_path(self, config):
"""Generate a default laboratory path based on user environment."""
# Default path settings
# Append project name if present (NCI-specific)
default_project = os.environ.get('PROJECT', '')
default_short_path = os.path.join('/short', default_project)
default_user = pwd.getpwuid(os.getuid()).pw_name
short_path = config.get('shortpath', default_short_path)
lab_name = config.get('laboratory', self.model_type)
if os.path.isabs(lab_name):
lab_path = lab_name
else:
user_name = config.get('user', default_user)
lab_path = os.path.join(short_path, user_name, lab_name)
return lab_path
|
python
|
{
"resource": ""
}
|
q16682
|
Laboratory.initialize
|
train
|
def initialize(self):
"""Create the laboratory directories."""
mkdir_p(self.archive_path)
mkdir_p(self.bin_path)
mkdir_p(self.codebase_path)
mkdir_p(self.input_basepath)
|
python
|
{
"resource": ""
}
|
q16683
|
get_job_id
|
train
|
def get_job_id(short=True):
"""
Return PBS job id
"""
jobid = os.environ.get('PBS_JOBID', '')
if short:
# Strip off '.rman2'
jobid = jobid.split('.')[0]
    return jobid
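# Example (illustrative): with the scheduler environment variable set,
#
#   os.environ['PBS_JOBID'] = '1234567.rman2'
#   get_job_id()             # -> '1234567'
#   get_job_id(short=False)  # -> '1234567.rman2'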
|
python
|
{
"resource": ""
}
|
q16684
|
get_job_info
|
train
|
def get_job_info():
"""
Get information about the job from the PBS server
"""
jobid = get_job_id()
if jobid == '':
return None
info = get_qstat_info('-ft {0}'.format(jobid), 'Job Id:')
# Select the dict for this job (there should only be one entry in any case)
info = info['Job Id: {}'.format(jobid)]
# Add the jobid to the dict and then return
info['Job_ID'] = jobid
return info
|
python
|
{
"resource": ""
}
|
q16685
|
Experiment.postprocess
|
train
|
def postprocess(self):
"""Submit a postprocessing script after collation"""
assert self.postscript
envmod.setup()
envmod.module('load', 'pbs')
cmd = 'qsub {script}'.format(script=self.postscript)
cmd = shlex.split(cmd)
rc = sp.call(cmd)
assert rc == 0, 'Postprocessing script submission failed.'
|
python
|
{
"resource": ""
}
|
q16686
|
date_to_um_date
|
train
|
def date_to_um_date(date):
"""
    Convert a date object to a list [year, month, day, hour, minute, second].
"""
assert date.hour == 0 and date.minute == 0 and date.second == 0
return [date.year, date.month, date.day, 0, 0, 0]
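# Example (illustrative):
#
#   date_to_um_date(datetime.datetime(2018, 7, 1))
#   # -> [2018, 7, 1, 0, 0, 0]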
|
python
|
{
"resource": ""
}
|
q16687
|
um_date_to_date
|
train
|
def um_date_to_date(d):
"""
    Convert a list of the form [year, month, day, hour, minute, second]
    to a datetime.datetime object.
"""
return datetime.datetime(year=d[0], month=d[1], day=d[2],
hour=d[3], minute=d[4], second=d[5])
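# Example (illustrative), the inverse of date_to_um_date() above:
#
#   um_date_to_date([2018, 7, 1, 0, 0, 0])
#   # -> datetime.datetime(2018, 7, 1, 0, 0)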
|
python
|
{
"resource": ""
}
|
q16688
|
setup
|
train
|
def setup(basepath=DEFAULT_BASEPATH):
"""Set the environment modules used by the Environment Module system."""
module_version = os.environ.get('MODULE_VERSION', DEFAULT_VERSION)
moduleshome = os.path.join(basepath, module_version)
# Abort if MODULESHOME does not exist
if not os.path.isdir(moduleshome):
print('payu: warning: MODULESHOME does not exist; disabling '
'environment modules.')
os.environ['MODULESHOME'] = ''
return
os.environ['MODULE_VERSION'] = module_version
os.environ['MODULE_VERSION_STACK'] = module_version
os.environ['MODULESHOME'] = moduleshome
if 'MODULEPATH' not in os.environ:
module_initpath = os.path.join(moduleshome, 'init', '.modulespath')
with open(module_initpath) as initpaths:
modpaths = [
line.partition('#')[0].strip()
for line in initpaths.readlines() if not line.startswith('#')
]
os.environ['MODULEPATH'] = ':'.join(modpaths)
os.environ['LOADEDMODULES'] = os.environ.get('LOADEDMODULES', '')
# Environment modules with certain characters will cause corruption
# when MPI jobs get launched on other nodes (possibly a PBS issue).
#
# Bash processes obscure the issue on Raijin, since it occurs in an
# environment module function, and bash moves those to the end of
# the environment variable list.
#
# Raijin's mpirun wrapper is a bash script, and therefore "fixes" by doing
# the shuffle and limiting the damage to other bash functions, but some
# wrappers (e.g. OpenMPI 2.1.x) may not be present. So we manually patch
# the problematic variable here. But a more general solution would be nice
# someday.
if 'BASH_FUNC_module()' in os.environ:
bash_func_module = os.environ['BASH_FUNC_module()']
os.environ['BASH_FUNC_module()'] = bash_func_module.replace('\n', ';')
|
python
|
{
"resource": ""
}
|
q16689
|
module
|
train
|
def module(command, *args):
"""Run the modulecmd tool and use its Python-formatted output to set the
environment variables."""
if 'MODULESHOME' not in os.environ:
print('payu: warning: No Environment Modules found; skipping {0} call.'
''.format(command))
return
modulecmd = ('{0}/bin/modulecmd'.format(os.environ['MODULESHOME']))
cmd = '{0} python {1} {2}'.format(modulecmd, command, ' '.join(args))
envs, _ = subprocess.Popen(shlex.split(cmd),
stdout=subprocess.PIPE).communicate()
exec(envs)
|
python
|
{
"resource": ""
}
|
q16690
|
Mom6.init_config
|
train
|
def init_config(self):
"""Patch input.nml as a new or restart run."""
input_fpath = os.path.join(self.work_path, 'input.nml')
input_nml = f90nml.read(input_fpath)
if self.expt.counter == 0 or self.expt.repeat_run:
input_type = 'n'
else:
input_type = 'r'
input_nml['MOM_input_nml']['input_filename'] = input_type
f90nml.write(input_nml, input_fpath, force=True)
|
python
|
{
"resource": ""
}
|
q16691
|
mkdir_p
|
train
|
def mkdir_p(path):
"""Create a new directory; ignore if it already exists."""
try:
os.makedirs(path)
except EnvironmentError as exc:
if exc.errno != errno.EEXIST:
raise
|
python
|
{
"resource": ""
}
|
q16692
|
read_config
|
train
|
def read_config(config_fname=None):
"""Parse input configuration file and return a config dict."""
if not config_fname:
config_fname = DEFAULT_CONFIG_FNAME
try:
with open(config_fname, 'r') as config_file:
config = yaml.load(config_file)
except IOError as exc:
if exc.errno == errno.ENOENT:
print('payu: warning: Configuration file {0} not found!'
.format(config_fname))
config = {}
else:
raise
collate_config = config.pop('collate', {})
# Transform legacy collate config options
if type(collate_config) is bool:
collate_config = {'enable': collate_config}
collatestr = 'collate_'
foundkeys = []
# Cycle through old collate config and convert to newer dict format
for key in list(config.keys()):
if key.startswith(collatestr):
foundkeys.append(key)
collate_config[key[len(collatestr):]] = config.pop(key)
if foundkeys:
print("Use of these keys is deprecated: {}.".format(
", ".join(foundkeys)))
print("Instead use collate dictionary and subkey "
"without 'collate_' prefix")
config['collate'] = collate_config
return config
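# Illustrative sketch of the legacy-key transformation (hypothetical config,
# not from the payu docs): a config.yaml containing
#
#   collate: true
#   collate_queue: copyq
#
# is normalised so that config['collate'] == {'enable': True, 'queue': 'copyq'}
# and a deprecation notice is printed for the old 'collate_queue' key.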
|
python
|
{
"resource": ""
}
|
q16693
|
make_symlink
|
train
|
def make_symlink(src_path, lnk_path):
"""Safely create a symbolic link to an input field."""
# Check for Lustre 60-character symbolic link path bug
if CHECK_LUSTRE_PATH_LEN:
src_path = patch_lustre_path(src_path)
lnk_path = patch_lustre_path(lnk_path)
# os.symlink will happily make a symlink to a non-existent
# file, but we don't want that behaviour
# XXX: Do we want to be doing this?
if not os.path.exists(src_path):
return
try:
os.symlink(src_path, lnk_path)
except EnvironmentError as exc:
if exc.errno != errno.EEXIST:
raise
elif not os.path.islink(lnk_path):
# Warn the user, but do not interrupt the job
print("Warning: Cannot create symbolic link to {p}; a file named "
"{f} already exists.".format(p=src_path, f=lnk_path))
else:
# Overwrite any existing symbolic link
if os.path.realpath(lnk_path) != src_path:
os.remove(lnk_path)
os.symlink(src_path, lnk_path)
|
python
|
{
"resource": ""
}
|
q16694
|
splitpath
|
train
|
def splitpath(path):
"""Recursively split a filepath into all directories and files."""
head, tail = os.path.split(path)
if tail == '':
return head,
elif head == '':
return tail,
else:
return splitpath(head) + (tail,)
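# Example (illustrative, POSIX-style paths):
#
#   splitpath('/short/v45/exp/output000')
#   # -> ('/', 'short', 'v45', 'exp', 'output000')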
|
python
|
{
"resource": ""
}
|
q16695
|
patch_lustre_path
|
train
|
def patch_lustre_path(f_path):
"""Patch any 60-character pathnames, to avoid a current Lustre bug."""
if CHECK_LUSTRE_PATH_LEN and len(f_path) == 60:
if os.path.isabs(f_path):
f_path = '/.' + f_path
else:
f_path = './' + f_path
return f_path
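# Example (illustrative; assumes the module constant CHECK_LUSTRE_PATH_LEN is
# True): an absolute path of exactly 60 characters gets a '/.' prefix, which
# changes its length without changing the file it resolves to:
#
#   p = '/' + 'a' * 59      # len(p) == 60
#   patch_lustre_path(p)    # -> '/.' + p (62 characters)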
|
python
|
{
"resource": ""
}
|
q16696
|
PayuManifest.check_fast
|
train
|
def check_fast(self, reproduce=False, **args):
"""
Check hash value for all filepaths using a fast hash function and fall
back to slower full hash functions if fast hashes fail to agree.
"""
hashvals = {}
fast_check = self.check_file(
filepaths=self.data.keys(),
hashvals=hashvals,
hashfn=fast_hashes,
shortcircuit=True,
**args
)
if not fast_check:
# Save all the fast hashes for failed files that we've already
# calculated
for filepath in hashvals:
for hash, val in hashvals[filepath].items():
self.data[filepath]['hashes'][hash] = val
if reproduce:
for filepath in hashvals:
print('Check failed for {0} {1}'
''.format(filepath, hashvals[filepath]))
tmphash = {}
full_check = self.check_file(
filepaths=filepath,
hashfn=full_hashes,
hashvals=tmphash,
shortcircuit=False,
**args
)
if full_check:
# File is still ok, so replace fast hashes
print('Full hashes ({0}) checked ok'
''.format(full_hashes))
print('Updating fast hashes for {0} in {1}'
''.format(filepath, self.path))
self.add_fast(filepath, force=True)
print('Saving updated manifest')
self.needsync = True
else:
sys.stderr.write(
'Run cannot reproduce: manifest {0} is not '
'correct\n'.format(self.path)
)
for path, hashdict in tmphash.items():
print(' {0}:'.format(path))
for hash, val in hashdict.items():
hash_table = self.data[path]['hashes']
hash_table_val = hash_table.get(hash, None)
print(' {0}: {1} != {2}'
''.format(hash, val, hash_table_val))
sys.exit(1)
else:
# Not relevant if full hashes are correct. Regenerate full
# hashes for all filepaths that failed fast check.
print('Updating full hashes for {0} files in {1}'
''.format(len(hashvals), self.path))
# Add all full hashes at once -- much faster. Definitely want
# to force the full hash to be updated. In the specific case of
# an empty hash the value will be None, without force it will
# be written as null.
self.add(
filepaths=list(hashvals.keys()),
hashfn=full_hashes,
force=True,
fullpaths=[self.fullpath(fpath) for fpath
in list(hashvals.keys())]
)
# Flag need to update version on disk
self.needsync = True
|
python
|
{
"resource": ""
}
|
q16697
|
PayuManifest.add_filepath
|
train
|
def add_filepath(self, filepath, fullpath, copy=False):
"""
Bespoke function to add filepath & fullpath to manifest
object without hashing. Can defer hashing until all files are
added. Hashing all at once is much faster as overhead for
threading is spread over all files
"""
# Ignore directories
if os.path.isdir(fullpath):
return False
# Ignore anything matching the ignore patterns
for pattern in self.ignore:
if fnmatch.fnmatch(os.path.basename(fullpath), pattern):
return False
if filepath not in self.data:
self.data[filepath] = {}
self.data[filepath]['fullpath'] = fullpath
if 'hashes' not in self.data[filepath]:
self.data[filepath]['hashes'] = {hash: None for hash in all_hashes}
if copy:
self.data[filepath]['copy'] = copy
if hasattr(self, 'existing_filepaths'):
if filepath in self.existing_filepaths:
self.existing_filepaths.remove(filepath)
return True
|
python
|
{
"resource": ""
}
|
q16698
|
PayuManifest.add_fast
|
train
|
def add_fast(self, filepath, hashfn=None, force=False):
"""
Bespoke function to add filepaths but set shortcircuit to True, which
means only the first calculable hash will be stored. In this way only
one "fast" hashing function need be called for each filepath.
"""
if hashfn is None:
hashfn = fast_hashes
self.add(filepath, hashfn, force, shortcircuit=True)
|
python
|
{
"resource": ""
}
|
q16699
|
PayuManifest.copy_file
|
train
|
def copy_file(self, filepath):
"""
Returns flag which says to copy rather than link a file.
"""
copy_file = False
try:
copy_file = self.data[filepath]['copy']
except KeyError:
return False
return copy_file
|
python
|
{
"resource": ""
}
|