sentence1
stringlengths
52
3.87M
sentence2
stringlengths
1
47.2k
label
stringclasses
1 value
def guess_timefmt(datestr):
    """
    Try to guess the format a date is written in.

    The following formats are supported:

    ================= ============== ===============
    Format            Example        Python format
    ----------------- -------------- ---------------
    ``YYYY-MM-DD``    2002-04-21     %Y-%m-%d
    ``YYYY.MM.DD``    2002.04.21     %Y.%m.%d
    ``YYYY MM DD``    2002 04 21     %Y %m %d
    ``DD-MM-YYYY``    21-04-2002     %d-%m-%Y
    ``DD.MM.YYYY``    21.04.2002     %d.%m.%Y
    ``DD MM YYYY``    21 04 2002     %d %m %Y
    ``DD/MM/YYYY``    21/04/2002     %d/%m/%Y
    ================= ============== ===============

    These formats can also be used for seasonal (yearly recurring) time
    series. The year needs to be replaced by ``9999`` or another
    configurable year representing the seasonal year.

    The following formats are recognised depending on your locale setting.
    There is no guarantee that this will work.

    ================= ============== ===============
    Format            Example        Python format
    ----------------- -------------- ---------------
    ``DD-mmm-YYYY``   21-Apr-2002    %d-%b-%Y
    ``DD.mmm.YYYY``   21.Apr.2002    %d.%b.%Y
    ``DD mmm YYYY``   21 Apr 2002    %d %b %Y
    ``mmm DD YYYY``   Apr 21 2002    %b %d %Y
    ``Mmmmm DD YYYY`` April 21 2002  %B %d %Y
    ================= ============== ===============

    .. note::
        - The time needs to follow this definition without exception:
          `%H:%M:%S.%f`. A complete date and time should therefore look like
          this::

            2002-04-21 15:29:37.522

        - Be aware that in a file with comma separated values you should
          not use a date format that contains commas.
    """
    # A numeric value carries no textual format to guess.
    if isinstance(datestr, (float, int)):
        return None

    seasonal_key = str(config.get('DEFAULT', 'seasonal_key', '9999'))

    # Use 'T' as the date/time delimiter for ISO-style strings, otherwise a space.
    if datestr.find('T') > 0:
        dt_delim = 'T'
    else:
        dt_delim = ' '

    delimiters = ['-', '.', ' ', '/']
    formatstrings = [['%Y', '%m', '%d'],
                     ['%d', '%m', '%Y'],
                     ['%d', '%b', '%Y'],
                     ['XXXX', '%m', '%d'],
                     ['%d', '%m', 'XXXX'],
                     ['%d', '%b', 'XXXX'],
                     [seasonal_key, '%m', '%d'],
                     ['%d', '%m', seasonal_key],
                     ['%d', '%b', seasonal_key]]

    timeformats = ['%H:%M:%S.%f', '%H:%M:%S', '%H:%M',
                   '%H:%M:%S.%f000Z', '%H:%M:%S.%fZ']

    # Check if a time component is indicated or not.  Initialised up front so
    # the flag is always defined even if the loop never sets it.
    usetime = False
    for timefmt in timeformats:
        try:
            datetime.strptime(datestr.split(dt_delim)[-1].strip(), timefmt)
            usetime = True
            break
        except ValueError:
            usetime = False

    # Check the simple delimiter-separated formats first:
    for fmt in formatstrings:
        for delim in delimiters:
            datefmt = fmt[0] + delim + fmt[1] + delim + fmt[2]
            if usetime:
                for timefmt in timeformats:
                    complfmt = datefmt + dt_delim + timefmt
                    try:
                        datetime.strptime(datestr, complfmt)
                        return complfmt
                    except ValueError:
                        pass
            else:
                try:
                    datetime.strptime(datestr, datefmt)
                    return datefmt
                except ValueError:
                    pass

    # Check for other (locale-dependent / custom) formats:
    custom_formats = ['%d/%m/%Y', '%b %d %Y', '%B %d %Y',
                      '%d/%m/XXXX', '%d/%m/' + seasonal_key]
    for fmt in custom_formats:
        if usetime:
            for timefmt in timeformats:
                complfmt = fmt + dt_delim + timefmt
                try:
                    datetime.strptime(datestr, complfmt)
                    return complfmt
                except ValueError:
                    pass
        else:
            try:
                datetime.strptime(datestr, fmt)
                return fmt
            except ValueError:
                pass

    # Nothing matched.
    return None
Try to guess the format a date is written in. The following formats are supported: ================= ============== =============== Format Example Python format ----------------- -------------- --------------- ``YYYY-MM-DD`` 2002-04-21 %Y-%m-%d ``YYYY.MM.DD`` 2002.04.21 %Y.%m.%d ``YYYY MM DD`` 2002 04 21 %Y %m %d ``DD-MM-YYYY`` 21-04-2002 %d-%m-%Y ``DD.MM.YYYY`` 21.04.2002 %d.%m.%Y ``DD MM YYYY`` 21 04 2002 %d %m %Y ``DD/MM/YYYY`` 21/04/2002 %d/%m/%Y ================= ============== =============== These formats can also be used for seasonal (yearly recurring) time series. The year needs to be replaced by ``9999`` or another configurable year representing the seasonal year.. The following formats are recognised depending on your locale setting. There is no guarantee that this will work. ================= ============== =============== Format Example Python format ----------------- -------------- --------------- ``DD-mmm-YYYY`` 21-Apr-2002 %d-%b-%Y ``DD.mmm.YYYY`` 21.Apr.2002 %d.%b.%Y ``DD mmm YYYY`` 21 Apr 2002 %d %b %Y ``mmm DD YYYY`` Apr 21 2002 %b %d %Y ``Mmmmm DD YYYY`` April 21 2002 %B %d %Y ================= ============== =============== .. note:: - The time needs to follow this definition without exception: `%H:%M:%S.%f`. A complete date and time should therefore look like this:: 2002-04-21 15:29:37.522 - Be aware that in a file with comma separated values you should not use a date format that contains commas.
entailment
def reindex_timeseries(ts_string, new_timestamps):
    """
    Get data for the given timestamp(s) from a JSON-encoded timeseries.

    :param ts_string: a JSON string, in pandas-friendly format
    :param new_timestamps: a timestamp or list of timestamps (datetimes)
    :returns: a pandas data frame, reindexed with the supplied timestamps,
              or None if no data is found
    """
    # If a single timestamp is passed in, turn it into a list.
    # Reindexing can't work if it's not a list.
    if not isinstance(new_timestamps, list):
        new_timestamps = [new_timestamps]

    # Convert the incoming timestamps to datetimes if they are not datetimes.
    new_timestamps = [get_datetime(t) for t in new_timestamps]

    seasonal_year = config.get('DEFAULT', 'seasonal_year', '1678')
    seasonal_key = config.get('DEFAULT', 'seasonal_key', '9999')

    # Seasonal series use a placeholder year; swap it for a parsable year.
    ts = ts_string.replace(seasonal_key, seasonal_year)

    timeseries = pd.read_json(ts)

    idx = timeseries.index

    ts_timestamps = new_timestamps

    # 'Fix' the incoming timestamps in case the series is seasonal: map them
    # all onto the seasonal placeholder year so the lookup can match.
    # isinstance (rather than a type() equality check) also accepts
    # DatetimeIndex subclasses.
    if isinstance(idx, pd.DatetimeIndex):
        if set(idx.year) == set([int(seasonal_year)]):
            ts_timestamps = [t.replace(year=int(seasonal_year))
                             for t in ts_timestamps]

    # Reindex the timeseries to reflect the requested timestamps, forward
    # filling so each request gets the most recent prior value.
    reindexed_ts = timeseries.reindex(ts_timestamps, method='ffill')

    # Restore the caller's original timestamps as the index.
    i = reindexed_ts.index
    reindexed_ts.index = pd.Index(new_timestamps, names=i.names)

    # If there are no values at all, just return None.
    if len(reindexed_ts.dropna()) == 0:
        return None

    # Replace all numpy NaN values with None.
    pandas_ts = reindexed_ts.where(reindexed_ts.notnull(), None)

    return pandas_ts
get data for timestamp :param a JSON string, in pandas-friendly format :param a timestamp or list of timestamps (datetimes) :returns a pandas data frame, reindexed with the supplied timestamps or None if no data is found
entailment
def parse_time_step(time_step, target='s', units_ref=None):
    """
    Read in a time step string (e.g. "1 day") and convert it to the target
    unit (seconds by default).

    :param time_step: string containing a number followed by a unit
    :param target: unit to convert to (default 's')
    :param units_ref: object providing a ``convert(value, from, to)`` method
    :returns: (converted_time_step as float, original numeric value, period)
    :raises HydraPluginError: if no numeric value can be extracted
    """
    log.info("Parsing time step %s", time_step)

    # Extract the numerical value from the string using a regex.
    value = re.findall(r'\d+', time_step)[0]
    valuelen = len(value)

    try:
        value = float(value)
    except ValueError:
        # BUG FIX: the original built the exception but never raised it,
        # silently continuing with a string value.
        raise HydraPluginError("Unable to extract number of time steps (%s) from time step %s" % (value, time_step))

    # Whatever follows the number is the unit (e.g. 'day', 'mon').
    unit = time_step[valuelen:].strip()
    period = get_time_period(unit)

    log.info("Time period is %s", period)

    converted_time_step = units_ref.convert(value, period, target)

    log.info("Time period is %s %s", converted_time_step, period)

    return float(converted_time_step), value, period
Read in the time step and convert it to seconds.
entailment
def get_time_axis(start_time, end_time, time_step, time_axis=None):
    """
    Create a list of datetimes based on a start time, end time and time step.
    If such a list is already passed in (``time_axis``), it is cleaned and
    converted instead — no generation is necessary.  Often either the
    start_time/end_time/time_step triple or the time_axis is passed into an
    app; this function returns a time axis in both situations.

    :raises HydraPluginError: if time_axis is None and any of start_time,
                              end_time or time_step is missing.
    """
    # Do this import here to avoid a circular dependency
    from ..lib import units

    if time_axis is not None:
        actual_dates_axis = []
        for t in time_axis:
            # If the user has entered the time_axis with commas, remove them.
            t = t.replace(',', '').strip()
            if t == '':
                continue
            actual_dates_axis.append(get_datetime(t))
        return actual_dates_axis

    else:
        if start_time is None:
            raise HydraPluginError("A start time must be specified")
        if end_time is None:
            # Fixed typo in the original message ("And end time").
            raise HydraPluginError("An end time must be specified")
        if time_step is None:
            raise HydraPluginError("A time-step must be specified")

        start_date = get_datetime(start_time)
        end_date = get_datetime(end_time)
        delta_t, value, output_units = parse_time_step(time_step, units_ref=units)

        time_axis = [start_date]

        value = int(value)
        while start_date < end_date:
            # Months and years are a special case (variable length),
            # so treat them differently with relativedelta.
            if (output_units.lower() == "mon"):
                start_date = start_date + relativedelta(months=value)
            elif (output_units.lower() == "yr"):
                start_date = start_date + relativedelta(years=value)
            else:
                start_date += timedelta(seconds=delta_t)
            time_axis.append(start_date)
        return time_axis
Create a list of datetimes based on an start time, end time and time step. If such a list is already passed in, then this is not necessary. Often either the start_time, end_time, time_step is passed into an app or the time_axis is passed in directly. This function returns a time_axis in both situations.
entailment
def _get_all_attributes(network): """ Get all the complex mode attributes in the network so that they can be used for mapping to resource scenarios later. """ attrs = network.attributes for n in network.nodes: attrs.extend(n.attributes) for l in network.links: attrs.extend(l.attributes) for g in network.resourcegroups: attrs.extend(g.attributes) return attrs
Get all the complex mode attributes in the network so that they can be used for mapping to resource scenarios later.
entailment
def add_network(network,**kwargs):
    """
    Takes an entire network complex model and saves it to the DB.  This
    complex model includes links & scenarios (with resource data).  Returns
    the network's complex model.

    As links connect two nodes using the node_ids, if the nodes are new
    they will not yet have node_ids. In this case, use negative ids as
    temporary IDS until the node has been given an permanent ID.

    All inter-object referencing of new objects should be done using
    negative IDs in the client.

    The returned object will have positive IDS
    """
    # Disable autoflush so the bulk operations below control when SQL is emitted.
    db.DBSession.autoflush = False
    start_time = datetime.datetime.now()
    log.debug("Adding network")
    insert_start = datetime.datetime.now()

    # The target project must exist...
    proj_i = db.DBSession.query(Project).filter(Project.id == network.project_id).first()
    if proj_i is None:
        raise HydraError("Project ID is none. A project ID must be specified on the Network")

    # ...and network names must be unique within a project.
    existing_net = db.DBSession.query(Network).filter(Network.project_id == network.project_id, Network.name==network.name).first()
    if existing_net is not None:
        raise HydraError("A network with the name %s is already in project %s"%(network.name, network.project_id))

    user_id = kwargs.get('user_id')
    proj_i.check_write_permission(user_id)

    net_i = Network()
    net_i.project_id = network.project_id
    net_i.name = network.name
    net_i.description = network.description
    net_i.created_by = user_id
    net_i.projection = network.projection

    if network.layout is not None:
        net_i.layout = network.get_layout()

    # NOTE(review): net_i.id is copied before flush(), so it is presumably
    # still None here — confirm whether callers rely on network.id being set.
    network.id = net_i.id
    db.DBSession.add(net_i)
    db.DBSession.flush()

    #These two lists are used for comparison and lookup, so when
    #new attributes are added, these lists are extended.

    #List of all the resource attributes
    all_resource_attrs = {}

    # Bulk-add the network-level attributes and types.
    name_map = {network.name:net_i}
    network_attrs, network_defaults = _bulk_add_resource_attrs(net_i.id, 'NETWORK', [network], name_map)
    hdb.add_resource_types(net_i, network.types)
    all_resource_attrs.update(network_attrs)

    log.info("Network attributes added in %s", get_timing(start_time))

    # Bulk-add nodes, links and groups; each returns an id map (temporary
    # negative id -> DB object), the resource attrs, and default datasets.
    node_id_map, node_attrs, node_datasets = _add_nodes(net_i, network.nodes)
    all_resource_attrs.update(node_attrs)

    link_id_map, link_attrs, link_datasets = _add_links(net_i, network.links, node_id_map)
    all_resource_attrs.update(link_attrs)

    grp_id_map, grp_attrs, grp_datasets = _add_resource_groups(net_i, network.resourcegroups)
    all_resource_attrs.update(grp_attrs)

    # Default datasets, attached to every scenario below.
    defaults = list(grp_datasets.values()) + list(link_datasets.values()) \
            + list(node_datasets.values()) + list(network_defaults.values())

    start_time = datetime.datetime.now()

    scenario_names = []
    if network.scenarios is not None:
        log.info("Adding scenarios to network")
        for s in network.scenarios:
            if s.name in scenario_names:
                raise HydraError("Duplicate scenario name: %s"%(s.name))

            scen = Scenario()
            scen.name = s.name
            scen.description = s.description
            scen.layout = s.get_layout()
            scen.start_time = str(timestamp_to_ordinal(s.start_time)) if s.start_time else None
            scen.end_time = str(timestamp_to_ordinal(s.end_time)) if s.end_time else None
            scen.time_step = s.time_step
            scen.created_by = user_id

            scenario_names.append(s.name)

            #extract the data from each resourcescenario
            incoming_datasets = []
            scenario_resource_attrs = []
            for r_scen in s.resourcescenarios:
                ra = all_resource_attrs[r_scen.resource_attr_id]
                incoming_datasets.append(r_scen.dataset)
                scenario_resource_attrs.append(ra)

            data_start_time = datetime.datetime.now()

            # Attach the default datasets first.
            for default in defaults:
                scen.add_resource_scenario(JSONObject(default),
                                           JSONObject({'id':default['dataset_id']}),
                                           source=kwargs.get('app_name'))

            # Bulk-insert the scenario's datasets in one go.
            datasets = data._bulk_insert_data(
                incoming_datasets,
                user_id,
                kwargs.get('app_name')
            )

            log.info("Data bulk insert took %s", get_timing(data_start_time))

            # Pair each inserted dataset back with its resource attribute
            # (order is preserved by _bulk_insert_data).
            ra_start_time = datetime.datetime.now()
            for i, ra in enumerate(scenario_resource_attrs):
                scen.add_resource_scenario(ra, datasets[i], source=kwargs.get('app_name'))
            log.info("Resource scenarios added in %s", get_timing(ra_start_time))

            item_start_time = datetime.datetime.now()
            if s.resourcegroupitems is not None:
                for group_item in s.resourcegroupitems:
                    group_item_i = ResourceGroupItem()
                    group_item_i.group = grp_id_map[group_item.group_id]
                    group_item_i.ref_key = group_item.ref_key
                    # Resolve the (possibly temporary) referenced id through
                    # the appropriate id map.
                    if group_item.ref_key == 'NODE':
                        group_item_i.node = node_id_map[group_item.ref_id]
                    elif group_item.ref_key == 'LINK':
                        group_item_i.link = link_id_map[group_item.ref_id]
                    elif group_item.ref_key == 'GROUP':
                        group_item_i.subgroup = grp_id_map[group_item.ref_id]
                    else:
                        raise HydraError("A ref key of %s is not valid for a "
                                         "resource group item.",\
                                         group_item.ref_key)
                    scen.resourcegroupitems.append(group_item_i)
            log.info("Group items insert took %s", get_timing(item_start_time))
            net_i.scenarios.append(scen)

    log.info("Scenarios added in %s", get_timing(start_time))

    net_i.set_owner(user_id)

    db.DBSession.flush()

    log.info("Insertion of network took: %s",(datetime.datetime.now()-insert_start))

    return net_i
Takes an entire network complex model and saves it to the DB. This complex model includes links & scenarios (with resource data). Returns the network's complex model. As links connect two nodes using the node_ids, if the nodes are new they will not yet have node_ids. In this case, use negative ids as temporary IDS until the node has been given an permanent ID. All inter-object referencing of new objects should be done using negative IDs in the client. The returned object will have positive IDS
entailment
def _get_all_resource_attributes(network_id, template_id=None):
    """
    Get all the attributes for the nodes, links and groups of a network.
    Return these attributes as a dictionary, keyed on type (NODE, LINK,
    GROUP, NETWORK) then by ID of the node or link.

    :param network_id: the network to fetch attributes for
    :param template_id: if given, restrict to attributes belonging to types
                        in this template
    """
    # One flat column-level query, specialised four ways (node / link /
    # group / network) and unioned, to avoid per-resource queries.
    base_qry = db.DBSession.query(
        ResourceAttr.id.label('id'),
        ResourceAttr.ref_key.label('ref_key'),
        ResourceAttr.cr_date.label('cr_date'),
        ResourceAttr.attr_is_var.label('attr_is_var'),
        ResourceAttr.node_id.label('node_id'),
        ResourceAttr.link_id.label('link_id'),
        ResourceAttr.group_id.label('group_id'),
        ResourceAttr.network_id.label('network_id'),
        ResourceAttr.attr_id.label('attr_id'),
        Attr.name.label('name'),
        Attr.dimension_id.label('dimension_id'),
    ).filter(Attr.id==ResourceAttr.attr_id)

    all_node_attribute_qry = base_qry.join(Node).filter(Node.network_id==network_id)
    all_link_attribute_qry = base_qry.join(Link).filter(Link.network_id==network_id)
    all_group_attribute_qry = base_qry.join(ResourceGroup).filter(ResourceGroup.network_id==network_id)
    network_attribute_qry = base_qry.filter(ResourceAttr.network_id==network_id)

    # Filter the attributes by template
    if template_id is not None:
        all_node_attribute_qry = all_node_attribute_qry.join(ResourceType).join(TemplateType).join(TypeAttr).filter(TemplateType.template_id==template_id).filter(ResourceAttr.attr_id==TypeAttr.attr_id)
        all_link_attribute_qry = all_link_attribute_qry.join(ResourceType).join(TemplateType).join(TypeAttr).filter(TemplateType.template_id==template_id).filter(ResourceAttr.attr_id==TypeAttr.attr_id)
        all_group_attribute_qry = all_group_attribute_qry.join(ResourceType).join(TemplateType).join(TypeAttr).filter(TemplateType.template_id==template_id).filter(ResourceAttr.attr_id==TypeAttr.attr_id)
        network_attribute_qry = network_attribute_qry.join(ResourceType, ResourceAttr.network_id==ResourceType.network_id).join(TemplateType).join(TypeAttr).filter(TemplateType.template_id==template_id).filter(ResourceAttr.attr_id==TypeAttr.attr_id)

    x = time.time()
    # Use the module-level logger consistently (original mixed `logging` and `log`).
    log.info("Getting all attributes using execute")
    attribute_qry = all_node_attribute_qry.union(all_link_attribute_qry,
                                                 all_group_attribute_qry,
                                                 network_attribute_qry)
    all_attributes = db.DBSession.execute(attribute_qry.statement).fetchall()
    log.info("%s attrs retrieved in %s", len(all_attributes), time.time()-x)

    log.info("Attributes retrieved. Processing results...")
    x = time.time()

    # Bucket the flat result rows by resource type, then by resource id.
    node_attr_dict = dict()
    link_attr_dict = dict()
    group_attr_dict = dict()
    network_attr_dict = dict()

    for attr in all_attributes:
        if attr.ref_key == 'NODE':
            node_attr_dict.setdefault(attr.node_id, []).append(attr)
        elif attr.ref_key == 'LINK':
            link_attr_dict.setdefault(attr.link_id, []).append(attr)
        elif attr.ref_key == 'GROUP':
            group_attr_dict.setdefault(attr.group_id, []).append(attr)
        elif attr.ref_key == 'NETWORK':
            network_attr_dict.setdefault(attr.network_id, []).append(attr)

    all_attributes = {
        'NODE' : node_attr_dict,
        'LINK' : link_attr_dict,
        'GROUP': group_attr_dict,
        'NETWORK': network_attr_dict,
    }

    log.info("Attributes processed in %s", time.time()-x)

    return all_attributes
Get all the attributes for the nodes, links and groups of a network. Return these attributes as a dictionary, keyed on type (NODE, LINK, GROUP) then by ID of the node or link.
entailment
def _get_all_templates(network_id, template_id):
    """
    Get all the templates for the nodes, links and groups of a network.
    Return these templates as a dictionary, keyed on type (NODE, LINK,
    GROUP, NETWORK) then by ID of the node or link.

    :param network_id: the network to fetch types for
    :param template_id: if given, restrict to types in this template
    """
    # One flat query over ResourceType joined to its Template/TemplateType,
    # specialised four ways and unioned.
    base_qry = db.DBSession.query(
        ResourceType.ref_key.label('ref_key'),
        ResourceType.node_id.label('node_id'),
        ResourceType.link_id.label('link_id'),
        ResourceType.group_id.label('group_id'),
        ResourceType.network_id.label('network_id'),
        Template.name.label('template_name'),
        Template.id.label('template_id'),
        TemplateType.id.label('type_id'),
        TemplateType.layout.label('layout'),
        TemplateType.name.label('type_name'),
    ).filter(TemplateType.id==ResourceType.type_id,
             Template.id==TemplateType.template_id)

    all_node_type_qry = base_qry.filter(Node.id==ResourceType.node_id,
                                        Node.network_id==network_id)
    all_link_type_qry = base_qry.filter(Link.id==ResourceType.link_id,
                                        Link.network_id==network_id)
    all_group_type_qry = base_qry.filter(ResourceGroup.id==ResourceType.group_id,
                                         ResourceGroup.network_id==network_id)
    network_type_qry = base_qry.filter(ResourceType.network_id==network_id)

    # Filter the types by template
    if template_id is not None:
        all_node_type_qry = all_node_type_qry.filter(Template.id==template_id)
        all_link_type_qry = all_link_type_qry.filter(Template.id==template_id)
        all_group_type_qry = all_group_type_qry.filter(Template.id==template_id)

    x = time.time()
    log.info("Getting all types")
    type_qry = all_node_type_qry.union(all_link_type_qry,
                                       all_group_type_qry,
                                       network_type_qry)
    all_types = db.DBSession.execute(type_qry.statement).fetchall()
    log.info("%s types retrieved in %s", len(all_types), time.time()-x)

    log.info("Attributes retrieved. Processing results...")
    x = time.time()

    # Bucket the flat rows by resource type, then by resource id.
    node_type_dict = dict()
    link_type_dict = dict()
    group_type_dict = dict()
    network_type_dict = dict()

    for t in all_types:
        templatetype = JSONObject({'template_id':t.template_id,
                                   'id':t.type_id,
                                   'template_name':t.template_name,
                                   'layout': t.layout,
                                   'name': t.type_name,})

        if t.ref_key == 'NODE':
            node_type_dict.setdefault(t.node_id, []).append(templatetype)
        elif t.ref_key == 'LINK':
            link_type_dict.setdefault(t.link_id, []).append(templatetype)
        elif t.ref_key == 'GROUP':
            group_type_dict.setdefault(t.group_id, []).append(templatetype)
        elif t.ref_key == 'NETWORK':
            network_type_dict.setdefault(t.network_id, []).append(templatetype)

    all_types = {
        'NODE' : node_type_dict,
        'LINK' : link_type_dict,
        'GROUP': group_type_dict,
        'NETWORK': network_type_dict,
    }

    # Use the module-level logger consistently (original mixed `logging` and `log`).
    log.info("Attributes processed in %s", time.time()-x)

    return all_types
Get all the templates for the nodes, links and groups of a network. Return these templates as a dictionary, keyed on type (NODE, LINK, GROUP) then by ID of the node or link.
entailment
def _get_all_group_items(network_id):
    """
    Get all the resource group items in the network, across all scenarios.

    :param network_id: the network to fetch group items for
    :returns: a dictionary of lists of group-item rows, keyed on scenario_id
    """
    base_qry = db.DBSession.query(ResourceGroupItem)

    item_qry = base_qry.join(Scenario).filter(Scenario.network_id==network_id)

    x = time.time()
    # Use the module-level logger consistently (original mixed `logging` and `log`).
    log.info("Getting all items")
    all_items = db.DBSession.execute(item_qry.statement).fetchall()
    log.info("%s groups jointly retrieved in %s", len(all_items), time.time()-x)

    log.info("items retrieved. Processing results...")
    x = time.time()

    # Bucket items by their scenario.
    item_dict = dict()
    for item in all_items:
        item_dict.setdefault(item.scenario_id, []).append(item)

    log.info("items processed in %s", time.time()-x)

    return item_dict
Get all the resource group items in the network, across all scenarios returns a dictionary of dict objects, keyed on scenario_id
entailment
def _get_all_resourcescenarios(network_id, user_id):
    """
    Get all the resource scenarios in a network, across all scenarios.

    :param network_id: the network to fetch resource scenarios for
    :param user_id: used for dataset visibility — hidden datasets are only
                    returned to their creator or an owner
    :returns: a dictionary of lists of JSONObjects, keyed on scenario_id
    """
    rs_qry = db.DBSession.query(
        Dataset.type,
        Dataset.unit_id,
        Dataset.name,
        Dataset.hash,
        Dataset.cr_date,
        Dataset.created_by,
        Dataset.hidden,
        Dataset.value,
        ResourceScenario.dataset_id,
        ResourceScenario.scenario_id,
        ResourceScenario.resource_attr_id,
        ResourceScenario.source,
        ResourceAttr.attr_id,
    ).outerjoin(DatasetOwner, and_(DatasetOwner.dataset_id==Dataset.id,
                                   DatasetOwner.user_id==user_id)).filter(
        # Hidden datasets are visible only to their creator or an owner.
        or_(Dataset.hidden=='N', Dataset.created_by==user_id, DatasetOwner.user_id != None),
        ResourceAttr.id == ResourceScenario.resource_attr_id,
        Scenario.id==ResourceScenario.scenario_id,
        Scenario.network_id==network_id,
        Dataset.id==ResourceScenario.dataset_id)

    x = time.time()
    # Use the module-level logger consistently (original mixed `logging` and `log`).
    log.info("Getting all resource scenarios")
    all_rs = db.DBSession.execute(rs_qry.statement).fetchall()
    log.info("%s resource scenarios retrieved in %s", len(all_rs), time.time()-x)

    log.info("resource scenarios retrieved. Processing results...")
    x = time.time()

    rs_dict = dict()
    for rs in all_rs:
        rs_obj = JSONObject(rs)
        rs_attr = JSONObject({'attr_id':rs.attr_id})
        value = rs.value

        # Rebuild the dataset as a JSONDataset; metadata is filled in later
        # by the caller (see _get_scenarios).
        rs_dataset = JSONDataset({
            'id':rs.dataset_id,
            'type' : rs.type,
            'unit_id' : rs.unit_id,
            'name' : rs.name,
            'hash' : rs.hash,
            'cr_date':rs.cr_date,
            'created_by':rs.created_by,
            'hidden':rs.hidden,
            'value':value,
            'metadata':{},
        })
        rs_obj.resourceattr = rs_attr
        rs_obj.value = rs_dataset
        rs_obj.dataset = rs_dataset

        rs_dict.setdefault(rs.scenario_id, []).append(rs_obj)

    log.info("resource scenarios processed in %s", time.time()-x)

    return rs_dict
Get all the resource scenarios in a network, across all scenarios returns a dictionary of dict objects, keyed on scenario_id
entailment
def _get_metadata(network_id, user_id):
    """
    Get all the metadata in a network, across all scenarios.

    :param network_id: the network to fetch metadata for
    :param user_id: used for dataset visibility — hidden datasets are only
                    included if the user is an owner
    :returns: a dictionary of {key: value} dicts, keyed on dataset ID
    """
    log.info("Getting Metadata")
    # Subquery: datasets used in this network's scenarios that are visible
    # to this user.
    dataset_qry = db.DBSession.query(
        Dataset
    ).outerjoin(DatasetOwner, and_(DatasetOwner.dataset_id==Dataset.id,
                                   DatasetOwner.user_id==user_id)).filter(
        or_(Dataset.hidden=='N', DatasetOwner.user_id != None),
        Scenario.id==ResourceScenario.scenario_id,
        Scenario.network_id==network_id,
        Dataset.id==ResourceScenario.dataset_id).distinct().subquery()

    rs_qry = db.DBSession.query(
        Metadata
    ).join(dataset_qry, Metadata.dataset_id==dataset_qry.c.id)

    x = time.time()
    # Use the module-level logger consistently and fix the "matadata" typo.
    log.info("Getting all metadata")
    all_metadata = db.DBSession.execute(rs_qry.statement).fetchall()
    log.info("%s metadata jointly retrieved in %s", len(all_metadata), time.time()-x)

    log.info("metadata retrieved. Processing results...")
    x = time.time()

    metadata_dict = dict()
    for m in all_metadata:
        metadata_dict.setdefault(m.dataset_id, {})[m.key] = six.text_type(m.value)

    log.info("metadata processed in %s", time.time()-x)

    return metadata_dict
Get all the metadata in a network, across all scenarios returns a dictionary of dict objects, keyed on dataset ID
entailment
def _get_network_owners(network_id):
    """
    Get all the owners of a network.

    (The original docstring said "Get all the nodes in a network" — a
    copy-paste error; this function queries NetworkOwner rows.)

    :param network_id: the network whose owners are wanted
    :returns: a list of JSONObjects, one per owner, with the related user
              eagerly loaded
    """
    owners_i = db.DBSession.query(NetworkOwner).filter(
        NetworkOwner.network_id==network_id).options(
        noload('network')).options(
        joinedload_all('user')).all()

    owners = [JSONObject(owner_i) for owner_i in owners_i]

    return owners
Get all the nodes in a network
entailment
def _get_nodes(network_id, template_id=None):
    """
    Fetch every active node in the given network as a JSONObject, optionally
    restricted to nodes whose type belongs to the given template.
    """
    # Relations are populated separately by the caller, so start them empty.
    extras = {'types':[], 'attributes':[]}

    query = db.DBSession.query(Node).filter(
        Node.network_id==network_id,
        Node.status=='A').options(
            noload('network')
        )

    if template_id is not None:
        query = query.filter(ResourceType.node_id==Node.id,
                             TemplateType.id==ResourceType.type_id,
                             TemplateType.template_id==template_id)

    rows = db.DBSession.execute(query.statement).fetchall()

    return [JSONObject(row, extras=extras) for row in rows]
Get all the nodes in a network
entailment
def _get_links(network_id, template_id=None):
    """
    Fetch every active link in the given network as a JSONObject, optionally
    restricted to links whose type belongs to the given template.
    """
    # Relations are populated separately by the caller, so start them empty.
    extras = {'types':[], 'attributes':[]}

    query = db.DBSession.query(Link).filter(
        Link.network_id==network_id,
        Link.status=='A').options(
            noload('network')
        )

    if template_id is not None:
        query = query.filter(ResourceType.link_id==Link.id,
                             TemplateType.id==ResourceType.type_id,
                             TemplateType.template_id==template_id)

    rows = db.DBSession.execute(query.statement).fetchall()

    return [JSONObject(row, extras=extras) for row in rows]
Get all the links in a network
entailment
def _get_groups(network_id, template_id=None):
    """
    Fetch every active resource group in the given network as a JSONObject,
    optionally restricted to groups whose type belongs to the given template.
    """
    # Relations are populated separately by the caller, so start them empty.
    extras = {'types':[], 'attributes':[]}

    query = db.DBSession.query(ResourceGroup).filter(
        ResourceGroup.network_id==network_id,
        ResourceGroup.status=='A').options(
            noload('network')
        )

    if template_id is not None:
        query = query.filter(ResourceType.group_id==ResourceGroup.id,
                             TemplateType.id==ResourceType.type_id,
                             TemplateType.template_id==template_id)

    rows = db.DBSession.execute(query.statement).fetchall()

    return [JSONObject(row, extras=extras) for row in rows]
Get all the resource groups in a network
entailment
def _get_scenarios(network_id, include_data, user_id, scenario_ids=None):
    """
    Get all the active scenarios in a network.

    :param network_id: the network to fetch scenarios for
    :param include_data: 'Y' or True to also attach resource scenarios and
                         dataset metadata
    :param user_id: used for dataset visibility
    :param scenario_ids: optional list of scenario IDs to restrict to
    :returns: a list of JSONObject scenarios
    """
    scen_qry = db.DBSession.query(Scenario).filter(
        Scenario.network_id == network_id).options(
        noload('network')).filter(
        Scenario.status == 'A')

    if scenario_ids:
        # Use the module-level logger consistently.
        log.info("Filtering by scenario_ids %s", scenario_ids)
        scen_qry = scen_qry.filter(Scenario.id.in_(scenario_ids))

    extras = {'resourcescenarios': [], 'resourcegroupitems': []}
    scens = [JSONObject(s, extras=extras) for s in db.DBSession.execute(scen_qry.statement).fetchall()]

    all_resource_group_items = _get_all_group_items(network_id)

    # Evaluate the flag once rather than re-testing it per scenario.
    with_data = include_data == 'Y' or include_data == True

    if with_data:
        all_rs = _get_all_resourcescenarios(network_id, user_id)
        metadata = _get_metadata(network_id, user_id)

    for s in scens:
        s.resourcegroupitems = all_resource_group_items.get(s.id, [])

        if with_data:
            s.resourcescenarios = all_rs.get(s.id, [])
            for rs in s.resourcescenarios:
                rs.dataset.metadata = metadata.get(rs.dataset_id, {})

    return scens
Get all the scenarios in a network
entailment
def get_network(network_id, summary=False, include_data='N', scenario_ids=None, template_id=None, **kwargs):
    """
    Return a whole network as a dictionary.
    network_id: ID of the network to retrieve
    include_data: 'Y' or 'N'. Indicate whether scenario data is to be returned.
                  This has a significant speed impact as retrieving large
                  amounts of data can be expensive.
    scenario_ids: list of IDS to be returned. Used if a network has multiple
                  scenarios but you only want one returned. Using this filter
                  will speed up this function call.
    template_id: Return the network with only attributes associated with this
                 template on the network, groups, nodes and links.
    """
    log.debug("getting network %s"%network_id)
    user_id = kwargs.get('user_id')

    network_id = int(network_id)

    try:
        log.debug("Querying Network %s", network_id)
        # noload() every relation — they are fetched below with dedicated
        # bulk queries, which is far faster than ORM relationship loading.
        net_i = db.DBSession.query(Network).filter(
            Network.id == network_id).options(
            noload('scenarios')).options(
            noload('nodes')).options(
            noload('links')).options(
            noload('types')).options(
            noload('attributes')).options(
            noload('resourcegroups')).one()

        net_i.check_read_permission(user_id)

        net = JSONObject(net_i)
        net.nodes = _get_nodes(network_id, template_id=template_id)
        net.links = _get_links(network_id, template_id=template_id)
        net.resourcegroups = _get_groups(network_id, template_id=template_id)
        net.owners = _get_network_owners(network_id)

        # A summary skips the (expensive) attribute fetch entirely.
        if summary is False:
            all_attributes = _get_all_resource_attributes(network_id, template_id)
            log.info("Setting attributes")
            net.attributes = all_attributes['NETWORK'].get(network_id, [])
            for node_i in net.nodes:
                node_i.attributes = all_attributes['NODE'].get(node_i.id, [])
            log.info("Node attributes set")
            for link_i in net.links:
                link_i.attributes = all_attributes['LINK'].get(link_i.id, [])
            log.info("Link attributes set")
            for group_i in net.resourcegroups:
                group_i.attributes = all_attributes['GROUP'].get(group_i.id, [])
            log.info("Group attributes set")

        log.info("Setting types")

        # Attach template types to the network and each resource.
        all_types = _get_all_templates(network_id, template_id)
        net.types = all_types['NETWORK'].get(network_id, [])
        for node_i in net.nodes:
            node_i.types = all_types['NODE'].get(node_i.id, [])
        for link_i in net.links:
            link_i.types = all_types['LINK'].get(link_i.id, [])
        for group_i in net.resourcegroups:
            group_i.types = all_types['GROUP'].get(group_i.id, [])

        log.info("Getting scenarios")

        net.scenarios = _get_scenarios(network_id, include_data, user_id, scenario_ids)

    except NoResultFound:
        raise ResourceNotFoundError("Network (network_id=%s) not found." % network_id)

    return net
Return a whole network as a dictionary. network_id: ID of the network to retrieve include_data: 'Y' or 'N'. Indicate whether scenario data is to be returned. This has a significant speed impact as retrieving large amounts of data can be expensive. scenario_ids: list of IDS to be returned. Used if a network has multiple scenarios but you only want one returned. Using this filter will speed up this function call. template_id: Return the network with only attributes associated with this template on the network, groups, nodes and links.
entailment
def get_nodes(network_id, template_id=None, **kwargs): """ Get all the nodes in a network. args: network_id (int): The network in which to search template_id (int): Only return nodes whose type is in this template. """ user_id = kwargs.get('user_id') try: net_i = db.DBSession.query(Network).filter(Network.id == network_id).one() net_i.check_read_permission(user_id=user_id) except NoResultFound: raise ResourceNotFoundError("Network %s not found"%(network_id)) node_qry = db.DBSession.query(Node).filter( Node.network_id==network_id, Node.status=='A').options( noload('network') ).options( joinedload_all('types.templatetype') ).options( joinedload_all('attributes.attr') ) if template_id is not None: node_qry = node_qry.filter(ResourceType.node_id==Node.id, TemplateType.id==ResourceType.type_id, TemplateType.template_id==template_id) nodes = node_qry.all() return nodes
Get all the nodes in a network. args: network_id (int): The network in which to search template_id (int): Only return nodes whose type is in this template.
entailment
def get_links(network_id, template_id=None, **kwargs): """ Get all the links in a network. args: network_id (int): The network in which to search template_id (int): Only return links whose type is in this template. """ user_id = kwargs.get('user_id') try: net_i = db.DBSession.query(Network).filter(Network.id == network_id).one() net_i.check_read_permission(user_id=user_id) except NoResultFound: raise ResourceNotFoundError("Network %s not found"%(network_id)) link_qry = db.DBSession.query(Link).filter( Link.network_id==network_id, Link.status=='A').options( noload('network') ).options( joinedload_all('types.templatetype') ).options( joinedload_all('attributes.attr') ) if template_id is not None: link_qry = link_qry.filter(ResourceType.link_id==Link.id, TemplateType.id==ResourceType.type_id, TemplateType.template_id==template_id) links = link_qry.all() return links
Get all the links in a network. args: network_id (int): The network in which to search template_id (int): Only return links whose type is in this template.
entailment
def get_groups(network_id, template_id=None, **kwargs): """ Get all the resource groups in a network. args: network_id (int): The network in which to search template_id (int): Only return resource groups whose type is in this template. """ user_id = kwargs.get('user_id') try: net_i = db.DBSession.query(Network).filter(Network.id == network_id).one() net_i.check_read_permission(user_id=user_id) except NoResultFound: raise ResourceNotFoundError("Network %s not found"%(network_id)) group_qry = db.DBSession.query(ResourceGroup).filter( ResourceGroup.network_id==network_id, ResourceGroup.status=='A').options( noload('network') ).options( joinedload_all('types.templatetype') ).options( joinedload_all('attributes.attr') ) if template_id is not None: group_qry = group_qry.filter(ResourceType.group_id==ResourceGroup.id, TemplateType.id==ResourceType.type_id, TemplateType.template_id==template_id) groups = group_qry.all() return groups
Get all the resource groups in a network. args: network_id (int): The network in which to search template_id (int): Only return resource groups whose type is in this template.
entailment
def get_network_by_name(project_id, network_name,**kwargs): """ Return a whole network as a complex model. """ try: res = db.DBSession.query(Network.id).filter(func.lower(Network.name).like(network_name.lower()), Network.project_id == project_id).one() net = get_network(res.id, 'Y', None, **kwargs) return net except NoResultFound: raise ResourceNotFoundError("Network with name %s not found"%(network_name))
Return a whole network as a complex model.
entailment
def network_exists(project_id, network_name,**kwargs): """ Return a whole network as a complex model. """ try: db.DBSession.query(Network.id).filter(func.lower(Network.name).like(network_name.lower()), Network.project_id == project_id).one() return 'Y' except NoResultFound: return 'N'
Return a whole network as a complex model.
entailment
def update_network(network, update_nodes = True, update_links = True, update_groups = True, update_scenarios = True, **kwargs): """ Update an entire network """ log.info("Updating Network %s", network.name) user_id = kwargs.get('user_id') #check_perm('update_network') try: net_i = db.DBSession.query(Network).filter(Network.id == network.id).one() except NoResultFound: raise ResourceNotFoundError("Network with id %s not found"%(network.id)) net_i.project_id = network.project_id net_i.name = network.name net_i.description = network.description net_i.projection = network.projection net_i.layout = network.get_layout() all_resource_attrs = {} new_network_attributes = _update_attributes(net_i, network.attributes) all_resource_attrs.update(new_network_attributes) hdb.add_resource_types(net_i, network.types) #Maps temporary node_ids to real node_ids node_id_map = dict() if network.nodes is not None and update_nodes is True: log.info("Updating nodes") t0 = time.time() #First add all the nodes node_id_map = dict([(n.id, n) for n in net_i.nodes]) for node in network.nodes: #If we get a negative or null node id, we know #it is a new node. 
if node.id is not None and node.id > 0: n = node_id_map[node.id] n.name = node.name n.description = node.description n.x = node.x n.y = node.y n.status = node.status n.layout = node.get_layout() else: log.info("Adding new node %s", node.name) n = net_i.add_node(node.name, node.description, node.get_layout(), node.x, node.y) net_i.nodes.append(n) node_id_map[n.id] = n all_resource_attrs.update(_update_attributes(n, node.attributes)) hdb.add_resource_types(n, node.types) log.info("Updating nodes took %s", time.time() - t0) link_id_map = dict() if network.links is not None and update_links is True: log.info("Updating links") t0 = time.time() link_id_map = dict([(l.link_id, l) for l in net_i.links]) for link in network.links: node_1 = node_id_map[link.node_1_id] node_2 = node_id_map[link.node_2_id] if link.id is None or link.id < 0: log.info("Adding new link %s", link.name) l = net_i.add_link(link.name, link.description, link.get_layout(), node_1, node_2) net_i.links.append(l) link_id_map[link.id] = l else: l = link_id_map[link.id] l.name = link.name l.link_descripion = link.description l.node_a = node_1 l.node_b = node_2 l.layout = link.get_layout() all_resource_attrs.update(_update_attributes(l, link.attributes)) hdb.add_resource_types(l, link.types) log.info("Updating links took %s", time.time() - t0) group_id_map = dict() #Next all the groups if network.resourcegroups is not None and update_groups is True: log.info("Updating groups") t0 = time.time() group_id_map = dict([(g.group_id, g) for g in net_i.resourcegroups]) for group in network.resourcegroups: #If we get a negative or null group id, we know #it is a new group. 
if group.id is not None and group.id > 0: g_i = group_id_map[group.id] g_i.name = group.name g_i.description = group.description g_i.status = group.status else: log.info("Adding new group %s", group.name) g_i = net_i.add_group(group.name, group.description, group.status) net_i.resourcegroups.append(net_i) group_id_map[g_i.group_id] = g_i all_resource_attrs.update(_update_attributes(g_i, group.attributes)) hdb.add_resource_types(g_i, group.types) group_id_map[group.id] = g_i log.info("Updating groups took %s", time.time() - t0) errors = [] if network.scenarios is not None and update_scenarios is True: for s in network.scenarios: add_scenario = False if s.id is not None: if s.id > 0: try: scen_i = db.DBSession.query(Scenario).filter(Scenario.id==s.id).one() if scen_i.locked == 'Y': errors.append('Scenario %s was not updated as it is locked'%(s.id)) continue scenario.update_scenario(s, flush=False, **kwargs) except NoResultFound: raise ResourceNotFoundError("Scenario %s not found"%(s.id)) else: add_scenario = True else: add_scenario = True if add_scenario is True: log.info("Adding new scenario %s to network", s.name) scenario.add_scenario(network.id, s, **kwargs) db.DBSession.flush() updated_net = get_network(network.id, summary=True, **kwargs) return updated_net
Update an entire network
entailment
def set_network_status(network_id,status,**kwargs): """ Activates a network by setting its status attribute to 'A'. """ user_id = kwargs.get('user_id') #check_perm(user_id, 'delete_network') try: net_i = db.DBSession.query(Network).filter(Network.id == network_id).one() net_i.check_write_permission(user_id) net_i.status = status except NoResultFound: raise ResourceNotFoundError("Network %s not found"%(network_id)) db.DBSession.flush() return 'OK'
Activates a network by setting its status attribute to 'A'.
entailment
def get_network_extents(network_id,**kwargs): """ Given a network, return its maximum extents. This would be the minimum x value of all nodes, the minimum y value of all nodes, the maximum x value of all nodes and maximum y value of all nodes. @returns NetworkExtents object """ rs = db.DBSession.query(Node.x, Node.y).filter(Node.network_id==network_id).all() if len(rs) == 0: return dict( network_id = network_id, min_x=None, max_x=None, min_y=None, max_y=None, ) # Compute min/max extent of the network. x = [r.x for r in rs if r.x is not None] if len(x) > 0: x_min = min(x) x_max = max(x) else: # Default x extent if all None values x_min, x_max = 0, 1 y = [r.y for r in rs if r.y is not None] if len(y) > 0: y_min = min(y) y_max = max(y) else: # Default y extent if all None values y_min, y_max = 0, 1 ne = JSONObject(dict( network_id = network_id, min_x=x_min, max_x=x_max, min_y=y_min, max_y=y_max, )) return ne
Given a network, return its maximum extents. This would be the minimum x value of all nodes, the minimum y value of all nodes, the maximum x value of all nodes and maximum y value of all nodes. @returns NetworkExtents object
entailment
def add_nodes(network_id, nodes,**kwargs): """ Add nodes to network """ start_time = datetime.datetime.now() names=[] # used to check uniqueness of node name for n_i in nodes: if n_i.name in names: raise HydraError("Duplicate Node Name: %s"%(n_i.name)) names.append(n_i.name) user_id = kwargs.get('user_id') try: net_i = db.DBSession.query(Network).filter(Network.id == network_id).one() net_i.check_write_permission(user_id) except NoResultFound: raise ResourceNotFoundError("Network %s not found"%(network_id)) _add_nodes_to_database(net_i, nodes) net_i.project_id=net_i.project_id db.DBSession.flush() node_s = db.DBSession.query(Node).filter(Node.network_id==network_id).all() #Maps temporary node_ids to real node_ids node_id_map = dict() iface_nodes = dict() for n_i in node_s: iface_nodes[n_i.name] = n_i for node in nodes: node_id_map[node.id] = iface_nodes[node.name] _bulk_add_resource_attrs(network_id, 'NODE', nodes, iface_nodes) log.info("Nodes added in %s", get_timing(start_time)) return node_s
Add nodes to network
entailment
def add_links(network_id, links,**kwargs): ''' add links to network ''' start_time = datetime.datetime.now() user_id = kwargs.get('user_id') names=[] # used to check uniqueness of link name before saving links to database for l_i in links: if l_i.name in names: raise HydraError("Duplicate Link Name: %s"%(l_i.name)) names.append(l_i.name) try: net_i = db.DBSession.query(Network).filter(Network.id == network_id).one() net_i.check_write_permission(user_id) except NoResultFound: raise ResourceNotFoundError("Network %s not found"%(network_id)) node_id_map=dict() for node in net_i.nodes: node_id_map[node.id]=node _add_links_to_database(net_i, links, node_id_map) net_i.project_id=net_i.project_id db.DBSession.flush() link_s = db.DBSession.query(Link).filter(Link.network_id==network_id).all() iface_links = {} for l_i in link_s: iface_links[l_i.name] = l_i link_attrs = _bulk_add_resource_attrs(net_i.id, 'LINK', links, iface_links) log.info("Nodes added in %s", get_timing(start_time)) return link_s
add links to network
entailment
def update_node(node, flush=True, **kwargs): """ Update a node. If new attributes are present, they will be added to the node. The non-presence of attributes does not remove them. The flush argument indicates whether dbsession.flush should be called. THis is set to False when update_node is called from another function which does the flush. """ user_id = kwargs.get('user_id') try: node_i = db.DBSession.query(Node).filter(Node.id == node.id).one() except NoResultFound: raise ResourceNotFoundError("Node %s not found"%(node.id)) node_i.network.check_write_permission(user_id) node_i.name = node.name if node.name is not None else node_i.name node_i.x = node.x if node.x is not None else node_i.x node_i.y = node.y if node.y is not None else node_i.y node_i.description = node.description if node.description is not None else node_i.description node_i.layout = node.get_layout() if node.layout is not None else node_i.layout if node.attributes is not None: _update_attributes(node_i, node.attributes) if node.types is not None: hdb.add_resource_types(node_i, node.types) if flush is True: db.DBSession.flush() return node_i
Update a node. If new attributes are present, they will be added to the node. The non-presence of attributes does not remove them. The flush argument indicates whether dbsession.flush should be called. THis is set to False when update_node is called from another function which does the flush.
entailment
def update_nodes(nodes,**kwargs): """ Update multiple nodes. If new attributes are present, they will be added to the node. The non-presence of attributes does not remove them. %TODO:merge this with the 'update_nodes' functionality in the 'update_netework' function, so we're not duplicating functionality. D.R.Y! returns: a list of updated nodes """ user_id = kwargs.get('user_id') updated_nodes = [] for n in nodes: updated_node_i = update_node(n, flush=False, user_id=user_id) updated_nodes.append(updated_node_i) db.DBSession.flush() return updated_nodes
Update multiple nodes. If new attributes are present, they will be added to the node. The non-presence of attributes does not remove them. %TODO:merge this with the 'update_nodes' functionality in the 'update_netework' function, so we're not duplicating functionality. D.R.Y! returns: a list of updated nodes
entailment
def set_node_status(node_id, status, **kwargs): """ Set the status of a node to 'X' """ user_id = kwargs.get('user_id') try: node_i = db.DBSession.query(Node).filter(Node.id == node_id).one() except NoResultFound: raise ResourceNotFoundError("Node %s not found"%(node_id)) node_i.network.check_write_permission(user_id) node_i.status = status for link in node_i.links_to: link.status = status for link in node_i.links_from: link.status = status db.DBSession.flush() return node_i
Set the status of a node to 'X'
entailment
def purge_network(network_id, purge_data,**kwargs): """ Remove a network from DB completely Use purge_data to try to delete the data associated with only this network. If no other resources link to this data, it will be deleted. """ user_id = kwargs.get('user_id') try: net_i = db.DBSession.query(Network).filter(Network.id == network_id).one() except NoResultFound: raise ResourceNotFoundError("Network %s not found"%(network_id)) log.info("Deleting network %s, id=%s", net_i.name, network_id) net_i.check_write_permission(user_id) db.DBSession.delete(net_i) db.DBSession.flush() return 'OK'
Remove a network from DB completely Use purge_data to try to delete the data associated with only this network. If no other resources link to this data, it will be deleted.
entailment
def _purge_datasets_unique_to_resource(ref_key, ref_id): """ Find the number of times a a resource and dataset combination occurs. If this equals the number of times the dataset appears, then we can say this dataset is unique to this resource, therefore it can be deleted """ count_qry = db.DBSession.query(ResourceScenario.dataset_id, func.count(ResourceScenario.dataset_id)).group_by( ResourceScenario.dataset_id).filter( ResourceScenario.resource_attr_id==ResourceAttr.id) if ref_key == 'NODE': count_qry.filter(ResourceAttr.node_id==ref_id) elif ref_key == 'LINK': count_qry.filter(ResourceAttr.link_id==ref_id) elif ref_key == 'GROUP': count_qry.filter(ResourceAttr.group_id==ref_id) count_rs = count_qry.all() for dataset_id, count in count_rs: full_dataset_count = db.DBSession.query(ResourceScenario).filter(ResourceScenario.dataset_id==dataset_id).count() if full_dataset_count == count: """First delete all the resource scenarios""" datasets_rs_to_delete = db.DBSession.query(ResourceScenario).filter(ResourceScenario.dataset_id==dataset_id).all() for dataset_rs in datasets_rs_to_delete: db.DBSession.delete(dataset_rs) """Then delete all the datasets""" dataset_to_delete = db.DBSession.query(Dataset).filter(Dataset.id==dataset_id).one() log.info("Deleting %s dataset %s (%s)", ref_key, dataset_to_delete.name, dataset_to_delete.id) db.DBSession.delete(dataset_to_delete)
Find the number of times a a resource and dataset combination occurs. If this equals the number of times the dataset appears, then we can say this dataset is unique to this resource, therefore it can be deleted
entailment
def delete_node(node_id, purge_data,**kwargs): """ Remove node from DB completely If there are attributes on the node, use purge_data to try to delete the data. If no other resources link to this data, it will be deleted. """ user_id = kwargs.get('user_id') try: node_i = db.DBSession.query(Node).filter(Node.id == node_id).one() except NoResultFound: raise ResourceNotFoundError("Node %s not found"%(node_id)) group_items = db.DBSession.query(ResourceGroupItem).filter( ResourceGroupItem.node_id==node_id).all() for gi in group_items: db.DBSession.delete(gi) if purge_data == 'Y': _purge_datasets_unique_to_resource('NODE', node_id) log.info("Deleting node %s, id=%s", node_i.name, node_id) node_i.network.check_write_permission(user_id) db.DBSession.delete(node_i) db.DBSession.flush() return 'OK'
Remove node from DB completely If there are attributes on the node, use purge_data to try to delete the data. If no other resources link to this data, it will be deleted.
entailment
def add_link(network_id, link,**kwargs): """ Add a link to a network """ user_id = kwargs.get('user_id') #check_perm(user_id, 'edit_topology') try: net_i = db.DBSession.query(Network).filter(Network.id == network_id).one() net_i.check_write_permission(user_id) except NoResultFound: raise ResourceNotFoundError("Network %s not found"%(network_id)) try: node_1 = db.DBSession.query(Node).filter(Node.id==link.node_1_id).one() node_2 = db.DBSession.query(Node).filter(Node.id==link.node_2_id).one() except NoResultFound: raise ResourceNotFoundError("Nodes for link not found") link_i = net_i.add_link(link.name, link.description, link.layout, node_1, node_2) hdb.add_resource_attributes(link_i, link.attributes) db.DBSession.flush() if link.types is not None and len(link.types) > 0: res_types = [] res_attrs = [] res_scenarios = {} for typesummary in link.types: ra, rt, rs = template.set_resource_type(link_i, typesummary.id, **kwargs) res_types.append(rt) res_attrs.extend(ra) res_scenarios.update(rs)#rs is a dict if len(res_types) > 0: db.DBSession.bulk_insert_mappings(ResourceType, res_types) if len(res_attrs) > 0: db.DBSession.bulk_insert_mappings(ResourceAttr, res_attrs) new_res_attrs = db.DBSession.query(ResourceAttr).order_by(ResourceAttr.id.desc()).limit(len(res_attrs)).all() all_rs = [] for ra in new_res_attrs: ra_id = ra.id if ra.attr_id in res_scenarios: rs_list = res_scenarios[ra.attr_id] for rs in rs_list: rs_list[rs]['resource_attr_id'] = ra_id all_rs.append(rs_list[rs]) if len(all_rs) > 0: db.DBSession.bulk_insert_mappings(ResourceScenario, all_rs) db.DBSession.refresh(link_i) return link_i
Add a link to a network
entailment
def update_link(link,**kwargs): """ Update a link. """ user_id = kwargs.get('user_id') #check_perm(user_id, 'edit_topology') try: link_i = db.DBSession.query(Link).filter(Link.id == link.id).one() link_i.network.check_write_permission(user_id) except NoResultFound: raise ResourceNotFoundError("Link %s not found"%(link.id)) #Each of thiese should be updateable independently if link.name is not None: link_i.name = link.name if link.node_1_id is not None: link_i.node_1_id = link.node_1_id if link.node_2_id is not None: link_i.node_2_id = link.node_2_id if link.description is not None: link_i.description = link.description if link.layout is not None: link_i.layout = link.get_layout() if link.attributes is not None: hdb.add_resource_attributes(link_i, link.attributes) if link.types is not None: hdb.add_resource_types(link_i, link.types) db.DBSession.flush() return link_i
Update a link.
entailment
def set_link_status(link_id, status, **kwargs): """ Set the status of a link """ user_id = kwargs.get('user_id') #check_perm(user_id, 'edit_topology') try: link_i = db.DBSession.query(Link).filter(Link.id == link_id).one() except NoResultFound: raise ResourceNotFoundError("Link %s not found"%(link_id)) link_i.network.check_write_permission(user_id) link_i.status = status db.DBSession.flush()
Set the status of a link
entailment
def delete_link(link_id, purge_data,**kwargs): """ Remove link from DB completely If there are attributes on the link, use purge_data to try to delete the data. If no other resources link to this data, it will be deleted. """ user_id = kwargs.get('user_id') try: link_i = db.DBSession.query(Link).filter(Link.id == link_id).one() except NoResultFound: raise ResourceNotFoundError("Link %s not found"%(link_id)) group_items = db.DBSession.query(ResourceGroupItem).filter( ResourceGroupItem.link_id==link_id).all() for gi in group_items: db.DBSession.delete(gi) if purge_data == 'Y': _purge_datasets_unique_to_resource('LINK', link_id) log.info("Deleting link %s, id=%s", link_i.name, link_id) link_i.network.check_write_permission(user_id) db.DBSession.delete(link_i) db.DBSession.flush()
Remove link from DB completely If there are attributes on the link, use purge_data to try to delete the data. If no other resources link to this data, it will be deleted.
entailment
def add_group(network_id, group,**kwargs): """ Add a resourcegroup to a network """ user_id = kwargs.get('user_id') try: net_i = db.DBSession.query(Network).filter(Network.id == network_id).one() net_i.check_write_permission(user_id=user_id) except NoResultFound: raise ResourceNotFoundError("Network %s not found"%(network_id)) res_grp_i = net_i.add_group(group.name, group.description, group.status) hdb.add_resource_attributes(res_grp_i, group.attributes) db.DBSession.flush() if group.types is not None and len(group.types) > 0: res_types = [] res_attrs = [] res_scenarios = {} for typesummary in group.types: ra, rt, rs = template.set_resource_type(res_grp_i, typesummary.id, **kwargs) res_types.append(rt) res_attrs.extend(ra) res_scenarios.update(rs)#rs is a dict if len(res_types) > 0: db.DBSession.bulk_insert_mappings(ResourceType, res_types) if len(res_attrs) > 0: db.DBSession.bulk_insert_mappings(ResourceAttr, res_attrs) new_res_attrs = db.DBSession.query(ResourceAttr).order_by(ResourceAttr.id.desc()).limit(len(res_attrs)).all() all_rs = [] for ra in new_res_attrs: ra_id = ra.id if ra.attr_id in res_scenarios: rs_list = res_scenarios[ra.attr_id] for rs in rs_list: rs_list[rs]['resource_attr_id'] = ra_id all_rs.append(rs_list[rs]) if len(all_rs) > 0: db.DBSession.bulk_insert_mappings(ResourceScenario, all_rs) db.DBSession.refresh(res_grp_i) return res_grp_i
Add a resourcegroup to a network
entailment
def update_group(group,**kwargs): """ Update a group. If new attributes are present, they will be added to the group. The non-presence of attributes does not remove them. """ user_id = kwargs.get('user_id') try: group_i = db.DBSession.query(ResourceGroup).filter(ResourceGroup.id == group.id).one() except NoResultFound: raise ResourceNotFoundError("group %s not found"%(group.id)) group_i.network.check_write_permission(user_id) group_i.name = group.name if group.name != None else group_i.name group_i.description = group.description if group.description else group_i.description if group.attributes is not None: _update_attributes(group_i, group.attributes) if group.types is not None: hdb.add_resource_types(group_i, group.types) db.DBSession.flush() return group_i
Update a group. If new attributes are present, they will be added to the group. The non-presence of attributes does not remove them.
entailment
def set_group_status(group_id, status, **kwargs): """ Set the status of a group to 'X' """ user_id = kwargs.get('user_id') try: group_i = db.DBSession.query(ResourceGroup).filter(ResourceGroup.id == group_id).one() except NoResultFound: raise ResourceNotFoundError("ResourceGroup %s not found"%(group_id)) group_i.network.check_write_permission(user_id) group_i.status = status db.DBSession.flush() return group_i
Set the status of a group to 'X'
entailment
def delete_group(group_id, purge_data,**kwargs): """ Remove group from DB completely If there are attributes on the group, use purge_data to try to delete the data. If no other resources group to this data, it will be deleted. """ user_id = kwargs.get('user_id') try: group_i = db.DBSession.query(ResourceGroup).filter(ResourceGroup.id == group_id).one() except NoResultFound: raise ResourceNotFoundError("Group %s not found"%(group_id)) group_items = db.DBSession.query(ResourceGroupItem).filter( ResourceGroupItem.group_id==group_id).all() for gi in group_items: db.DBSession.delete(gi) if purge_data == 'Y': _purge_datasets_unique_to_resource('GROUP', group_id) log.info("Deleting group %s, id=%s", group_i.name, group_id) group_i.network.check_write_permission(user_id) db.DBSession.delete(group_i) db.DBSession.flush()
Remove group from DB completely If there are attributes on the group, use purge_data to try to delete the data. If no other resources group to this data, it will be deleted.
entailment
def get_scenarios(network_id,**kwargs): """ Get all the scenarios in a given network. """ user_id = kwargs.get('user_id') try: net_i = db.DBSession.query(Network).filter(Network.id == network_id).one() net_i.check_read_permission(user_id=user_id) except NoResultFound: raise ResourceNotFoundError("Network %s not found"%(network_id)) return net_i.scenarios
Get all the scenarios in a given network.
entailment
def validate_network_topology(network_id,**kwargs): """ Check for the presence of orphan nodes in a network. """ user_id = kwargs.get('user_id') try: net_i = db.DBSession.query(Network).filter(Network.id == network_id).one() net_i.check_write_permission(user_id=user_id) except NoResultFound: raise ResourceNotFoundError("Network %s not found"%(network_id)) nodes = [] for node_i in net_i.nodes: if node_i.status == 'A': nodes.append(node_i.node_id) link_nodes = [] for link_i in net_i.links: if link_i.status != 'A': continue if link_i.node_1_id not in link_nodes: link_nodes.append(link_i.node_1_id) if link_i.node_2_id not in link_nodes: link_nodes.append(link_i.node_2_id) nodes = set(nodes) link_nodes = set(link_nodes) isolated_nodes = nodes - link_nodes return isolated_nodes
Check for the presence of orphan nodes in a network.
entailment
def get_resources_of_type(network_id, type_id, **kwargs): """ Return the Nodes, Links and ResourceGroups which have the type specified. """ nodes_with_type = db.DBSession.query(Node).join(ResourceType).filter(Node.network_id==network_id, ResourceType.type_id==type_id).all() links_with_type = db.DBSession.query(Link).join(ResourceType).filter(Link.network_id==network_id, ResourceType.type_id==type_id).all() groups_with_type = db.DBSession.query(ResourceGroup).join(ResourceType).filter(ResourceGroup.network_id==network_id, ResourceType.type_id==type_id).all() return nodes_with_type, links_with_type, groups_with_type
Return the Nodes, Links and ResourceGroups which have the type specified.
entailment
def clean_up_network(network_id, **kwargs): """ Purge any deleted nodes, links, resourcegroups and scenarios in a given network """ user_id = kwargs.get('user_id') #check_perm(user_id, 'delete_network') try: log.debug("Querying Network %s", network_id) net_i = db.DBSession.query(Network).filter(Network.id == network_id).\ options(noload('scenarios')).options(noload('nodes')).options(noload('links')).options(noload('resourcegroups')).options(joinedload_all('types.templatetype.template')).one() net_i.attributes #Define the basic resource queries node_qry = db.DBSession.query(Node).filter(Node.network_id==network_id).filter(Node.status=='X').all() link_qry = db.DBSession.query(Link).filter(Link.network_id==network_id).filter(Link.status=='X').all() group_qry = db.DBSession.query(ResourceGroup).filter(ResourceGroup.network_id==network_id).filter(ResourceGroup.status=='X').all() scenario_qry = db.DBSession.query(Scenario).filter(Scenario.network_id==network_id).filter(Scenario.status=='X').all() for n in node_qry: db.DBSession.delete(n) for l in link_qry: db.DBSession.delete(l) for g in group_qry: db.DBSession.delete(g) for s in scenario_qry: db.DBSession.delete(s) except NoResultFound: raise ResourceNotFoundError("Network %s not found"%(network_id)) db.DBSession.flush() return 'OK'
Purge any deleted nodes, links, resourcegroups and scenarios in a given network
entailment
def get_all_resource_attributes_in_network(attr_id, network_id, **kwargs): """ Find every resource attribute in the network matching the supplied attr_id """ user_id = kwargs.get('user_id') try: a = db.DBSession.query(Attr).filter(Attr.id == attr_id).one() except NoResultFound: raise HydraError("Attribute %s not found"%(attr_id,)) ra_qry = db.DBSession.query(ResourceAttr).filter( ResourceAttr.attr_id==attr_id, or_(Network.id == network_id, Node.network_id==network_id, Link.network_id==network_id, ResourceGroup.network_id==network_id) ).outerjoin('node')\ .outerjoin('link')\ .outerjoin('network')\ .outerjoin('resourcegroup')\ .options(joinedload_all('node'))\ .options(joinedload_all('link'))\ .options(joinedload_all('resourcegroup'))\ .options(joinedload_all('network')) resourceattrs = ra_qry.all() json_ra = [] #Load the metadata too for ra in resourceattrs: ra_j = JSONObject(ra, extras={'node':JSONObject(ra.node) if ra.node else None, 'link':JSONObject(ra.link) if ra.link else None, 'resourcegroup':JSONObject(ra.resourcegroup) if ra.resourcegroup else None, 'network':JSONObject(ra.network) if ra.network else None}) if ra_j.node is not None: ra_j.resource = ra_j.node elif ra_j.link is not None: ra_j.resource = ra_j.link elif ra_j.resourcegroup is not None: ra_j.resource = ra_j.resourcegroup elif ra.network is not None: ra_j.resource = ra_j.network json_ra.append(ra_j) return json_ra
Find every resource attribute in the network matching the supplied attr_id
entailment
def get_all_resource_data(scenario_id, include_metadata='N', page_start=None, page_end=None, **kwargs):
    """
    Return the data for all resources in a network, for one scenario.

    :param scenario_id: the scenario whose data is returned
    :param include_metadata: 'Y' to also fetch and attach dataset metadata
    :param page_start: optional start index for slicing the result
    :param page_end: optional end index for slicing the result
    :returns: a list of 'ResourceData' namedtuples, one per resource
        scenario. Hidden datasets the caller may not read have their value
        nulled and metadata emptied.
    """
    rs_qry = db.DBSession.query(
        ResourceAttr.attr_id,
        Attr.name.label('attr_name'),
        ResourceAttr.id.label('resource_attr_id'),
        ResourceAttr.ref_key,
        ResourceAttr.network_id,
        ResourceAttr.node_id,
        ResourceAttr.link_id,
        ResourceAttr.group_id,
        ResourceAttr.project_id,
        ResourceAttr.attr_is_var,
        ResourceScenario.scenario_id,
        ResourceScenario.source,
        Dataset.id.label('dataset_id'),
        Dataset.name.label('dataset_name'),
        Dataset.value,
        Dataset.unit_id,
        Dataset.hidden,
        Dataset.type,
        null().label('metadata'),
        # Pick the owning resource's name, whichever FK is populated.
        case([
            (ResourceAttr.node_id != None, Node.name),
            (ResourceAttr.link_id != None, Link.name),
            (ResourceAttr.group_id != None, ResourceGroup.name),
            (ResourceAttr.network_id != None, Network.name),
        ]).label('ref_name'),
    ).join(ResourceScenario, ResourceScenario.resource_attr_id==ResourceAttr.id)\
    .join(Dataset, ResourceScenario.dataset_id==Dataset.id).\
    join(Attr, ResourceAttr.attr_id==Attr.id).\
    outerjoin(Node, ResourceAttr.node_id==Node.id).\
    outerjoin(Link, ResourceAttr.link_id==Link.id).\
    outerjoin(ResourceGroup, ResourceAttr.group_id==ResourceGroup.id).\
    outerjoin(Network, ResourceAttr.network_id==Network.id).\
    filter(ResourceScenario.scenario_id==scenario_id)

    all_resource_data = rs_qry.all()

    # Optional pagination (applied in memory, after the query).
    if page_start is not None and page_end is None:
        all_resource_data = all_resource_data[page_start:]
    elif page_start is not None and page_end is not None:
        all_resource_data = all_resource_data[page_start:page_end]

    log.info("%s datasets retrieved", len(all_resource_data))

    if include_metadata == 'Y':
        metadata_qry = db.DBSession.query(
            distinct(Metadata.dataset_id).label('dataset_id'),
            Metadata.key,
            Metadata.value).filter(
                ResourceScenario.resource_attr_id==ResourceAttr.id,
                ResourceScenario.scenario_id==scenario_id,
                Dataset.id==ResourceScenario.dataset_id,
                Metadata.dataset_id==Dataset.id)

        log.info("Querying node metadata")
        metadata = metadata_qry.all()
        log.info("%s metadata items retrieved", len(metadata))

        # Group metadata rows by dataset id.
        metadata_dict = {}
        for m in metadata:
            metadata_dict.setdefault(m.dataset_id, []).append(m)

    return_data = []
    for ra in all_resource_data:
        ra_dict = ra._asdict()
        if ra.hidden == 'Y':
            try:
                d = db.DBSession.query(Dataset).filter(
                    Dataset.id==ra.dataset_id
                ).options(noload('metadata')).one()
                d.check_read_permission(kwargs.get('user_id'))
            # Was a bare 'except:', which also swallowed SystemExit and
            # KeyboardInterrupt. Any failure (no permission, dataset gone)
            # blanks the value rather than failing the whole request.
            except Exception:
                ra_dict['value'] = None
                ra_dict['metadata'] = []
        else:
            if include_metadata == 'Y':
                ra_dict['metadata'] = metadata_dict.get(ra.dataset_id, [])

        return_data.append(namedtuple('ResourceData', ra_dict.keys())(**ra_dict))

    log.info("Returning %s datasets", len(return_data))

    return return_data
A function which returns the data for all resources in a network. -
entailment
def clone_network(network_id, recipient_user_id=None, new_network_name=None, project_id=None, project_name=None, new_project=True, **kwargs):
    """
    Create an exact clone of the specified network for the specified user.

    If project_id is specified, put the new network in there.

    Otherwise create a new project with the specified name and put it in
    there.

    :param network_id: the network to clone
    :param recipient_user_id: optional additional owner of the clone
    :param new_network_name: name for the clone; defaults to the original's
        name (suffixed with a count if that name already exists)
    :param project_id: target project; if None and new_project is True a new
        project is created (or an existing same-named one reused)
    :param project_name: name for a newly created project
    :param new_project: whether to create a new project when project_id is
        not given
    :returns: the ID of the newly created network
    """
    user_id = kwargs['user_id']

    ex_net = db.DBSession.query(Network).filter(Network.id==network_id).one()

    ex_net.check_read_permission(user_id)

    if project_id is None and new_project == True:
        log.info("Creating a new project for cloned network")

        ex_proj = db.DBSession.query(Project).filter(Project.id==ex_net.project_id).one()

        user = db.DBSession.query(User).filter(User.id==user_id).one()

        project = Project()
        if project_name is None or project_name=="":
            project_name=ex_proj.name + " (Cloned by %s)" % user.display_name

        #check a project with this name doesn't already exist:
        ex_project = db.DBSession.query(Project).filter(Project.name==project_name,
                                                        Project.created_by==user_id).all()

        #If it exists, use it.
        if len(ex_project) > 0:
            project=ex_project[0]
        else:
            # NOTE(review): when an existing project is reused above, the
            # recipient is not added as an owner of it -- confirm intended.
            project.name = project_name
            project.created_by = user_id
            project.set_owner(user_id)
            if recipient_user_id!=None:
                project.set_owner(recipient_user_id)
            db.DBSession.add(project)
            db.DBSession.flush()

        project_id=project.id

    elif project_id is None:
        log.info("Using current project for cloned network")
        project_id=ex_net.project_id

    if new_network_name is None or new_network_name == "":
        new_network_name=ex_net.name

    log.info('Cloning Network...')

    # Find any networks in the target project whose name starts with the
    # requested name, and suffix a count to keep the new name unique.
    ex_network = db.DBSession.query(Network).filter(Network.project_id==project_id,
                                                    Network.name.like("{0}%".format(new_network_name))).all()

    if len(ex_network) > 0:
        new_network_name = new_network_name + " " + str(len(ex_network))

    # Copy the network-level properties onto a fresh Network row.
    newnet = Network()

    newnet.project_id = project_id
    newnet.name = new_network_name
    newnet.description = ex_net.description
    newnet.layout = ex_net.layout
    newnet.status = ex_net.status
    newnet.projection = ex_net.projection
    newnet.created_by = user_id
    newnet.set_owner(user_id)
    if recipient_user_id is not None:
        newnet.set_owner(recipient_user_id)

    db.DBSession.add(newnet)
    db.DBSession.flush()

    newnetworkid = newnet.id

    # Clone children in dependency order, threading the old-id -> new-id
    # maps through so cross-references can be rewritten.
    log.info('CLoning Nodes')
    node_id_map = _clone_nodes(network_id, newnetworkid)

    log.info('Cloning Links')
    link_id_map = _clone_links(network_id, newnetworkid, node_id_map)

    log.info('CLoning Groups')
    group_id_map = _clone_groups(network_id,
                                 newnetworkid,
                                 node_id_map,
                                 link_id_map)

    log.info("Cloning Resource Attributes")
    ra_id_map = _clone_resourceattrs(network_id, newnetworkid, node_id_map, link_id_map, group_id_map)

    log.info("Cloning Resource Types")
    _clone_resourcetypes(network_id, newnetworkid, node_id_map, link_id_map, group_id_map)

    log.info('Cloning Scenarios')
    _clone_scenarios(network_id, newnetworkid, ra_id_map, node_id_map, link_id_map, group_id_map, user_id)

    db.DBSession.flush()

    return newnetworkid
Create an exact clone of the specified network for the specified user. If project_id is specified, put the new network in there. Otherwise create a new project with the specified name and put it in there.
entailment
def copy_data_from_scenario(resource_attrs, source_scenario_id, target_scenario_id, **kwargs): """ For a given list of resource attribute IDS copy the dataset_ids from the resource scenarios in the source scenario to those in the 'target' scenario. """ #Get all the resource scenarios we wish to update target_resourcescenarios = db.DBSession.query(ResourceScenario).filter( ResourceScenario.scenario_id==target_scenario_id, ResourceScenario.resource_attr_id.in_(resource_attrs)).all() target_rs_dict = {} for target_rs in target_resourcescenarios: target_rs_dict[target_rs.resource_attr_id] = target_rs #get all the resource scenarios we are using to get our datsets source. source_resourcescenarios = db.DBSession.query(ResourceScenario).filter( ResourceScenario.scenario_id==source_scenario_id, ResourceScenario.resource_attr_id.in_(resource_attrs)).all() #If there is an RS in scenario 'source' but not in 'target', then create #a new one in 'target' for source_rs in source_resourcescenarios: target_rs = target_rs_dict.get(source_rs.resource_attr_id) if target_rs is not None: target_rs.dataset_id = source_rs.dataset_id else: target_rs = ResourceScenario() target_rs.scenario_id = target_scenario_id target_rs.dataset_id = source_rs.dataset_id target_rs.resource_attr_id = source_rs.resource_attr_id db.DBSession.add(target_rs) db.DBSession.flush() return target_resourcescenarios
For a given list of resource attribute IDS copy the dataset_ids from the resource scenarios in the source scenario to those in the 'target' scenario.
entailment
def get_scenario(scenario_id,**kwargs):
    """
    Get the specified scenario, including its resource scenarios and
    resource group items.

    Datasets the caller is not permitted to read are returned with a null
    value and empty metadata.
    """
    user_id = kwargs.get('user_id')

    scen_i = _get_scenario(scenario_id, user_id)

    scen_j = JSONObject(scen_i)

    rscen_rs = db.DBSession.query(ResourceScenario).filter(
        ResourceScenario.scenario_id==scenario_id).options(joinedload_all('dataset.metadata')).all()

    #lazy load resource attributes and attributes
    for rs in rscen_rs:
        rs.resourceattr
        rs.resourceattr.attr

    rgi_rs = db.DBSession.query(ResourceGroupItem).filter(
        ResourceGroupItem.scenario_id==scenario_id).all()

    scen_j.resourcescenarios = []
    for rs in rscen_rs:
        rs_j = JSONObject(rs, extras={'resourceattr':JSONObject(rs.resourceattr)})
        # Blank out datasets the caller may not read.
        if rs.dataset.check_read_permission(user_id, do_raise=False) is False:
            rs_j.dataset['value'] = None
            rs_j.dataset.metadata = JSONObject({})
        scen_j.resourcescenarios.append(rs_j)

    scen_j.resourcegroupitems =[JSONObject(r) for r in rgi_rs]

    return scen_j
Get the specified scenario
entailment
def add_scenario(network_id, scenario,**kwargs):
    """
    Add a scenario to a specified network.

    :param network_id: the network to which the scenario is added
    :param scenario: an object carrying name, description, layout, start/end
        times, time_step and optional resourcescenarios / resourcegroupitems
    :returns: the newly created Scenario ORM object
    :raises HydraError: if a scenario with the same name already exists in
        the network
    """
    user_id = int(kwargs.get('user_id'))
    log.info("Adding scenarios to network")

    _check_network_ownership(network_id, user_id)

    existing_scen = db.DBSession.query(Scenario).filter(Scenario.name==scenario.name, Scenario.network_id==network_id).first()
    if existing_scen is not None:
        raise HydraError("Scenario with name %s already exists in network %s"%(scenario.name, network_id))

    scen = Scenario()
    scen.name = scenario.name
    scen.description = scenario.description
    scen.layout = scenario.get_layout()
    scen.network_id = network_id
    scen.created_by = user_id
    # Times are persisted as stringified ordinals.
    scen.start_time = str(timestamp_to_ordinal(scenario.start_time)) if scenario.start_time else None
    scen.end_time = str(timestamp_to_ordinal(scenario.end_time)) if scenario.end_time else None
    scen.time_step = scenario.time_step
    scen.resourcescenarios = []
    scen.resourcegroupitems = []

    #Just in case someone puts in a negative ID for the scenario.
    #(Check for None first: 'None < 0' raises TypeError on Python 3.)
    if scenario.id is not None and scenario.id < 0:
        scenario.id = None

    if scenario.resourcescenarios is not None:
        #extract the data from each resourcescenario so it can all be
        #inserted in one go, rather than one at a time
        all_data = [r.dataset for r in scenario.resourcescenarios]

        datasets = data._bulk_insert_data(all_data, user_id=user_id)

        #record all the resource attribute ids
        resource_attr_ids = [r.resource_attr_id for r in scenario.resourcescenarios]

        #get all the resource scenarios into a list and bulk insert them
        for i, ra_id in enumerate(resource_attr_ids):
            rs_i = ResourceScenario()
            rs_i.resource_attr_id = ra_id
            rs_i.dataset_id = datasets[i].id
            rs_i.scenario_id = scen.id
            rs_i.dataset = datasets[i]
            scen.resourcescenarios.append(rs_i)

    if scenario.resourcegroupitems is not None:
        #Again doing bulk insert.
        for group_item in scenario.resourcegroupitems:
            group_item_i = ResourceGroupItem()
            group_item_i.scenario_id = scen.id
            group_item_i.group_id = group_item.group_id
            group_item_i.ref_key = group_item.ref_key
            # The generic ref_id maps to a type-specific FK column.
            if group_item.ref_key == 'NODE':
                group_item_i.node_id = group_item.ref_id
            elif group_item.ref_key == 'LINK':
                group_item_i.link_id = group_item.ref_id
            elif group_item.ref_key == 'GROUP':
                group_item_i.subgroup_id = group_item.ref_id
            scen.resourcegroupitems.append(group_item_i)

    db.DBSession.add(scen)
    db.DBSession.flush()
    return scen
Add a scenario to a specified network.
entailment
def update_scenario(scenario,update_data=True,update_groups=True,flush=True,**kwargs):
    """
    Update a single scenario. As all resources already exist, there is no
    need to worry about negative IDS.

    :param scenario: the scenario object carrying the new values
    :param update_data: if True, update the scenario's resource scenarios
    :param update_groups: if True, update the scenario's group items
    :param flush: True flushes to the DB at the end of the function;
        False does not flush, assuming that it will happen as part of
        another process, like update_network.
    :raises PermissionError: if the scenario is locked
    """
    user_id = kwargs.get('user_id')
    scen = _get_scenario(scenario.id, user_id)

    if scen.locked == 'Y':
        raise PermissionError('Scenario is locked. Unlock before editing.')

    # Start/end times are persisted as strings: floats are stringified
    # directly, anything else goes through timestamp_to_ordinal first.
    start_time = None
    if isinstance(scenario.start_time, float):
        start_time = six.text_type(scenario.start_time)
    else:
        start_time = timestamp_to_ordinal(scenario.start_time)
        if start_time is not None:
            start_time = six.text_type(start_time)

    end_time = None
    if isinstance(scenario.end_time, float):
        end_time = six.text_type(scenario.end_time)
    else:
        end_time = timestamp_to_ordinal(scenario.end_time)
        if end_time is not None:
            end_time = six.text_type(end_time)

    scen.name = scenario.name
    scen.description = scenario.description
    scen.layout = scenario.get_layout()
    scen.start_time = start_time
    scen.end_time = end_time
    scen.time_step = scenario.time_step

    if scenario.resourcescenarios == None:
        scenario.resourcescenarios = []
    if scenario.resourcegroupitems == None:
        scenario.resourcegroupitems = []

    #lazy load resourcescenarios from the DB
    scen.resourcescenarios

    if update_data is True:
        datasets = [rs.dataset for rs in scenario.resourcescenarios]
        updated_datasets = data._bulk_insert_data(datasets, user_id, kwargs.get('app_name'))
        for i, r_scen in enumerate(scenario.resourcescenarios):
            _update_resourcescenario(scen, r_scen, dataset=updated_datasets[i], user_id=user_id, source=kwargs.get('app_name'))

    #lazy load resource group items from the DB
    scen.resourcegroupitems

    if update_groups is True:
        #Process all the items sent to this handler.
        # NOTE(review): an older comment claimed DB items not passed in are
        # removed, but no removal is visible in this body --
        # _add_resourcegroupitem only adds/updates. Confirm against callers.
        for group_item in scenario.resourcegroupitems:
            _add_resourcegroupitem(group_item, scenario.id)

    if flush is True:
        db.DBSession.flush()

    return scen
Update a single scenario as all resources already exist, there is no need to worry about negative IDS flush = True flushes to the DB at the end of the function. flush = False does not flush, assuming that it will happen as part of another process, like update_network.
entailment
def set_scenario_status(scenario_id, status, **kwargs):
    """
    Update the status flag of a single scenario.

    :returns: 'OK' on success
    """
    user_id = kwargs['user_id']

    # Verify the caller may edit this scenario before touching it.
    _check_can_edit_scenario(scenario_id, user_id)

    scen = _get_scenario(scenario_id, user_id)
    scen.status = status

    db.DBSession.flush()

    return 'OK'
Set the status of a scenario.
entailment
def purge_scenario(scenario_id, **kwargs):
    """
    Permanently delete a scenario from the database.

    This is a hard delete, not a status change -- use set_scenario_status
    to soft-delete instead.

    :returns: 'OK' on success
    """
    _check_can_edit_scenario(scenario_id, kwargs['user_id'])

    user_id = kwargs.get('user_id')

    scenario_i = _get_scenario(scenario_id, user_id)

    db.DBSession.delete(scenario_i)
    db.DBSession.flush()
    return 'OK'
Set the status of a scenario.
entailment
def _get_as_obj(obj_dict, name): """ Turn a dictionary into a named tuple so it can be passed into the constructor of a complex model generator. """ if obj_dict.get('_sa_instance_state'): del obj_dict['_sa_instance_state'] obj = namedtuple(name, tuple(obj_dict.keys())) for k, v in obj_dict.items(): setattr(obj, k, v) log.info("%s = %s",k,getattr(obj,k)) return obj
Turn a dictionary into a named tuple so it can be passed into the constructor of a complex model generator.
entailment
def get_resource_scenario(resource_attr_id, scenario_id, **kwargs):
    """
    Get the resource scenario object for a given resource attribute and
    scenario. This is done when you know the attribute, resource and
    scenario and want to get the value associated with it.

    :raises ResourceNotFoundError: if no matching resource scenario exists
    """
    user_id = kwargs.get('user_id')

    # Permission / existence check on the scenario itself.
    _get_scenario(scenario_id, user_id)

    try:
        rs = db.DBSession.query(ResourceScenario).filter(
            ResourceScenario.resource_attr_id==resource_attr_id,
            ResourceScenario.scenario_id == scenario_id).options(joinedload_all('dataset')).options(joinedload_all('dataset.metadata')).one()

        return rs
    except NoResultFound:
        raise ResourceNotFoundError("resource scenario for %s not found in scenario %s"%(resource_attr_id, scenario_id))
Get the resource scenario object for a given resource atttribute and scenario. This is done when you know the attribute, resource and scenario and want to get the value associated with it.
entailment
def bulk_update_resourcedata(scenario_ids, resource_scenarios,**kwargs):
    """
    Update the data associated with a list of scenarios.

    :param scenario_ids: the scenarios to update; all must belong to the
        same network
    :param resource_scenarios: the resource scenarios to apply to every
        scenario. An entry whose dataset is None causes that resource
        scenario to be deleted instead.
    :returns: dict mapping scenario_id -> list of updated resource scenarios
    :raises HydraError: if the scenarios span more than one network
    """
    user_id = kwargs.get('user_id')
    # (A dead 'res = None' assignment, immediately overwritten, was removed.)
    res = {}

    net_ids = db.DBSession.query(Scenario.network_id).filter(Scenario.id.in_(scenario_ids)).all()

    if len(set(net_ids)) != 1:
        raise HydraError("Scenario IDS are not in the same network")

    for scenario_id in scenario_ids:
        _check_can_edit_scenario(scenario_id, kwargs['user_id'])

        scen_i = _get_scenario(scenario_id, user_id)

        res[scenario_id] = []
        for rs in resource_scenarios:
            if rs.dataset is not None:
                updated_rs = _update_resourcescenario(scen_i, rs, user_id=user_id, source=kwargs.get('app_name'))
                res[scenario_id].append(updated_rs)
            else:
                # A null dataset means "remove this resource scenario".
                _delete_resourcescenario(scenario_id, rs.resource_attr_id)

    db.DBSession.flush()

    return res
Update the data associated with a list of scenarios.
entailment
def update_resourcedata(scenario_id, resource_scenarios,**kwargs):
    """
    Update the data associated with a scenario.

    Data missing from the resource scenario will not be removed from the
    scenario. Use remove_resourcedata for this task.

    If the resource scenario does not exist, it will be created.
    If the value of the resource scenario is specified as being None, the
    resource scenario will be deleted.
    If the value of the resource scenario does not exist, it will be created.
    If both the resource scenario and value already exist, the resource
    scenario will be updated with the ID of the dataset.

    If the dataset being set is being changed, already exists, and is only
    used by a single resource scenario, then the dataset itself is updated,
    rather than a new one being created.

    :returns: list of the updated resource scenarios
    """
    user_id = kwargs.get('user_id')

    _check_can_edit_scenario(scenario_id, kwargs['user_id'])

    scen_i = _get_scenario(scenario_id, user_id)

    # (A dead 'res = None' assignment, immediately overwritten, was removed.)
    res = []
    for rs in resource_scenarios:
        if rs.dataset is not None:
            updated_rs = _update_resourcescenario(scen_i, rs, user_id=user_id, source=kwargs.get('app_name'))
            res.append(updated_rs)
        else:
            # A null dataset means "remove this resource scenario".
            _delete_resourcescenario(scenario_id, rs.resource_attr_id)

    db.DBSession.flush()

    return res
Update the data associated with a scenario. Data missing from the resource scenario will not be removed from the scenario. Use the remove_resourcedata for this task. If the resource scenario does not exist, it will be created. If the value of the resource scenario is specified as being None, the resource scenario will be deleted. If the value of the resource scenario does not exist, it will be created. If the both the resource scenario and value already exist, the resource scenario will be updated with the ID of the dataset. If the dataset being set is being changed, already exists, and is only used by a single resource scenario, then the dataset itself is updated, rather than a new one being created.
entailment
def delete_resource_scenario(scenario_id, resource_attr_id, quiet=False, **kwargs):
    """
    Remove the dataset linked to a resource attribute within a scenario.

    :param quiet: when True, a missing resource scenario is ignored rather
        than raising an error
    """
    user_id = kwargs['user_id']

    _check_can_edit_scenario(scenario_id, user_id)

    _delete_resourcescenario(scenario_id, resource_attr_id, suppress_error=quiet)
Remove the data associated with a resource in a scenario.
entailment
def delete_resourcedata(scenario_id, resource_scenario, quiet = False, **kwargs):
    """
    Remove the data associated with a resource in a scenario.

    :param quiet: when True, a non-existent resource scenario is ignored
        instead of raising an error
    """
    user_id = kwargs['user_id']

    _check_can_edit_scenario(scenario_id, user_id)

    _delete_resourcescenario(scenario_id,
                             resource_scenario.resource_attr_id,
                             suppress_error=quiet)
Remove the data associated with a resource in a scenario. The 'quiet' parameter indicates whether a non-existent RS should throw an error.
entailment
def _update_resourcescenario(scenario, resource_scenario, dataset=None, new=False, user_id=None, source=None):
    """
    Insert or Update the value of a resource's attribute by first getting
    the resource, then parsing the input data, then assigning the value.

    :param scenario: the Scenario ORM object (falls back to ID 1 if None)
    :param resource_scenario: carries resource_attr_id and, when no explicit
        dataset is passed, the dataset to assign
    :param dataset: optional pre-inserted dataset to attach directly
    :returns: a ResourceScenario object, or None if the parsed value is None
    """
    if scenario is None:
        # NOTE(review): falls back to the scenario with ID 1 when none is
        # given -- looks like a legacy default; confirm this is intended.
        scenario = db.DBSession.query(Scenario).filter(Scenario.id==1).one()

    ra_id = resource_scenario.resource_attr_id

    log.debug("Assigning resource attribute: %s",ra_id)
    try:
        r_scen_i = db.DBSession.query(ResourceScenario).filter(
            ResourceScenario.scenario_id==scenario.id,
            ResourceScenario.resource_attr_id==ra_id).one()
    except NoResultFound as e:
        log.info("Creating new RS")
        r_scen_i = ResourceScenario()
        r_scen_i.resource_attr_id = resource_scenario.resource_attr_id
        r_scen_i.scenario_id = scenario.id
        r_scen_i.scenario = scenario
        db.DBSession.add(r_scen_i)

    # Locked scenarios cannot be modified; return the RS untouched.
    if scenario.locked == 'Y':
        log.info("Scenario %s is locked",scenario.id)
        return r_scen_i

    # If a dataset was pre-inserted (bulk path), just attach it.
    if dataset is not None:
        r_scen_i.dataset = dataset

        return r_scen_i

    dataset = resource_scenario.dataset

    value = dataset.parse_value()

    log.info("Assigning %s to resource attribute: %s", value, ra_id)

    if value is None:
        log.info("Cannot set data on resource attribute %s",ra_id)
        return None

    metadata = dataset.get_metadata_as_dict(source=source, user_id=user_id)
    data_unit_id = dataset.unit_id

    data_hash = dataset.get_hash(value, metadata)

    # Delegate the create-vs-update decision to assign_value.
    assign_value(r_scen_i,
                 dataset.type.lower(),
                 value,
                 data_unit_id,
                 dataset.name,
                 metadata=metadata,
                 data_hash=data_hash,
                 user_id=user_id,
                 source=source)

    return r_scen_i
Insert or Update the value of a resource's attribute by first getting the resource, then parsing the input data, then assigning the value. returns a ResourceScenario object.
entailment
def assign_value(rs, data_type, val, unit_id, name, metadata=None, data_hash=None, user_id=None, source=None):
    """
    Insert or update a piece of data in a scenario.

    If the dataset is being shared by other resource scenarios, a new
    dataset is inserted. If the dataset is ONLY being used by the resource
    scenario in question, the dataset is updated to avoid unnecessary
    duplication.

    :param metadata: optional dict of dataset metadata (defaults to empty).
        NOTE: the previous signature used the mutable default 'metadata={}',
        a classic shared-state pitfall; a None sentinel is used instead.
    :raises PermissionError: if the resource scenario's scenario is locked
    """
    if metadata is None:
        metadata = {}

    log.debug("Assigning value %s to rs %s in scenario %s",
              name, rs.resource_attr_id, rs.scenario_id)

    if rs.scenario.locked == 'Y':
        raise PermissionError("Cannot assign value. Scenario %s is locked"
                             %(rs.scenario_id))

    #Check if this RS is the only RS in the DB connected to this dataset.
    #If no results is found, the RS isn't in the DB yet, so the condition is false.
    update_dataset = False # Default behaviour is to create a new dataset.

    if rs.dataset is not None:

        #Has this dataset changed?
        if rs.dataset.hash == data_hash:
            log.debug("Dataset has not changed. Returning.")
            return

        connected_rs = db.DBSession.query(ResourceScenario).filter(ResourceScenario.dataset_id==rs.dataset.id).all()
        #If there's no RS found, then the incoming rs is new, so the dataset can be altered
        #without fear of affecting something else.
        if len(connected_rs) == 0:
            #If it's 1, the RS exists in the DB, but it's the only one using this dataset or
            #The RS isn't in the DB yet and the dataset is being used by 1 other RS.
            update_dataset = True

        if len(connected_rs) == 1 :
            if connected_rs[0].scenario_id == rs.scenario_id and connected_rs[0].resource_attr_id==rs.resource_attr_id:
                update_dataset = True
        else:
            update_dataset=False

    if update_dataset is True:
        log.info("Updating dataset '%s'", name)
        # In-place update: this RS is the sole user of the dataset.
        dataset = data.update_dataset(rs.dataset.id, name, data_type, val, unit_id, metadata, flush=False, **dict(user_id=user_id))
        rs.dataset = dataset
        rs.dataset_id = dataset.id
        log.info("Set RS dataset id to %s"%dataset.id)
    else:
        log.info("Creating new dataset %s in scenario %s", name, rs.scenario_id)
        # Shared (or new) dataset: create a fresh one to avoid mutating
        # data referenced by other resource scenarios.
        dataset = data.add_dataset(data_type,
                                   val,
                                   unit_id,
                                   metadata=metadata,
                                   name=name,
                                   **dict(user_id=user_id))
        rs.dataset = dataset
        rs.source = source

    db.DBSession.flush()
Insert or update a piece of data in a scenario. If the dataset is being shared by other resource scenarios, a new dataset is inserted. If the dataset is ONLY being used by the resource scenario in question, the dataset is updated to avoid unnecessary duplication.
entailment
def add_data_to_attribute(scenario_id, resource_attr_id, dataset,**kwargs):
    """
    Add data to a resource scenario outside of a network update.

    :returns: the created or updated ResourceScenario
    :raises HydraError: if the dataset's value parses to None
    """
    user_id = kwargs.get('user_id')

    _check_can_edit_scenario(scenario_id, user_id)

    scenario_i = _get_scenario(scenario_id, user_id)

    try:
        r_scen_i = db.DBSession.query(ResourceScenario).filter(
            ResourceScenario.scenario_id==scenario_id,
            ResourceScenario.resource_attr_id==resource_attr_id).one()
        log.info("Existing resource scenario found for %s in scenario %s", resource_attr_id, scenario_id)
    except NoResultFound:
        log.info("No existing resource scenarios found for %s in scenario %s. Adding a new one.", resource_attr_id, scenario_id)
        r_scen_i = ResourceScenario()
        r_scen_i.scenario_id = scenario_id
        r_scen_i.resource_attr_id = resource_attr_id
        scenario_i.resourcescenarios.append(r_scen_i)

    data_type = dataset.type.lower()

    value = dataset.parse_value()

    dataset_metadata = dataset.get_metadata_as_dict(user_id=kwargs.get('user_id'),
                                                    source=kwargs.get('source'))
    if value is None:
        # Bug fix: the message was previously passed logging-style as
        # HydraError("... %s", dataset.id), which left the placeholder
        # unformatted. Exceptions do not %-format their arguments.
        raise HydraError("Cannot set value to attribute. "
                         "No value was sent with dataset %s" % dataset.id)

    data_hash = dataset.get_hash(value, dataset_metadata)

    assign_value(r_scen_i, data_type, value, dataset.unit_id, dataset.name,
                 metadata=dataset_metadata, data_hash=data_hash, user_id=user_id)

    db.DBSession.flush()
    return r_scen_i
Add data to a resource scenario outside of a network update
entailment
def get_scenario_data(scenario_id,**kwargs):
    """
    Get all the datasets used in the given scenario.

    Hidden datasets the caller may not read are returned with a null value
    and empty metadata.

    :returns: a list of Dataset objects (expunged from the session)
    """
    user_id = kwargs.get('user_id')

    scenario_data = db.DBSession.query(Dataset).filter(Dataset.id==ResourceScenario.dataset_id, ResourceScenario.scenario_id==scenario_id).options(joinedload_all('metadata')).distinct().all()

    for sd in scenario_data:
        if sd.hidden == 'Y':
            try:
                sd.check_read_permission(user_id)
            # Was a bare 'except:', which also swallowed SystemExit and
            # KeyboardInterrupt; blank the value rather than fail the call.
            except Exception:
                sd.value = None
                sd.metadata = []

    db.DBSession.expunge_all()

    log.info("Retrieved %s datasets", len(scenario_data))
    return scenario_data
Get all the datasets from the group with the specified name @returns a list of dictionaries
entailment
def get_attribute_data(attr_ids, node_ids, **kwargs):
    """
    For a given attribute or set of attributes, return all the resources and
    resource scenarios in the network.

    :returns: tuple of (node resource attributes, resource scenarios)
    """
    node_attrs = db.DBSession.query(ResourceAttr).\
            options(joinedload_all('attr')).\
            filter(ResourceAttr.node_id.in_(node_ids),
                   ResourceAttr.attr_id.in_(attr_ids)).all()

    ra_ids = []
    for ra in node_attrs:
        ra_ids.append(ra.id)

    resource_scenarios = db.DBSession.query(ResourceScenario).filter(ResourceScenario.resource_attr_id.in_(ra_ids)).options(joinedload('resourceattr')).options(joinedload_all('dataset.metadata')).order_by(ResourceScenario.scenario_id).all()

    for rs in resource_scenarios:
        if rs.dataset.hidden == 'Y':
            try:
                rs.dataset.check_read_permission(kwargs.get('user_id'))
            # Was a bare 'except:', which also swallowed SystemExit and
            # KeyboardInterrupt; blank the value rather than fail the call.
            except Exception:
                rs.dataset.value = None
        db.DBSession.expunge(rs)

    return node_attrs, resource_scenarios
For a given attribute or set of attributes, return all the resources and resource scenarios in the network
entailment
def get_resource_data(ref_key, ref_id, scenario_id, type_id=None, expunge_session=True, **kwargs):
    """
    Get all the resource scenarios for a given resource in a given scenario.
    If type_id is specified, only return the resource scenarios for the
    attributes within the type.

    :param ref_key: 'NETWORK', 'NODE', 'LINK' or 'GROUP'
    :param ref_id: the ID of the resource
    :param expunge_session: if True, detach all results from the session
    """
    user_id = kwargs.get('user_id')

    resource_data_qry = db.DBSession.query(ResourceScenario).filter(
        ResourceScenario.dataset_id == Dataset.id,
        ResourceAttr.id == ResourceScenario.resource_attr_id,
        ResourceScenario.scenario_id == scenario_id,
        ResourceAttr.ref_key == ref_key,
        or_(
            ResourceAttr.network_id==ref_id,
            ResourceAttr.node_id==ref_id,
            ResourceAttr.link_id==ref_id,
            ResourceAttr.group_id==ref_id
        )).distinct().\
        options(joinedload('resourceattr')).\
        options(joinedload_all('dataset.metadata')).\
        order_by(ResourceAttr.attr_is_var)

    if type_id is not None:
        # Restrict to the attributes belonging to the given template type.
        attr_ids = []
        rs = db.DBSession.query(TypeAttr).filter(TypeAttr.type_id==type_id).all()
        for r in rs:
            attr_ids.append(r.attr_id)

        resource_data_qry = resource_data_qry.filter(ResourceAttr.attr_id.in_(attr_ids))

    resource_data = resource_data_qry.all()

    for rs in resource_data:
        #TODO: Design a mechanism to read the value of the dataset if it's stored externally
        if rs.dataset.hidden == 'Y':
            try:
                rs.dataset.check_read_permission(user_id)
            # Was a bare 'except:', which also swallowed SystemExit and
            # KeyboardInterrupt; blank the value rather than fail the call.
            except Exception:
                rs.dataset.value = None

    if expunge_session == True:
        db.DBSession.expunge_all()

    return resource_data
Get all the resource scenarios for a given resource in a given scenario. If type_id is specified, only return the resource scenarios for the attributes within the type.
entailment
def get_attribute_datasets(attr_id, scenario_id, **kwargs):
    """
    Retrieve all the datasets in a scenario for a given attribute.
    Also return the resource attributes so there is a reference to the
    node / link.

    :raises HydraError: if the attribute does not exist
    """
    user_id = kwargs.get('user_id')

    scenario_i = _get_scenario(scenario_id, user_id)

    # Existence check only -- raises if the attribute is unknown.
    try:
        a = db.DBSession.query(Attr).filter(Attr.id == attr_id).one()
    except NoResultFound:
        raise HydraError("Attribute %s not found"%(attr_id,))

    rs_qry = db.DBSession.query(ResourceScenario).filter(
        ResourceAttr.attr_id==attr_id,
        ResourceScenario.scenario_id==scenario_i.id,
        ResourceScenario.resource_attr_id==ResourceAttr.id
    ).options(joinedload_all('dataset'))\
    .options(joinedload_all('resourceattr'))\
    .options(joinedload_all('resourceattr.node'))\
    .options(joinedload_all('resourceattr.link'))\
    .options(joinedload_all('resourceattr.resourcegroup'))\
    .options(joinedload_all('resourceattr.network'))

    resourcescenarios = rs_qry.all()

    json_rs = []
    #Load the metadata too
    for rs in resourcescenarios:
        rs.dataset.metadata
        tmp_rs = JSONObject(rs)
        tmp_rs.resourceattr=JSONObject(rs.resourceattr)
        # Attach the owning resource explicitly so the caller can tell which
        # node / link / group / network the attribute belongs to.
        if rs.resourceattr.node_id is not None:
            tmp_rs.resourceattr.node = JSONObject(rs.resourceattr.node)
        elif rs.resourceattr.link_id is not None:
            tmp_rs.resourceattr.link = JSONObject(rs.resourceattr.link)
        elif rs.resourceattr.group_id is not None:
            tmp_rs.resourceattr.resourcegroup = JSONObject(rs.resourceattr.resourcegroup)
        elif rs.resourceattr.network_id is not None:
            tmp_rs.resourceattr.network = JSONObject(rs.resourceattr.network)

        json_rs.append(tmp_rs)

    return json_rs
Retrieve all the datasets in a scenario for a given attribute. Also return the resource attributes so there is a reference to the node/link
entailment
def get_resourcegroupitems(group_id, scenario_id, **kwargs):
    """
    Fetch the items of a group within a scenario.

    A null group_id returns the items of every group in the scenario.
    """
    items_qry = db.DBSession.query(ResourceGroupItem)\
            .filter(ResourceGroupItem.scenario_id == scenario_id)

    # Narrow to a single group only when one is specified.
    if group_id is not None:
        items_qry = items_qry.filter(ResourceGroupItem.group_id == group_id)

    return items_qry.all()
Get all the items in a group, in a scenario. If group_id is None, return all items across all groups in the scenario.
entailment
def delete_resourcegroupitems(scenario_id, item_ids, **kwargs):
    """
    Delete specified items in a group, in a scenario.

    :param item_ids: IDs of the ResourceGroupItems to delete. A missing ID
        causes the underlying .one() call to raise.
    """
    user_id = int(kwargs.get('user_id'))
    #check the scenario exists
    _get_scenario(scenario_id, user_id)
    for item_id in item_ids:
        rgi = db.DBSession.query(ResourceGroupItem).\
                filter(ResourceGroupItem.id==item_id).one()
        db.DBSession.delete(rgi)
    db.DBSession.flush()
Delete specified items in a group, in a scenario.
entailment
def empty_group(group_id, scenario_id, **kwargs):
    """
    Delete all items in a group, in a scenario.
    """
    user_id = int(kwargs.get('user_id'))

    #check the scenario exists
    _get_scenario(scenario_id, user_id)

    # Bug fix: the previous implementation called .all() and then invoked
    # .delete() on the resulting *list*, which always raised AttributeError.
    # Issue the bulk delete on the query itself instead.
    rgi_qry = db.DBSession.query(ResourceGroupItem).\
            filter(ResourceGroupItem.group_id==group_id).\
            filter(ResourceGroupItem.scenario_id==scenario_id)

    rgi_qry.delete()

    # Flush for consistency with delete_resourcegroupitems.
    db.DBSession.flush()
Delete all items in a group, in a scenario.
entailment
def add_resourcegroupitems(scenario_id, items, scenario=None, **kwargs):
    """
    Add a list of items to groups within a scenario.

    (Despite the original docstring reading like a getter, this function
    *creates* the supplied group items.)

    :param scenario: optionally pass the scenario in to avoid re-querying it
    :returns: the newly created ResourceGroupItem objects
    """
    user_id = int(kwargs.get('user_id'))

    if scenario is None:
        scenario = _get_scenario(scenario_id, user_id)

    _check_network_ownership(scenario.network_id, user_id)

    newitems = []
    for group_item in items:
        group_item_i = _add_resourcegroupitem(group_item, scenario.id)
        newitems.append(group_item_i)

    db.DBSession.flush()

    return newitems
Get all the items in a group, in a scenario.
entailment
def _add_resourcegroupitem(group_item, scenario_id):
    """
    Create or update a single resource group item.

    Internal helper: no DB flush is performed here.
    """
    if group_item.id and group_item.id > 0:
        # An id was supplied, so this is an update of an existing item.
        try:
            item_i = db.DBSession.query(ResourceGroupItem)\
                .filter(ResourceGroupItem.id == group_item.id).one()
        except NoResultFound:
            raise ResourceNotFoundError("ResourceGroupItem %s not found" % (group_item.id))
    else:
        item_i = ResourceGroupItem()
        item_i.group_id = group_item.group_id
        if scenario_id is not None:
            item_i.scenario_id = scenario_id
        db.DBSession.add(item_i)

    ref_key = group_item.ref_key
    item_i.ref_key = ref_key

    # The referenced resource may arrive as a generic 'ref_id' or as a
    # type-specific id; prefer ref_id when it is set.
    if ref_key == 'NODE':
        item_i.node_id = group_item.ref_id if group_item.ref_id else group_item.node_id
    elif ref_key == 'LINK':
        item_i.link_id = group_item.ref_id if group_item.ref_id else group_item.link_id
    elif ref_key == 'GROUP':
        item_i.subgroup_id = group_item.ref_id if group_item.ref_id else group_item.subgroup_id

    return item_i
Add a single resource group item (no DB flush, as it's an internal function)
entailment
def update_value_from_mapping(source_resource_attr_id, target_resource_attr_id, source_scenario_id, target_scenario_id, **kwargs):
    """
    Using a resource attribute mapping, take the value from the source and
    apply it to the target.  Both source and target scenarios must be
    specified (and therefore must exist).

    Returns the created or updated target ResourceScenario, or None when the
    source has no data (in which case any target data is deleted).
    """
    user_id = int(kwargs.get('user_id'))

    rm = aliased(ResourceAttrMap, name='rm')
    # Check the mapping exists — it may be stored in either direction.
    mapping = db.DBSession.query(rm).filter(
        or_(
            and_(
                rm.resource_attr_id_a == source_resource_attr_id,
                rm.resource_attr_id_b == target_resource_attr_id
            ),
            and_(
                rm.resource_attr_id_a == target_resource_attr_id,
                rm.resource_attr_id_b == source_resource_attr_id
            )
        )
    ).first()

    if mapping is None:
        raise ResourceNotFoundError("Mapping between %s and %s not found"%
                                    (source_resource_attr_id,
                                     target_resource_attr_id))

    # Check the scenarios exist (return values unused; calls validate access).
    s1 = _get_scenario(source_scenario_id, user_id)
    s2 = _get_scenario(target_scenario_id, user_id)

    rs = aliased(ResourceScenario, name='rs')
    # Current data on the source and target sides, if any.
    rs1 = db.DBSession.query(rs).filter(rs.resource_attr_id == source_resource_attr_id,
                                        rs.scenario_id == source_scenario_id).first()
    rs2 = db.DBSession.query(rs).filter(rs.resource_attr_id == target_resource_attr_id,
                                        rs.scenario_id == target_scenario_id).first()

    #3 possibilities worth considering:
    #1: Both RS exist, so update the target RS
    #2: Target RS does not exist, so create it with the dastaset from RS1
    #3: Source RS does not exist, so it must be removed from the target scenario if it exists
    return_value = None #Either return null or return a new or updated resource scenario
    if rs1 is not None:
        if rs2 is not None:
            log.info("Destination Resource Scenario exists. Updating dastaset ID")
            rs2.dataset_id = rs1.dataset_id
        else:
            log.info("Destination has no data, so making a new Resource Scenario")
            rs2 = ResourceScenario(resource_attr_id=target_resource_attr_id,
                                   scenario_id=target_scenario_id,
                                   dataset_id=rs1.dataset_id)
            db.DBSession.add(rs2)
        db.DBSession.flush()
        return_value = rs2
    else:
        log.info("Source Resource Scenario does not exist. Deleting destination Resource Scenario")
        if rs2 is not None:
            db.DBSession.delete(rs2)
            db.DBSession.flush()

    return return_value
Using a resource attribute mapping, take the value from the source and apply it to the target. Both source and target scenarios must be specified (and therefore must exist).
entailment
def get_plugins(**kwargs):
    """
    Get all available plugins.

    Scans the configured plugin directory for sub-directories, reads each
    plugin's ``trunk/plugin.xml`` descriptor, validates it against the plugin
    XSD and returns the list of descriptor XML strings.  Plugins whose
    descriptor fails validation are logged and skipped.
    """
    plugins = []
    plugin_paths = []

    #Look in directory or set of directories for plugins
    base_plugin_dir = config.get('plugin', 'default_directory')
    plugin_xsd_path = config.get('plugin', 'plugin_xsd_path')

    for directory in os.listdir(base_plugin_dir):
        #ignore hidden files and the schema folder
        if directory[0] == '.' or directory == 'xml':
            continue
        #Is this a file or a directory? If it's a directory, it's a plugin.
        path = os.path.join(base_plugin_dir, directory)
        if os.path.isdir(path):
            plugin_paths.append(path)

    #Retrieve the xml schema for validating the plugin descriptors.
    xmlschema_doc = etree.parse(plugin_xsd_path)
    xmlschema = etree.XMLSchema(xmlschema_doc)

    #Get the xml description file from each plugin directory. If there
    #is no xml file, the plugin is unusable.
    for plugin_dir in plugin_paths:
        full_plugin_path = os.path.join(plugin_dir, 'trunk')
        dir_contents = os.listdir(full_plugin_path)
        for file_name in dir_contents:
            file_path = os.path.join(full_plugin_path, file_name)
            if file_name == 'plugin.xml':
                # FIX: the original opened the file twice and never closed
                # either handle; use a context manager instead.
                try:
                    with open(file_path, 'r') as descriptor_file:
                        xml_tree = etree.parse(descriptor_file)
                    xmlschema.assertValid(xml_tree)
                    plugins.append(etree.tostring(xml_tree))
                except Exception as e:
                    log.critical("Schema %s did not validate! (error was %s)"%(file_name, e))
                break
        else:
            log.warning("No xml plugin details found for %s. Ignoring", plugin_dir)

    return plugins
Get all available plugins
entailment
def run_plugin(plugin, **kwargs):
    """
    Run a plugin as a detached subprocess.

    Builds an argument vector of the form
    ``[python, /path/to/plugin, --name=value, ...]`` from ``plugin.location``
    and ``plugin.params``, launches it with subprocess.Popen and returns the
    child PID as a string.  The process is not waited on.
    """
    args = [sys.executable]

    #Get plugin executable
    # NOTE(review): the plugin path is hard-coded under ~/svn/HYDRA/HydraPlugins
    # — presumably a deployment convention; confirm.
    home = os.path.expanduser('~')
    path_to_plugin = os.path.join(home, 'svn/HYDRA/HydraPlugins', plugin.location)
    args.append(path_to_plugin)

    #Parse plugin arguments into a string
    # NOTE(review): plugin_params is built but never used after the loop
    # (only 'args' is passed to Popen); it feeds the commented os.system call.
    plugin_params = " "
    for p in plugin.params:
        param = "--%s=%s "%(p.name, p.value)
        args.append("--%s"%p.name)
        args.append(p.value)
        plugin_params = plugin_params + param

    log_dir = config.get('plugin', 'result_file')
    log_file = os.path.join(home, log_dir, plugin.name)

    #this reads all the logs so far. We're not interested in them
    #Everything after this is new content to the file
    # NOTE(review): the log file is created if missing, then re-opened for
    # reading; the final handle 'f' is never read or closed here — looks like
    # it is only meant to position/ensure the file exists.  Verify intent.
    try:
        f = open(log_file, 'r')
        f.read()
    except:
        f = open(log_file, 'w')
        f.close()

    f = open(log_file, 'r')

    pid = subprocess.Popen(args).pid

    #run plugin
    #os.system("%s %s"%(path_to_plugin, plugin_params))

    log.info("Process started! PID: %s", pid)

    return str(pid)
Run a plugin
entailment
def load_config():
    """Load a config file.

    This function looks for a config (*.ini) file in the following order::

        (1) ./*.ini
        (2) ~/.config/hydra/
        (3) /etc/hydra
        (4) /path/to/hydra_base/*.ini

    (1) overrides (2) overrides (3) overrides (4).  Parameters not defined in
    an earlier file are taken from the later ones.  A file named by the
    HYDRA_CONFIG environment variable, if set, is read last and overrides all.
    """
    global localfiles
    global localfile
    global repofile
    global repofiles
    global userfile
    global userfiles
    global sysfile
    global sysfiles
    global CONFIG

    logging.basicConfig(level='INFO')

    config = ConfigParser.ConfigParser(allow_no_value=True)

    modulepath = os.path.dirname(os.path.abspath(__file__))

    localfile = os.path.join(os.getcwd(), 'hydra.ini')
    localfiles = glob.glob(localfile)

    repofile = os.path.join(modulepath, 'hydra.ini')
    repofiles = glob.glob(repofile)

    if os.name == 'nt':
        import winpaths
        userfile = os.path.join(os.path.expanduser('~'), 'AppData', 'Local', 'hydra.ini')
        userfiles = glob.glob(userfile)
        sysfile = os.path.join(winpaths.get_common_documents(), 'Hydra', 'hydra.ini')
        sysfiles = glob.glob(sysfile)
    else:
        userfile = os.path.join(os.path.expanduser('~'), '.hydra', 'hydra.ini')
        userfiles = glob.glob(userfile)
        # NOTE(review): this is the *relative* path 'etc/hydra/hydra.ini',
        # not '/etc/hydra/hydra.ini' as the docstring suggests — confirm.
        sysfile = os.path.join('etc', 'hydra', 'hydra.ini')
        sysfiles = glob.glob(sysfile)

    #Read in increasing order of precedence: repo < system < user < local.
    for ini_file in repofiles:
        logging.debug("Repofile: %s"%ini_file)
        config.read(ini_file)
    for ini_file in sysfiles:
        logging.debug("Sysfile: %s"%ini_file)
        config.read(ini_file)
    for ini_file in userfiles:
        logging.debug("Userfile: %s"%ini_file)
        config.read(ini_file)
    for ini_file in localfiles:
        logging.info("Localfile: %s"%ini_file)
        config.read(ini_file)

    env_value = os.environ.get('HYDRA_CONFIG')
    if env_value is not None:
        if os.path.exists(env_value):
            # BUG FIX: the original read 'ini_file' (a stale loop variable)
            # here instead of the file named by HYDRA_CONFIG.
            config.read(env_value)
        else:
            logging.warning('HYDRA_CONFIG set as %s but file does not exist', env_value)

    if os.name == 'nt':
        set_windows_env_variables(config)

    #Best-effort defaults: fall back to environment variables when the
    #config files do not define these keys.
    try:
        home_dir = config.get('DEFAULT', 'home_dir')
    except Exception:
        home_dir = os.environ.get('HYDRA_HOME_DIR', '~')
    config.set('DEFAULT', 'home_dir', os.path.expanduser(home_dir))

    try:
        hydra_base = config.get('DEFAULT', 'hydra_base_dir')
    except Exception:
        hydra_base = os.environ.get('HYDRA_BASE_DIR', modulepath)
    config.set('DEFAULT', 'hydra_base_dir', os.path.expanduser(hydra_base))

    read_values_from_environment(config, 'mysqld', 'server_name')

    CONFIG = config

    return config
Load a config file. This function looks for a config (*.ini) file in the following order:: (1) ./*.ini (2) ~/.config/hydra/ (3) /etc/hydra (4) /path/to/hydra_base/*.ini (1) will override (2) will override (3) will override (4). Parameters not defined in (1) will be taken from (2). Parameters not defined in (2) will be taken from (3). (3) is the config folder that will be checked out from the svn repository. (2) Will be be provided as soon as an installable distribution is available. (1) will usually be written individually by every user.
entailment
def create_mysql_db(db_url): """ To simplify deployment, create the mysql DB if it's not there. Accepts a URL with or without a DB name stated, and returns a db url containing the db name for use in the main sqlalchemy engine. THe formats can take the following form: mysql+driver://username:password@hostname mysql+driver://username:password@hostname/dbname if no DB name is specified, it is retrieved from config """ #Remove trailing whitespace and forwardslashes db_url = db_url.strip().strip('/') #Check this is a mysql URL if db_url.find('mysql') >= 0: #Get the DB name from config and check if it's in the URL db_name = config.get('mysqld', 'db_name', 'hydradb') if db_url.find(db_name) >= 0: no_db_url = db_url.rsplit("/", 1)[0] else: #Check that there is a hostname specified, as we'll be using the '@' symbol soon.. if db_url.find('@') == -1: raise HydraError("No Hostname specified in DB url") #Check if there's a DB name specified that's different to the one in config. host_and_db_name = db_url.split('@')[1] if host_and_db_name.find('/') >= 0: no_db_url, db_name = db_url.rsplit("/", 1) else: no_db_url = db_url db_url = no_db_url + "/" + db_name db_url = "{}?charset=utf8&use_unicode=1".format(db_url) if config.get('mysqld', 'auto_create', 'Y') == 'Y': tmp_engine = create_engine(no_db_url) log.warning("Creating database {0} as it does not exist.".format(db_name)) tmp_engine.execute("CREATE DATABASE IF NOT EXISTS {0}".format(db_name)) return db_url
To simplify deployment, create the mysql DB if it's not there. Accepts a URL with or without a DB name stated, and returns a db url containing the db name for use in the main sqlalchemy engine. THe formats can take the following form: mysql+driver://username:password@hostname mysql+driver://username:password@hostname/dbname if no DB name is specified, it is retrieved from config
entailment
def add_project(project, **kwargs):
    """
    Add a new project.

    Raises HydraError if the user already has a project of the same name.
    :returns: the new Project ORM instance
    """
    user_id = kwargs.get('user_id')

    existing_proj = get_project_by_name(project.name, user_id=user_id)
    if len(existing_proj) > 0:
        raise HydraError("A Project with the name \"%s\" already exists"%(project.name,))

    #check_perm(user_id, 'add_project')
    proj_i = Project()
    proj_i.name = project.name
    proj_i.description = project.description
    proj_i.created_by = user_id

    attr_map = hdb.add_resource_attributes(proj_i, project.attributes)

    # Flush so the new resource attributes get their IDs before we attach data.
    db.DBSession.flush()

    proj_i.attribute_data = _add_project_attribute_data(proj_i, attr_map, project.attribute_data)

    proj_i.set_owner(user_id)

    db.DBSession.add(proj_i)
    db.DBSession.flush()

    return proj_i
Add a new project returns a project complexmodel
entailment
def update_project(project, **kwargs):
    """
    Update an existing project's name, description and attribute data.

    :returns: the updated Project ORM instance
    """
    user_id = kwargs.get('user_id')
    #check_perm(user_id, 'update_project')
    proj_i = _get_project(project.id)

    proj_i.check_write_permission(user_id)

    proj_i.name = project.name
    proj_i.description = project.description

    attr_map = hdb.add_resource_attributes(proj_i, project.attributes)
    proj_i.attribute_data = _add_project_attribute_data(proj_i, attr_map, project.attribute_data)

    db.DBSession.flush()

    return proj_i
Update a project returns a project complexmodel
entailment
def get_project(project_id, include_deleted_networks=False, **kwargs):
    """
    Fetch a project as a JSONObject, attaching only those of its networks
    that the requesting user is allowed to read (and, unless requested,
    excluding deleted networks).
    """
    user_id = kwargs.get('user_id')
    proj_i = _get_project(project_id)

    proj_i.owners  # touch to lazy-load owners

    proj_i.check_read_permission(user_id)

    proj_j = JSONObject(proj_i)
    proj_j.networks = []
    for net_i in proj_i.networks:
        net_i.owners     # lazy-load
        net_i.scenarios  # lazy-load

        # 'X' status marks a deleted network.
        if include_deleted_networks == False and net_i.status.lower() == 'x':
            continue

        if net_i.check_read_permission(user_id, do_raise=False) is False:
            continue

        proj_j.networks.append(JSONObject(net_i))

    return proj_j
get a project complexmodel
entailment
def get_project_by_network_id(network_id, **kwargs):
    """
    Get the project containing a given network, provided the requesting user
    may read it.  Returns None when no readable project is found.
    """
    user_id = kwargs.get('user_id')
    projects_i = db.DBSession.query(Project).join(ProjectOwner).join(Network, Project.id==Network.project_id).filter(
                                        Network.id==network_id,
                                        ProjectOwner.user_id==user_id).order_by('name').all()

    ret_project = None
    for project_i in projects_i:
        try:
            project_i.check_read_permission(user_id)
            ret_project = project_i
        # FIX: narrowed from a bare 'except:', which also swallowed
        # KeyboardInterrupt / SystemExit.
        except Exception:
            log.info("Can't return project %s. User %s does not have permission to read it.", project_i.id, user_id)

    return ret_project
get a project complexmodel by a network_id
entailment
def get_project_by_name(project_name, **kwargs):
    """
    Get all of the user's projects with the given name, filtered down to
    those the user may read.
    """
    user_id = kwargs.get('user_id')
    projects_i = db.DBSession.query(Project).join(ProjectOwner).filter(
                                        Project.name==project_name,
                                        ProjectOwner.user_id==user_id).order_by('name').all()

    ret_projects = []
    for project_i in projects_i:
        try:
            project_i.check_read_permission(user_id)
            ret_projects.append(project_i)
        # FIX: narrowed from a bare 'except:', which also swallowed
        # KeyboardInterrupt / SystemExit.
        except Exception:
            log.info("Can't return project %s. User %s does not have permission to read it.", project_i.id, user_id)

    return ret_projects
get a project complexmodel
entailment
def to_named_tuple(obj, visited_children=None, back_relationships=None, levels=None, ignore=None, extras=None):
    """
    Serialise a SQLAlchemy ORM object (and, up to ``levels`` deep, its
    relationships) into a KeyedTuple.

    Altered from an example found on stackoverflow
    http://stackoverflow.com/questions/23554119/convert-sqlalchemy-orm-result-to-dict

    :param visited_children: objects already serialised (cycle protection)
    :param back_relationships: relationship names not to follow back
    :param levels: how many relationship levels to descend; None/0 = none
    :param ignore: relationship names to skip entirely
    :param extras: extra non-column key/values to include in the result
    """
    # FIX: 'ignore=[]' and 'extras={}' were mutable default arguments,
    # shared between calls; use None sentinels instead.
    if visited_children is None:
        visited_children = []
    if back_relationships is None:
        back_relationships = []
    if ignore is None:
        ignore = []
    if extras is None:
        extras = {}

    serialized_data = {c.key: getattr(obj, c.key) for c in obj.__table__.columns}

    #Any other non-column data to include in the keyed tuple
    for k, v in extras.items():
        serialized_data[k] = v

    relationships = class_mapper(obj.__class__).relationships

    #Set the attributes to 'None' first, so the attributes are there, even if
    #they don't get filled in:
    for name, relation in relationships.items():
        if relation.uselist:
            serialized_data[name] = tuple([])
        else:
            serialized_data[name] = None

    visitable_relationships = [(name, rel) for name, rel in relationships.items()
                               if name not in back_relationships]

    if levels is not None and levels > 0:
        for name, relation in visitable_relationships:
            # NOTE(review): decremented once per relationship, not once per
            # depth level — preserved as-is; confirm intent.
            levels = levels - 1
            if name in ignore:
                continue
            if relation.backref:
                back_relationships.append(relation.backref)
            relationship_children = getattr(obj, name)
            if relationship_children is not None:
                if relation.uselist:
                    children = []
                    for child in [c for c in relationship_children if c not in visited_children]:
                        visited_children.append(child)
                        children.append(to_named_tuple(child, visited_children, back_relationships, ignore=ignore, levels=levels))
                    serialized_data[name] = tuple(children)
                else:
                    serialized_data[name] = to_named_tuple(relationship_children, visited_children, back_relationships, ignore=ignore, levels=levels)

    # KeyedTuple takes (values, labels); renamed locals for clarity — the
    # original 'cols'/'vals' names were swapped relative to their contents.
    labels = []
    values = []
    for k, v in serialized_data.items():
        labels.append(k)
        values.append(v)

    result = KeyedTuple(values, labels)

    return result
Altered from an example found on stackoverflow http://stackoverflow.com/questions/23554119/convert-sqlalchemy-orm-result-to-dict
entailment
def get_projects(uid, include_shared_projects=True, projects_ids_list_filter=None, **kwargs):
    """
    Get all the projects owned by the specified user.

    These include projects created by the user, but also ones shared with the
    user.  For shared projects, only include networks in those projects which
    are accessible to the user.

    :param include_shared_projects: when False, only return projects created
        directly by this user.
    :param projects_ids_list_filter: optional int, iterable of ints, or a
        string literal of either (e.g. "[1, 2]") restricting the project ids.
    """
    req_user_id = kwargs.get('user_id')

    ##Don't load the project's networks. Load them separately, as the networks
    #must be checked individually for ownership
    projects_qry = db.DBSession.query(Project)

    log.info("Getting projects for %s", uid)

    if include_shared_projects is True:
        projects_qry = projects_qry.join(ProjectOwner).filter(
            Project.status == 'A',
            or_(ProjectOwner.user_id == uid, Project.created_by == uid))
    else:
        projects_qry = projects_qry.join(ProjectOwner).filter(Project.created_by == uid)

    if projects_ids_list_filter is not None:
        # Filtering the search of project id
        if isinstance(projects_ids_list_filter, str):
            # SECURITY FIX: was eval() on request-supplied text;
            # literal_eval only accepts Python literals.
            import ast
            projects_ids_list_filter = ast.literal_eval(projects_ids_list_filter)

        if type(projects_ids_list_filter) is int:
            projects_qry = projects_qry.filter(Project.id == projects_ids_list_filter)
        else:
            projects_qry = projects_qry.filter(Project.id.in_(projects_ids_list_filter))

    projects_qry = projects_qry.options(noload('networks')).order_by('id')

    projects_i = projects_qry.all()

    log.info("Project query done for user %s. %s projects found", uid, len(projects_i))

    user = db.DBSession.query(User).filter(User.id == req_user_id).one()
    isadmin = user.is_admin()

    projects_j = []
    for project_i in projects_i:
        #Ensure the requesting user is allowed to see the project
        project_i.check_read_permission(req_user_id)

        project_i.owners  # lazy-load owners

        network_qry = db.DBSession.query(Network)\
                .filter(Network.project_id == project_i.id,
                        Network.status == 'A')
        if not isadmin:
            # BUG FIX: the original discarded the result of this chain
            # (Query methods return a new Query), so the ownership filter
            # never took effect for non-admins.
            network_qry = network_qry.outerjoin(NetworkOwner)\
                .filter(or_(
                    and_(NetworkOwner.user_id != None, NetworkOwner.view == 'Y'),
                    Network.created_by == uid
                ))

        networks_j = []
        for network_i in network_qry.all():
            network_i.owners  # lazy-load owners
            net_j = JSONObject(network_i)
            if net_j.layout is not None:
                net_j.layout = JSONObject(net_j.layout)
            else:
                net_j.layout = JSONObject({})
            networks_j.append(net_j)

        project_j = JSONObject(project_i)
        project_j.networks = networks_j
        projects_j.append(project_j)

    log.info("Networks loaded projects for user %s", uid)

    return projects_j
Get all the projects owned by the specified user. These include projects created by the user, but also ones shared with the user. For shared projects, only include networks in those projects which are accessible to the user. the include_shared_projects flag indicates whether to include projects which have been shared with the user, or to only return projects created directly by this user.
entailment
def set_project_status(project_id, status, **kwargs):
    """
    Set a project's status flag (e.g. 'X' to mark it as deleted).
    """
    user_id = kwargs.get('user_id')
    #check_perm(user_id, 'delete_project')
    project_i = _get_project(project_id)
    project_i.check_write_permission(user_id)
    project_i.status = status
    db.DBSession.flush()
Set the status of a project to 'X'
entailment
def delete_project(project_id, **kwargs):
    """
    Delete a project record entirely from the database.
    """
    user_id = kwargs.get('user_id')
    #check_perm(user_id, 'delete_project')
    project_i = _get_project(project_id)
    project_i.check_write_permission(user_id)
    db.DBSession.delete(project_i)
    db.DBSession.flush()
    return 'OK'
Set the status of a project to 'X'
entailment
def get_networks(project_id, include_data='N', **kwargs):
    """
    Get all active networks in a project that the user may read.

    Returns an array of network objects.
    """
    log.info("Getting networks for project %s", project_id)
    user_id = kwargs.get('user_id')
    project = _get_project(project_id)
    project.check_read_permission(user_id)

    id_status_rows = db.DBSession.query(Network.id, Network.status)\
        .filter(Network.project_id == project_id).all()

    networks = []
    for row in id_status_rows:
        if row.status != 'A':
            continue
        try:
            net = network.get_network(row.id, summary=True, include_data=include_data, **kwargs)
            log.info("Network %s retrieved", net.name)
            networks.append(net)
        except PermissionError:
            log.info("Not returning network %s as user %s does not have "
                     "permission to read it."%(row.id, user_id))

    return networks
Get all networks in a project Returns an array of network objects.
entailment
def get_network_project(network_id, **kwargs):
    """
    Get the project that a network is in.

    Raises HydraError if the network (or its project) cannot be found.
    """
    # BUG FIX: the join previously matched Project.id == Network.id, which
    # only returned a row when a network coincidentally shared its project's
    # id.  Join on the foreign key instead (same pattern as
    # get_project_by_network_id).
    net_proj = db.DBSession.query(Project).join(Network,
                    and_(Project.id == Network.project_id,
                         Network.id == network_id)).first()

    if net_proj is None:
        raise HydraError("Network %s not found"% network_id)

    return net_proj
get the project that a network is in
entailment
def add_resource_types(resource_i, types):
    """
    Save a reference to the types used for this resource.

    @returns a list of type_ids representing the type ids on the resource.
    """
    if types is None:
        return []

    existing_type_ids = [t.type_id for t in resource_i.types] if resource_i.types else []

    new_type_ids = []
    for templatetype in types:
        # Skip types already attached to this resource.
        if templatetype.id in existing_type_ids:
            continue

        rt_i = ResourceType()
        rt_i.type_id = templatetype.id
        rt_i.ref_key = resource_i.ref_key
        if resource_i.ref_key == 'NODE':
            rt_i.node_id = resource_i.id
        elif resource_i.ref_key == 'LINK':
            rt_i.link_id = resource_i.id
        elif resource_i.ref_key == 'GROUP':
            rt_i.group_id = resource_i.id
        resource_i.types.append(rt_i)
        new_type_ids.append(templatetype.id)

    return new_type_ids
Save a reference to the types used for this resource. @returns a list of type_ids representing the type ids on the resource.
entailment
def create_default_users_and_perms():
    """
    Add the default roles and permissions to the DB.

    Only roles, perms and role->perm links that are not already present in
    the database are added, so this is safe to run repeatedly.  New roles or
    perms can be introduced simply by extending the literal tables below.
    """
    # perms = db.DBSession.query(Perm).all()
    # if len(perms) > 0:
    #     return

    # (code, display name) for every permission the system knows about.
    default_perms = ( ("add_user", "Add User"),
                      ("edit_user", "Edit User"),
                      ("add_role", "Add Role"),
                      ("edit_role", "Edit Role"),
                      ("add_perm", "Add Permission"),
                      ("edit_perm", "Edit Permission"),

                      ("add_network", "Add network"),
                      ("edit_network", "Edit network"),
                      ("delete_network", "Delete network"),
                      ("share_network", "Share network"),
                      ("edit_topology", "Edit network topology"),

                      ("add_project", "Add Project"),
                      ("edit_project", "Edit Project"),
                      ("delete_project", "Delete Project"),
                      ("share_project", "Share Project"),

                      ("edit_data", "Edit network data"),
                      ("view_data", "View network data"),

                      ("add_template", "Add Template"),
                      ("edit_template", "Edit Template"),

                      ("add_dimension", "Add Dimension"),
                      ("update_dimension", "Update Dimension"),
                      ("delete_dimension", "Delete Dimension"),
                      ("add_unit", "Add Unit"),
                      ("update_unit", "Update Unit"),
                      ("delete_unit", "Delete Unit")
                      )

    # (code, display name) for every role.
    default_roles = ( ("admin", "Administrator"),
                      ("dev", "Developer"),
                      ("modeller", "Modeller / Analyst"),
                      ("manager", "Manager"),
                      ("grad", "Graduate"),
                      ("developer", "Developer"),
                      ("decision", "Decision Maker"),
                      )

    # (role code, perm code) links to ensure exist.
    roleperms = (
        # Admin permissions
        ('admin', "add_user"),
        ('admin', "edit_user"),
        ('admin', "add_role"),
        ('admin', "edit_role"),
        ('admin', "add_perm"),
        ('admin', "edit_perm"),
        ('admin', "add_network"),
        ('admin', "edit_network"),
        ('admin', "delete_network"),
        ('admin', "share_network"),
        ('admin', "add_project"),
        ('admin', "edit_project"),
        ('admin', "delete_project"),
        ('admin', "share_project"),
        ('admin', "edit_topology"),
        ('admin', "edit_data"),
        ('admin', "view_data"),
        ('admin', "add_template"),
        ('admin', "edit_template"),
        ('admin', "add_dimension"),
        ('admin', "update_dimension"),
        ('admin', "delete_dimension"),
        ('admin', "add_unit"),
        ('admin', "update_unit"),
        ('admin', "delete_unit"),
        # Developer permissions
        ("developer", "add_network"),
        ("developer", "edit_network"),
        ("developer", "delete_network"),
        ("developer", "share_network"),
        ("developer", "add_project"),
        ("developer", "edit_project"),
        ("developer", "delete_project"),
        ("developer", "share_project"),
        ("developer", "edit_topology"),
        ("developer", "edit_data"),
        ("developer", "view_data"),
        ("developer", "add_template"),
        ("developer", "edit_template"),
        ('developer', "add_dimension"),
        ('developer', "update_dimension"),
        ('developer', "delete_dimension"),
        ('developer', "add_unit"),
        ('developer', "update_unit"),
        ('developer', "delete_unit"),
        # modeller permissions
        ("modeller", "add_network"),
        ("modeller", "edit_network"),
        ("modeller", "delete_network"),
        ("modeller", "share_network"),
        ("modeller", "edit_topology"),
        ("modeller", "add_project"),
        ("modeller", "edit_project"),
        ("modeller", "delete_project"),
        ("modeller", "share_project"),
        ("modeller", "edit_data"),
        ("modeller", "view_data"),
        # Manager permissions
        ("manager", "edit_data"),
        ("manager", "view_data"),
    )

    # Map for code to ID
    id_maps_dict = {
        "perm": {},
        "role": {}
    }

    # Adding perms (only those whose code is not already in the DB).
    perm_dict = {}
    for code, name in default_perms:
        perm = Perm(code=code, name=name)
        perm_dict[code] = perm
        perms_by_name = db.DBSession.query(Perm).filter(Perm.code==code).all()
        if len(perms_by_name)==0:
            # Adding perm
            log.debug("# Adding PERM {}".format(code))
            db.DBSession.add(perm)
            db.DBSession.flush()
        # Re-query so we have the DB-assigned id in either case.
        perm_by_name = db.DBSession.query(Perm).filter(Perm.code==code).one()
        id_maps_dict["perm"][code] = perm_by_name.id

    # Adding roles (only those whose code is not already in the DB).
    role_dict = {}
    for code, name in default_roles:
        role = Role(code=code, name=name)
        role_dict[code] = role
        roles_by_name = db.DBSession.query(Role).filter(Role.code==code).all()
        if len(roles_by_name)==0:
            # Adding role
            log.debug("# Adding ROLE {}".format(code))
            db.DBSession.add(role)
            db.DBSession.flush()
        # Re-query so we have the DB-assigned id in either case.
        role_by_name = db.DBSession.query(Role).filter(Role.code==code).one()
        id_maps_dict["role"][code] = role_by_name.id

    # Adding connections (role <-> perm links missing from the DB).
    for role_code, perm_code in roleperms:
        #log.info("Link Role:{}({}) <---> Perm:{}({})".format(role_code, id_maps_dict["role"][role_code], perm_code, id_maps_dict["perm"][perm_code]))
        links_found = db.DBSession.query(RolePerm).filter(RolePerm.role_id==id_maps_dict["role"][role_code]).filter(RolePerm.perm_id==id_maps_dict["perm"][perm_code]).all()
        if len(links_found)==0:
            # Adding link
            log.debug("# Adding link")
            roleperm = RolePerm()
            # roleperm.role = role_dict[role_code]
            # roleperm.perm = perm_dict[perm_code]
            roleperm.role_id = id_maps_dict["role"][role_code]
            roleperm.perm_id = id_maps_dict["perm"][perm_code]
            db.DBSession.add(roleperm)
            db.DBSession.flush()

    db.DBSession.flush()
Adds the roles and perm to the DB. It adds only roles, perms and links between them that are not inside the db It is possible adding new role or perm and connecting them just modifiying the following lists
entailment
def create_default_units_and_dimensions():
    """
    Add the default units and dimensions, read from a bundled JSON file.

    Only dimensions and units that are not already in the DB are added, so
    this is safe to run repeatedly.  New dimensions and units can be added
    to the system just by modifying the JSON file.
    """
    default_units_file_location = os.path.realpath(\
        os.path.join(os.path.dirname(os.path.realpath(__file__)),
                     '../',
                     'static',
                     'default_units_and_dimensions.json'))
    d = None
    with open(default_units_file_location) as json_data:
        d = json.load(json_data)
        json_data.close()

    for json_dimension in d["dimension"]:
        new_dimension = None
        dimension_name = get_utf8_encoded_string(json_dimension["name"])
        db_dimensions_by_name = db.DBSession.query(Dimension).filter(Dimension.name==dimension_name).all()
        if len(db_dimensions_by_name) == 0:
            # Adding the dimension
            log.debug("Adding Dimension `{}`".format(dimension_name))
            new_dimension = Dimension()
            if "id" in json_dimension:
                # If ID is specified in the JSON, preserve it
                new_dimension.id = json_dimension["id"]
            new_dimension.name = dimension_name
            db.DBSession.add(new_dimension)
            db.DBSession.flush()

        # Get the dimension by name (covers both the just-added and the
        # pre-existing case) so its id can be used on the units below.
        new_dimension = get_dimension_from_db_by_name(dimension_name)

        for json_unit in json_dimension["unit"]:
            db_units_by_name = db.DBSession.query(Unit).filter(Unit.abbreviation==get_utf8_encoded_string(json_unit['abbr'])).all()
            if len(db_units_by_name) == 0:
                # Adding the unit
                log.debug("Adding Unit %s in %s",json_unit['abbr'], json_dimension["name"])
                new_unit = Unit()
                if "id" in json_unit:
                    # If ID is specified in the JSON, preserve it
                    new_unit.id = json_unit["id"]
                new_unit.dimension_id = new_dimension.id
                new_unit.name = get_utf8_encoded_string(json_unit['name'])
                new_unit.abbreviation = get_utf8_encoded_string(json_unit['abbr'])
                # lf/cf are the linear conversion factors to the base unit.
                new_unit.lf = get_utf8_encoded_string(json_unit['lf'])
                new_unit.cf = get_utf8_encoded_string(json_unit['cf'])
                if "description" in json_unit:
                    # If Description is specified
                    new_unit.description = get_utf8_encoded_string(json_unit["description"])
                # Save on DB
                db.DBSession.add(new_unit)
                db.DBSession.flush()
            else:
                #log.critical("UNIT {}.{} EXISTANT".format(dimension_name,json_unit['abbr']))
                pass

    try:
        # Needed for test. on HWI it fails so we need to catch the exception and pass by
        db.DBSession.commit()
    except Exception as e:
        # Needed for HWI
        pass

    return
Adds the units and the dimensions reading a json file. It adds only dimensions and units that are not inside the db It is possible adding new dimensions and units to the DB just modifiyin the json file
entailment
def get_dimension_from_db_by_name(dimension_name):
    """
    Fetch a dimension record by name, wrapped as a JSONObject.

    Raises ResourceNotFoundError when no such dimension exists.
    """
    try:
        dimension_i = db.DBSession.query(Dimension)\
            .filter(Dimension.name == dimension_name).one()
    except NoResultFound:
        raise ResourceNotFoundError("Dimension %s not found"%(dimension_name))
    return JSONObject(dimension_i)
Gets a dimension from the DB table.
entailment
def get_rules(scenario_id, **kwargs):
    """
    Return all active ('A' status) rules attached to a scenario.
    """
    return db.DBSession.query(Rule)\
        .filter(Rule.scenario_id == scenario_id, Rule.status == 'A')\
        .all()
Get all the rules for a given scenario.
entailment
def get_attribute_by_id(attr_id, **kwargs):
    """
    Look up an attribute by its primary key.

    Returns None when no attribute has that id.
    """
    try:
        return db.DBSession.query(Attr).filter(Attr.id == attr_id).one()
    except NoResultFound:
        return None
Get a specific attribute by its ID.
entailment
def get_template_attributes(template_id, **kwargs):
    """
    Get every attribute used by the types of a given template.
    """
    try:
        # BUG FIX: the final join condition compared Attr.id to TypeAttr.id
        # (the TypeAttr row id) rather than TypeAttr.attr_id, so unrelated
        # attributes were returned.
        attrs_i = db.DBSession.query(Attr)\
            .filter(TemplateType.template_id == template_id)\
            .filter(TypeAttr.type_id == TemplateType.id)\
            .filter(Attr.id == TypeAttr.attr_id).all()
        log.debug(attrs_i)
        return attrs_i
    # NOTE(review): .all() never raises NoResultFound (it returns []);
    # this branch is kept for interface compatibility.
    except NoResultFound:
        return None
Get a specific attribute by its ID.
entailment
def get_attribute_by_name_and_dimension(name, dimension_id=None, **kwargs):
    """
    Look up an attribute by name and dimension.

    dimension_id may be None, because a dimension is no longer mandatory on
    an attribute.  Returns None when no match exists.
    """
    try:
        attr_found = db.DBSession.query(Attr)\
            .filter(and_(Attr.name == name, Attr.dimension_id == dimension_id))\
            .one()
    except NoResultFound:
        return None
    log.debug("Attribute retrieved")
    return attr_found
Get a specific attribute by its name. dimension_id can be None, because in attribute the dimension_id is not anymore mandatory
entailment
def add_attribute(attr, **kwargs):
    """
    Add a generic attribute, which can then be used in creating a resource
    attribute, and put into a type.

    If an attribute with the same name and dimension already exists, it is
    returned instead of creating a duplicate.

    .. code-block:: python

        (Attr){
            id = 1020
            name = "Test Attr"
            dimension_id = 123
        }
    """
    log.debug("Adding attribute: %s", attr.name)

    try:
        attr_i = db.DBSession.query(Attr).filter(Attr.name == attr.name,
                                                 Attr.dimension_id == attr.dimension_id).one()
        log.info("Attr already exists")
    except NoResultFound:
        attr_i = Attr(name = attr.name, dimension_id = attr.dimension_id)
        attr_i.description = attr.description
        db.DBSession.add(attr_i)
        db.DBSession.flush()
        log.info("New attr added")

    return JSONObject(attr_i)
Add a generic attribute, which can then be used in creating a resource attribute, and put into a type. .. code-block:: python (Attr){ id = 1020 name = "Test Attr" dimension_id = 123 }
entailment
def update_attribute(attr, **kwargs):
    """
    Update an existing generic attribute in place.

    .. code-block:: python

        (Attr){
            id = 1020
            name = "Test Attr"
            dimension_id = 123
        }

    Returns:
        A ``JSONObject`` view of the updated attribute.
    """
    log.debug("Updating attribute: %s", attr.name)
    attr_i = _get_attr(attr.id)
    attr_i.name = attr.name
    attr_i.dimension_id = attr.dimension_id
    attr_i.description = attr.description
    # NOTE(review): a post-update consistency check (check_sion) was
    # previously disabled here — confirm whether it should be restored.
    db.DBSession.flush()
    return JSONObject(attr_i)
Add a generic attribute, which can then be used in creating a resource attribute, and put into a type. .. code-block:: python (Attr){ id = 1020 name = "Test Attr" dimension_id = 123 }
entailment
def add_attributes(attrs, **kwargs):
    """
    Add a list of generic attributes, which can then be used in
    creating resource attributes, and put into a type.

    .. code-block:: python

        (Attr){
            id = 1020
            name = "Test Attr"
            dimen = "very big"
        }

    Attributes that already exist (same lower-cased name and
    dimension_id) are not re-inserted; the existing records are
    returned alongside the newly created ones.
    """
    # Index every existing attribute by (lower-cased name, dimension)
    # so duplicates can be detected with a single dict lookup.
    existing_by_key = {
        (a.name.lower(), a.dimension_id): JSONObject(a)
        for a in db.DBSession.query(Attr).all()
    }

    to_insert = []
    already_present = []
    for candidate in attrs:
        if candidate is None:
            # A None entry cannot be managed; skip it.
            continue
        log.debug("Adding attribute: %s", candidate)
        match = existing_by_key.get(
            (candidate.name.lower(), candidate.dimension_id))
        if match is None:
            to_insert.append(JSONObject(candidate))
        else:
            already_present.append(match)

    inserted = []
    for attr in to_insert:
        attr_i = Attr()
        attr_i.name = attr.name
        attr_i.dimension_id = attr.dimension_id
        attr_i.description = attr.description
        db.DBSession.add(attr_i)
        inserted.append(attr_i)
    db.DBSession.flush()

    # New rows first, then the pre-existing matches (original order).
    return [JSONObject(a) for a in inserted + already_present]
Add a list of generic attributes, which can then be used in creating a resource attribute, and put into a type. .. code-block:: python (Attr){ id = 1020 name = "Test Attr" dimen = "very big" }
entailment
def get_attributes(**kwargs):
    """Return every attribute in the database, ordered by name."""
    return db.DBSession.query(Attr).order_by(Attr.name).all()
Get all attributes
entailment