sentence1: string, lengths 52 to 3.87M
sentence2: string, lengths 1 to 47.2k
label: string, 1 class (entailment)
def update(self, client):
    """Execute update command on instance."""
    update_cmd = "{sudo} '{refresh};{update}'".format(
        sudo=self.get_sudo_exec_wrapper(),
        refresh=self.get_refresh_repo_cmd(),
        update=self.get_update_cmd()
    )
    out = ''
    try:
        out = ipa_utils.execute_ssh_command(client, update_cmd)
    except Exception as error:
        raise IpaDistroException(
            'An error occurred updating instance: %s' % error
        )
    return out
Execute update command on instance.
entailment
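For orientation, a minimal sketch of the command string this `update` composes. The helper return values below are hypothetical; the real strings come from the distro subclass's get_sudo_exec_wrapper, get_refresh_repo_cmd, and get_update_cmd.

# Hypothetical helper outputs for a zypper-based distro; only the
# string composition mirrors the function above, the values are illustrative.
sudo = "sudo sh -c"
refresh = "zypper refresh"
update = "zypper up -y"
update_cmd = "{sudo} '{refresh};{update}'".format(
    sudo=sudo, refresh=refresh, update=update)
print(update_cmd)  # sudo sh -c 'zypper refresh;zypper up -y'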
def annotate(self, content, includeCat=None, excludeCat=None, minLength=None,
             longestOnly=None, includeAbbrev=None, includeAcronym=None,
             includeNumbers=None, output='text/plain; charset=utf-8'):
    """ Annotate text
        from: /annotations

        Arguments:
        content: The content to annotate
        includeCat: A set of categories to include
        excludeCat: A set of categories to exclude
        minLength: The minimum number of characters in annotated entities
        longestOnly: Should only the longest entity be returned for an overlapping group
        includeAbbrev: Should abbreviations be included
        includeAcronym: Should acronyms be included
        includeNumbers: Should numbers be included

        outputs:
        text/plain; charset=utf-8
    """
    kwargs = {'content': content, 'includeCat': includeCat,
              'excludeCat': excludeCat, 'minLength': minLength,
              'longestOnly': longestOnly, 'includeAbbrev': includeAbbrev,
              'includeAcronym': includeAcronym, 'includeNumbers': includeNumbers}
    kwargs = {k: dumps(v) if builtins.type(v) is dict else v
              for k, v in kwargs.items()}
    param_rest = self._make_rest(None, **kwargs)
    url = self._basePath + ('/annotations').format(**kwargs)
    requests_params = kwargs
    output = self._get('GET', url, requests_params, output)
    return output if output else None
Annotate text from: /annotations Arguments: content: The content to annotate includeCat: A set of categories to include excludeCat: A set of categories to exclude minLength: The minimum number of characters in annotated entities longestOnly: Should only the longest entity be returned for an overlapping group includeAbbrev: Should abbreviations be included includeAcronym: Should acronyms be included includeNumbers: Should numbers be included outputs: text/plain; charset=utf-8
entailment
def reachableFrom(self, id, hint=None, relationships=None, lbls=None,
                  callback=None, output='application/json'):
    """ Get all the nodes reachable from a starting point, traversing the provided edges.
        from: /graph/reachablefrom/{id}

        Arguments:
        id: The type of the edge
        hint: A label hint to find the start node.
        relationships: A list of relationships to traverse, in order.
                       Supports cypher operations such as relA|relB or relA*.
        lbls: A list of node labels to filter.
        callback: Name of the JSONP callback ('fn' by default).
                  Supplying this parameter or requesting a javascript media type
                  will cause a JSONP response to be rendered.

        outputs:
        application/json
        application/graphson
        application/xml
        application/graphml+xml
        application/xgmml
        text/gml
        text/csv
        text/tab-separated-values
        image/jpeg
        image/png
    """
    if id and id.startswith('http:'):
        id = parse.quote(id, safe='')
    kwargs = {'id': id, 'hint': hint, 'relationships': relationships,
              'lbls': lbls, 'callback': callback}
    kwargs = {k: dumps(v) if builtins.type(v) is dict else v
              for k, v in kwargs.items()}
    param_rest = self._make_rest('id', **kwargs)
    url = self._basePath + ('/graph/reachablefrom/{id}').format(**kwargs)
    requests_params = {k: v for k, v in kwargs.items() if k != 'id'}
    output = self._get('GET', url, requests_params, output)
    return output if output else []
Get all the nodes reachable from a starting point, traversing the provided edges. from: /graph/reachablefrom/{id} Arguments: id: The type of the edge hint: A label hint to find the start node. relationships: A list of relationships to traverse, in order. Supports cypher operations such as relA|relB or relA*. lbls: A list of node labels to filter. callback: Name of the JSONP callback ('fn' by default). Supplying this parameter or requesting a javascript media type will cause a JSONP response to be rendered. outputs: application/json application/graphson application/xml application/graphml+xml application/xgmml text/gml text/csv text/tab-separated-values image/jpeg image/png
entailment
def ordered(start, edges, predicate=None, inverse=False):
    """ Depth first edges from a SciGraph response. """
    s, o = 'sub', 'obj'
    if inverse:
        s, o = o, s
    for edge in edges:
        if predicate is not None and edge['pred'] != predicate:
            continue  # skip edges that don't match the requested predicate
        if edge[s] == start:
            yield edge
            yield from Graph.ordered(edge[o], edges, predicate=predicate)
Depth first edges from a SciGraph response.
entailment
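A standalone sketch of how this traversal behaves on a toy edge list. The original recurses via Graph.ordered; the version below inlines the same logic so the example runs on its own.

# Edge dicts mimic a SciGraph response: 'sub', 'pred', 'obj' keys.
edges = [
    {'sub': 'a', 'pred': 'partOf', 'obj': 'b'},
    {'sub': 'b', 'pred': 'partOf', 'obj': 'c'},
    {'sub': 'a', 'pred': 'hasPart', 'obj': 'x'},
]

def ordered(start, edges, predicate=None, inverse=False):
    # same logic as above, recursing on itself instead of Graph.ordered
    s, o = ('obj', 'sub') if inverse else ('sub', 'obj')
    for edge in edges:
        if predicate is not None and edge['pred'] != predicate:
            continue
        if edge[s] == start:
            yield edge
            yield from ordered(edge[o], edges, predicate=predicate)

for e in ordered('a', edges, predicate='partOf'):
    print(e['sub'], '->', e['obj'])
# a -> b
# b -> c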
def tcsort(item):  # FIXME SUCH WOW SO INEFFICIENT O_O
    """ get len of transitive closure; assumes items form a tree... """
    return len(item[1]) + sum(tcsort(kv) for kv in item[1].items())
get len of transitive closure; assumes items form a tree...
entailment
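A quick worked example of the recursion, using the tcsort above: on a nested dict tree it returns the number of descendant nodes, so deeper subtrees sort accordingly.

# ('root', ...) has two direct children plus one grandchild -> 3
item = ('root', {'a': {'b': {}}, 'c': {}})
# len({'a': ..., 'c': ...}) = 2, plus tcsort(('a', {'b': {}})) = 1,
# plus tcsort(('c', {})) = 0
assert tcsort(item) == 3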
def get_node(start, tree, pnames):
    """ for each parent find a single branch to root """
    def get_first_branch(node):
        if node not in pnames:  # one way to hit a root
            return []
        if pnames[node]:  # mmmm names
            fp = pnames[node][0]
            if cycle_check(node, fp, pnames):
                fp = pnames[node][1]  # if there are double cycles I WILL KILL FOR THE PLEASURE OF IT
            print(fp)
            return [fp] + get_first_branch(fp)
        else:
            return []

    branch = get_first_branch(start)
    for n in branch[::-1]:
        tree = tree[n]
    assert start in tree, "our start wasn't in the tree! OH NO!"
    branch = [start] + branch
    print('branch', branch)
    return tree, branch
for each parent find a single branch to root
entailment
def dematerialize(parent_name, parent_node):
    # FIXME we need to demat more than just leaves!
    # FIXME still an issue: Fornix, Striatum, Diagonal Band
    """ Remove nodes higher in the tree that occur further down the
        SAME branch. If they occur down OTHER branches leave them alone.

        NOTE: modifies in place!
    """
    lleaves = {}
    children = parent_node[parent_name]
    if not children:  # children could be empty ? i think this only happens @ root?
        #print('at bottom', parent_name)
        lleaves[parent_name] = None
        return lleaves

    children_ord = reversed(sorted(sorted(((k, v) for k, v in children.items()),
                                          key=alphasortkey),
                                   #key=lambda a: f'{a[0]}'.split('>')[1] if '>' in f'{a[0]}' else f'a[0]'),
                                   #key=lambda a: a[0].split('>') if '>' in a[0] else a[0]),
                                   key=tcsort))  # make sure we hit deepest first

    for child_name, _ in children_ord:  # get list so we can go ahead and pop
        #print(child_name)
        new_lleaves = dematerialize(child_name, children)
        if child_name == 'magnetic resonance imaging':  # debugging failing demat
            pass
            #embed()
        if (child_name in new_lleaves
                or all(l in lleaves for l in new_lleaves)):
            # if it is a leaf or all childs are leaves as well
            if child_name in lleaves:  # if it has previously been identified as a leaf!
                #print('MATERIALIZATION DETECTED! LOWER PARENT:',
                      #lleaves[child_name], 'ZAPPING!:', child_name,
                      #'OF PARENT:', parent_name)
                children.pop(child_name)
                #print('cn', child_name, 'pn', parent_name, 'BOTTOM')
            #else:  # if it has NOT previously been identified as a leaf, add the parent!
                #new_lleaves[child_name] = parent_name  # pass it back up to nodes above
                #print('cn', child_name, 'pn', parent_name)

        #else:  # it is a node but we want to dematerialize them too!
        lleaves[child_name] = parent_name
        lleaves.update(new_lleaves)

    return lleaves
Remove nodes higher in the tree that occur further down the SAME branch. If they occur down OTHER branches leave them alone. NOTE: modifies in place!
entailment
def inv_edges(json):
    """Switch obj/sub for a set of edges
       (makes fixing known inverse edges MUCH easier)"""
    for edge in json['edges']:
        sub, obj = edge['sub'], edge['obj']
        edge['sub'] = obj
        edge['obj'] = sub
        edge['pred'] += 'INVERTED'
Switch obj/sub for a set of edges (makes fixing known inverse edges MUCH easier)
entailment
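Usage sketch, assuming the inv_edges definition above; it mutates the blob in place and tags the predicate so the inversion stays visible.

blob = {'edges': [{'sub': 'axon', 'pred': 'partOf', 'obj': 'neuron'}]}
inv_edges(blob)
print(blob['edges'][0])
# {'sub': 'neuron', 'pred': 'partOfINVERTED', 'obj': 'axon'}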
def collect_results(results_file):
    """Return the result (pass/fail) for json file."""
    with open(results_file, 'r') as results:
        data = json.load(results)
    return data
Return the result (pass/fail) for json file.
entailment
def get_terms(self):
    ''' GROUP BY is a shortcut to getting only the first row in each group '''
    if not self.terms.empty:
        return self.terms
    if self.from_backup:
        self.terms = open_pickle(TERMS_BACKUP_PATH)
        return self.terms
    engine = create_engine(self.db_url)
    data = """
        SELECT t.id as tid, t.ilx, t.label, t.definition, t.type, t.comment, t.version
        FROM terms t
        GROUP BY t.ilx
    """
    self.terms = pd.read_sql(data, engine)
    create_pickle(self.terms, TERMS_BACKUP_PATH)
    return self.terms
GROUP BY is a shortcut to getting only the first row in each group
entailment
def get_terms_complete(self) -> pd.DataFrame:
    ''' Gets complete entity data like term/view '''
    if not self.terms_complete.empty:
        return self.terms_complete
    if self.from_backup:
        self.terms_complete = open_pickle(TERMS_COMPLETE_BACKUP_PATH)
        return self.terms_complete
    ilx2synonyms = self.get_ilx2synonyms()
    ilx2existing_ids = self.get_ilx2existing_ids()
    ilx2annotations = self.get_ilx2annotations()
    ilx2superclass = self.get_ilx2superclass()
    ilx_complete = []
    header = ['Index'] + list(self.fetch_terms().columns)
    for row in self.fetch_terms().itertuples():
        row = {header[i]: val for i, val in enumerate(row)}
        row['synonyms'] = ilx2synonyms.get(row['ilx'])
        row['existing_ids'] = ilx2existing_ids[row['ilx']]  # if breaks we have worse problems
        row['annotations'] = ilx2annotations.get(row['ilx'])
        row['superclass'] = ilx2superclass.get(row['ilx'])
        ilx_complete.append(row)
    terms_complete = pd.DataFrame(ilx_complete)
    create_pickle(terms_complete, TERMS_COMPLETE_BACKUP_PATH)
    return terms_complete
Gets complete entity data like term/view
entailment
def get_ilx2superclass(self, clean: bool = True):
    ''' clean: for list of literals only '''
    ilx2superclass = defaultdict(list)
    header = ['Index'] + list(self.fetch_superclasses().columns)
    for row in self.fetch_superclasses().itertuples():
        row = {header[i]: val for i, val in enumerate(row)}
        if clean:
            superclass = {
                'tid': row['superclass_tid'],
                'ilx': row['superclass_ilx'],
            }
            ilx2superclass[row['term_ilx']].append(superclass)
        else:
            ilx2superclass[row['term_ilx']].append(row)
    return ilx2superclass
clean: for list of literals only
entailment
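These ilx2*/tid2* helpers all share the same itertuples-to-dict pattern; here is a self-contained sketch with a toy DataFrame (the column names and ILX values are illustrative, matching what get_ilx2superclass expects).

import pandas as pd

df = pd.DataFrame({'term_ilx': ['ilx_0101431'],
                   'superclass_tid': [42],
                   'superclass_ilx': ['ilx_0101430']})
# itertuples yields (Index, col1, col2, ...); zipping against a header
# list turns each namedtuple into a plain dict keyed by column name.
header = ['Index'] + list(df.columns)
for row in df.itertuples():
    row = {header[i]: val for i, val in enumerate(row)}
    print(row['term_ilx'], '->', row['superclass_ilx'])
# ilx_0101431 -> ilx_0101430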
def get_tid2annotations(self, clean: bool = True):
    ''' clean: for list of literals only '''
    tid2annotations = defaultdict(list)
    header = ['Index'] + list(self.fetch_annotations().columns)
    for row in self.fetch_annotations().itertuples():
        row = {header[i]: val for i, val in enumerate(row)}
        if clean:
            annotation = {
                'tid': row['tid'],
                'annotation_type_tid': row['annotation_type_tid'],
                'value': row['value'],
                'annotation_type_label': row['annotation_type_label'],
            }
            tid2annotations[row['tid']].append(annotation)
        else:
            tid2annotations[row['tid']].append(row)
    return tid2annotations
clean: for list of literals only
entailment
def get_tid2synonyms(self, clean: bool = True):
    ''' clean: for list of literals only '''
    tid2synonyms = defaultdict(list)  # was a plain dict, which would KeyError on append
    header = ['Index'] + list(self.fetch_synonyms().columns)
    for row in self.fetch_synonyms().itertuples():
        row = {header[i]: val for i, val in enumerate(row)}
        if clean:
            synonym = {'literal': row['literal'], 'type': row['type']}
            tid2synonyms[row['tid']].append(synonym)
        else:
            tid2synonyms[row['tid']].append(row)
    return tid2synonyms
clean: for list of literals only
entailment
def get_ilx2synonyms(self, clean: bool = True):
    ''' clean: for list of literals only '''
    ilx2synonyms = defaultdict(list)
    header = ['Index'] + list(self.fetch_synonyms().columns)
    for row in self.fetch_synonyms().itertuples():
        row = {header[i]: val for i, val in enumerate(row)}
        if clean:
            synonym = {'literal': row['literal'], 'type': row['type']}
            ilx2synonyms[row['ilx']].append(synonym)
        else:
            ilx2synonyms[row['ilx']].append(row)
    return ilx2synonyms
clean: for list of literals only
entailment
def superclasses_bug_fix(data):
    ''' PHP returns "id" in superclass but only accepts superclass_tid '''
    for i, value in enumerate(data['superclasses']):
        data['superclasses'][i]['superclass_tid'] = data['superclasses'][i].pop('id')
    return data
PHP returns "id" in superclass but only accepts superclass_tid
entailment
def log_info(self, data):
    ''' Logs successful responses '''
    info = 'label={label}, id={id}, ilx={ilx}, superclass_tid={super_id}'
    info_filled = info.format(label=data['label'],
                              id=data['id'],
                              ilx=data['ilx'],
                              super_id=data['superclasses'][0]['id'])
    logging.info(info_filled)
    return info_filled
Logs successful responses
entailment
def get(self, url):
    ''' Requests data from database '''
    req = r.get(url, headers=self.headers, auth=self.auth)
    return self.process_request(req)
Requests data from database
entailment
def post(self, url, data):
    ''' Gives data to database '''
    data.update({'key': self.APIKEY})
    req = r.post(url, data=json.dumps(data), headers=self.headers, auth=self.auth)
    return self.process_request(req)
Gives data to database
entailment
def process_request(self, req):
    ''' Checks to see if data returned from database is usable '''
    # Check status code of request
    req.raise_for_status()  # if codes not in 200s; error raise
    # Proper status code, but check if server returned a warning
    try:
        output = req.json()
    except ValueError:  # server returned an html error page
        exit(req.text)
    # Try to find an error msg in the server response
    try:
        error = output['data'].get('errormsg')
    except (KeyError, AttributeError):
        error = output.get('errormsg')  # server has 2 variations of errormsg
    finally:
        if error:
            exit(error)
    return output
Checks to see if data returned from database is usable
entailment
def is_equal(self, string1, string2):
    ''' Simple string comparator '''
    return string1.lower().strip() == string2.lower().strip()
Simple string comparator
entailment
def get_data_from_ilx(self, ilx_id):
    ''' Gets full metadata (except annotations and relationships) from its ILX ID '''
    ilx_id = self.fix_ilx(ilx_id)
    url_base = self.base_path + "ilx/search/identifier/{identifier}?key={APIKEY}"
    url = url_base.format(identifier=ilx_id, APIKEY=self.APIKEY)
    output = self.get(url)
    # Can be a successful request, but not a successful response
    success = self.check_success(output)
    return output, success
Gets full metadata (except annotations and relationships) from its ILX ID
entailment
def search_by_label(self, label):
    ''' Server returns anything that is similar in any category '''
    url_base = self.base_path + 'term/search/{term}?key={api_key}'
    url = url_base.format(term=label, api_key=self.APIKEY)
    return self.get(url)
Server returns anything that is similar in any category
entailment
def are_ilx(self, ilx_ids):
    ''' Checks list of objects to see if they are usable ILX IDs '''
    total_data = []
    for ilx_id in ilx_ids:
        ilx_id = ilx_id.replace('http', '').replace('.', '').replace('/', '')
        data, success = self.get_data_from_ilx(ilx_id)
        if success:
            total_data.append(data['data'])
        else:
            total_data.append({})
    return total_data
Checks list of objects to see if they are usable ILX IDs
entailment
def add_triple(self, subj, pred, obj):
    ''' Adds an entity property to an existing entity '''
    subj_data, pred_data, obj_data = self.are_ilx([subj, pred, obj])

    # RELATIONSHIP PROPERTY
    if subj_data.get('id') and pred_data.get('id') and obj_data.get('id'):
        if pred_data['type'] != 'relationship':
            return self.test_check('Adding a relationship as format '
                                   '"term1_ilx relationship_ilx term2_ilx"')
        return self.add_relationship(term1=subj_data,
                                     relationship=pred_data,
                                     term2=obj_data)

    # ANNOTATION PROPERTY
    elif subj_data.get('id') and pred_data.get('id'):
        if pred_data['type'] != 'annotation':
            return self.test_check('Adding a relationship as format '
                                   '"term_ilx annotation_ilx value"')
        return self.add_annotation(entity=subj_data,
                                   annotation=pred_data,
                                   value=obj)

    # UPDATE ENTITY
    elif subj_data.get('id'):
        data = subj_data
        _pred = self.ttl2sci_map.get(pred)
        if not _pred:
            error = pred + " does not have the correct RDF format or it is not an option"
            return self.test_check(error)
        data = self.custom_update(data, _pred, obj)
        if data == 'failed':  # for debugging custom_update
            return data
        data = superclasses_bug_fix(data)
        url_base = self.base_path + 'term/edit/{id}'
        url = url_base.format(id=data['id'])
        return self.post(url, data)

    else:
        return self.test_check('The ILX ID(s) provided do not exist')
Adds an entity property to an existing entity
entailment
def add_relationship(self, term1, relationship, term2):
    ''' Creates a relationship between 3 entities in database '''
    url = self.base_path + 'term/add-relationship'
    data = {'term1_id': term1['id'],
            'relationship_tid': relationship['id'],
            'term2_id': term2['id'],
            'term1_version': term1['version'],
            'relationship_term_version': relationship['version'],
            'term2_version': term2['version']}
    return self.post(url, data)
Creates a relationship between 3 entities in database
entailment
def add_annotation(self, entity, annotation, value):
    ''' Adds an annotation property to an existing entity '''
    url = self.base_path + 'term/add-annotation'
    data = {'tid': entity['id'],
            'annotation_tid': annotation['id'],
            'value': value,
            'term_version': entity['version'],
            'annotation_term_version': annotation['version']}
    return self.post(url, data)
Adds an annotation property to an existing entity
entailment
def custom_update(self, data, pred, obj):
    ''' Updates existing entity property based on the predicate input '''
    if isinstance(data[pred], str):  # for all simple properties of str value
        data[pred] = str(obj)
    else:
        # synonyms, superclasses, and existing_ids have special requirements
        if pred == 'synonyms':
            literals = [d['literal'] for d in data[pred]]
            if obj not in literals:
                data[pred].append({'literal': obj})  # synonyms req for post
        elif pred == 'superclasses':
            ilx_ids = [d['ilx'] for d in data[pred]]
            if obj not in ilx_ids:
                _obj = obj.replace('ILX:', 'ilx_')
                super_data, success = self.get_data_from_ilx(ilx_id=_obj)
                super_data = super_data['data']
                if success:
                    # superclass req post
                    data[pred].append({'id': super_data['id'], 'ilx': _obj})
                else:
                    return self.test_check('Your superclass ILX ID '
                                           + _obj + ' does not exist.')
        elif pred == 'existing_ids':  # FIXME need to autogenerate curies from a map
            iris = [d['iri'] for d in data[pred]]
            if obj not in iris:
                if 'http' not in obj:
                    return self.test_check('existing id value must '
                                           'be a uri containing "http"')
                data[pred].append({
                    'curie': self.qname(obj),
                    'iri': obj,
                    'preferred': '0'  # preferred is auto generated by preferred_change
                })
            #data[pred] = []
            data = self.preferred_change(data)  # one existing id is determined to be preferred
        else:  # somehow broke this code
            return self.test_check(pred + ' has slipped through the cracks')
    return data
Updates existing entity property based on the predicate input
entailment
def add_entity(self, rdf_type, superclass, label, definition=None):
    ''' Adds entity as long as it doesn't exist and has a usable
        superclass ILX ID and rdf:type '''
    # Checks if you inputted the right type
    # (replace before lower(), otherwise 'owl:Class' would never match)
    rdf_type = rdf_type.strip().replace('owl:Class', 'term').lower()
    accepted_types = ['term', 'cde', 'annotation', 'relationship', 'fde']
    if rdf_type not in accepted_types:
        error = 'rdf_type must be owl:Class or one of the following: {accepted_types}'
        return self.test_check(error.format(accepted_types=accepted_types))

    # Pulls superclass data out and checks if it exists
    superclass_data, success = self.get_data_from_ilx(ilx_id=superclass)
    superclass_data = superclass_data['data']
    if not success:
        error = '{superclass} does not exist and cannot be used as a superclass.'
        return self.test_check(error.format(superclass=superclass))

    # Searches database to see if the term exists. Will return anything similar,
    # but we want only what is_equal
    search_results = self.search_by_label(label)['data']
    search_results = [sr for sr in search_results
                      if self.is_equal(sr['label'], label_bug_fix(label))]

    # If search_results is not empty, we need to see if the type and superclass are also a
    # match. If not, you can create this entity. HOWEVER, if you are the creator of an entity,
    # you can only have one label of any type or superclass
    if search_results:
        search_hits = 0
        for entity in search_results:  # guaranteed to only have one match if any
            entity, success = self.get_data_from_ilx(ilx_id=entity['ilx'])  # all metadata
            entity = entity['data']
            user_url = 'https://scicrunch.org/api/1/user/info?key={api_key}'
            user_data = self.get(user_url.format(api_key=self.APIKEY))
            user_data = user_data['data']
            if str(entity['uid']) == str(user_data['id']):  # creator check
                bp = 'Entity {label} already created by you with ILX ID {ilx_id} and of type {rdf_type}'
                return self.test_check(bp.format(label=label,
                                                 ilx_id=entity['ilx'],
                                                 rdf_type=entity['type']))
            types_equal = self.is_equal(entity['type'], rdf_type)  # type check
            if 'superclasses' in entity and entity['superclasses']:
                entity_super_ilx = entity['superclasses'][0]['ilx']
            else:
                entity_super_ilx = ''
            supers_equal = self.is_equal(entity_super_ilx, superclass_data['ilx'])
            if types_equal and supers_equal:
                bp = 'Entity {label} already exists with ILX ID {ilx_id} and of type {rdf_type}'
                return self.test_check(bp.format(label=label,
                                                 ilx_id=self.fix_ilx(entity['ilx']),
                                                 rdf_type=entity['type']))

    # Generates ILX ID and does a validation check
    url = self.base_path + 'ilx/add'
    data = {'term': label,
            'superclasses': [{'id': superclass_data['id'],
                              'ilx': superclass_data['ilx']}],
            'type': rdf_type}
    data = superclasses_bug_fix(data)
    output = self.post(url, data)['data']
    if output.get('ilx'):
        ilx_id = output['ilx']
    else:
        ilx_id = output['fragment']  # archetype of beta

    # Uses generated ILX ID to make a formal row in the database
    url = self.base_path + 'term/add'
    # un-escape HTML entities the server may return in labels
    data = {'label': label.replace('&#39;', "'").replace('&#34;', '"'),
            'ilx': ilx_id,
            'superclasses': [{'id': superclass_data['id'],
                              'ilx': superclass_data['ilx']}],
            'type': rdf_type}
    data = superclasses_bug_fix(data)
    if definition:
        data.update({'definition': definition})
    return self.post(url, data)
Adds entity as long as it doesn't exist and has a usable superclass ILX ID and rdf:type
entailment
def sortProperties(self, properties):  # modified to sort objects using their global rank
    """Take a hash from predicate uris to lists of values.
       Sort the lists of values.  Return a sorted list of properties."""
    # Sort object lists
    for prop, objects in properties.items():
        objects.sort(key=self._globalSortKey)
    # Make sorted list of properties
    return sorted(properties, key=lambda p: self.predicate_rank[p])
Take a hash from predicate uris to lists of values. Sort the lists of values. Return a sorted list of properties.
entailment
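A minimal stand-in for the two-level sort, with toy rank values and a plain sort replacing self.predicate_rank and self._globalSortKey (both of those are assumptions filled in for illustration).

# objects within each predicate get sorted first, then the predicates
# themselves are ordered by their precomputed rank
predicate_rank = {'rdfs:label': 0, 'rdfs:comment': 1}
properties = {'rdfs:comment': ['b', 'a'], 'rdfs:label': ['z', 'y']}
for objects in properties.values():
    objects.sort()  # stand-in for key=self._globalSortKey
ordered_preds = sorted(properties, key=lambda p: predicate_rank[p])
print(ordered_preds)             # ['rdfs:label', 'rdfs:comment']
print(properties['rdfs:label'])  # ['y', 'z']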
def _buildPredicateHash(self, subject):  # XXX unmodified
    """ Build a hash key by predicate to a list of objects for the given subject """
    properties = {}
    for s, p, o in self.store.triples((subject, None, None)):
        oList = properties.get(p, [])
        oList.append(o)
        properties[p] = oList
    return properties
Build a hash key by predicate to a list of objects for the given subject
entailment
def isValidList(self, l):  # modified to flatten lists specified using [ a rdf:List; ] syntax
    """ Checks if l is a valid RDF list, i.e. no nodes have other properties. """
    try:
        if self.store.value(l, RDF.first) is None:
            return False
    except Exception:
        return False
    while l:
        if l != RDF.nil:
            po = list(self.store.predicate_objects(l))
            if (RDF.type, RDF.List) in po and len(po) == 3:
                pass  # an explicit rdf:type rdf:List is tolerated
            elif len(po) != 2:
                return False
        l = self.store.value(l, RDF.rest)
    return True
Checks if l is a valid RDF list, i.e. no nodes have other properties.
entailment
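For context, a hand-built rdf:List that the check above would accept, sketched with plain rdflib: each cons cell carries exactly rdf:first and rdf:rest, and any extra property on a cell makes isValidList return False.

from rdflib import Graph, BNode, Literal, RDF

g = Graph()
n1, n2 = BNode(), BNode()
g.add((n1, RDF.first, Literal(1)))
g.add((n1, RDF.rest, n2))
g.add((n2, RDF.first, Literal(2)))
g.add((n2, RDF.rest, RDF.nil))
# each node now has exactly 2 predicate/object pairs, so the list is
# valid; adding (n1, RDF.type, RDF.List) is also tolerated since the
# method allows len(po) == 3 in that specific case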
def _write(self, value):
    """ rename to write and import inspect to debug the call stack """
    if ' ' in value:
        s = inspect.stack()
        fn = s[1].function
        super().write('%%DEBUG {} %%'.format(fn))
    super().write(value)
rename to write and import inspect to debug the call stack
entailment
def serialize(self, *args, **kwargs):
    """ Modified to allow additional labels to be passed in. """
    if 'labels' in kwargs:
        # populate labels from outside the local graph
        self._labels.update(kwargs['labels'])
    super(HtmlTurtleSerializer, self).serialize(*args, **kwargs)
Modified to allow additional labels to be passed in.
entailment
def checkCalledInside(classname, stack):
    """ Fantastically inefficient! """
    ok = False
    for s in stack[1:]:
        cc = s.code_context[0]
        if 'class' in cc:
            if '(' in cc:
                bases = [b.strip() for b in cc.split('(')[1].split(')')[0].split(',')]
                for base_name in bases:
                    if base_name in s.frame.f_globals:
                        base = s.frame.f_globals[base_name]
                        for cls in base.__class__.mro(base):
                            if cls.__name__ == classname:
                                ok = True
                                break
                        if ok:
                            break
                if ok:
                    break
    if not ok:
        name = stack[0].function
        raise SyntaxError('%s not called inside a class inheriting from LocalNameManager'
                          % name)
Fantastically inefficient!
entailment
def addLNT(LocalName, phenoId, predicate, g=None):  # XXX deprecated
    """ Add a local name for a phenotype from a pair of identifiers """
    if g is None:
        s = inspect.stack(0)  # horribly inefficient
        checkCalledInside('LocalNameManager', s)
        g = s[1][0].f_locals  # get globals of calling scope
    addLN(LocalName, Phenotype(phenoId, predicate), g)
Add a local name for a phenotype from a pair of identifiers
entailment
def resetLocalNames(g=None):
    """ WARNING: Only call from top level!
        THIS DOES NOT RESET NAMES in an embedded IPython!!!
        Remove any local names that have already been defined. """
    if g is None:
        g = inspect.stack(0)[1][0].f_locals  # get globals of calling scope
    for k in list(graphBase.LocalNames.keys()):
        try:
            g.pop(k)
        except KeyError:
            raise KeyError('%s not in globals, are you calling resetLocalNames '
                           'from a local scope?' % k)
        graphBase.LocalNames.pop(k)
WARNING: Only call from top level! THIS DOES NOT RESET NAMES in an embedded IPython!!! Remove any local names that have already been defined.
entailment
def load_existing(self):
    """ advanced usage allows loading multiple sets of neurons and using
        a config object to keep track of the different graphs """
    from pyontutils.closed_namespaces import rdfs
    # bag existing
    try:
        next(iter(self.neurons()))
        raise self.ExistingNeuronsError('Existing neurons detected. Please '
                                        'load from file before creating neurons!')
    except StopIteration:
        pass

    def getClassType(s):
        graph = self.load_graph
        Class = infixowl.Class(s, graph=graph)
        for ec in Class.equivalentClass:
            if isinstance(ec.identifier, rdflib.BNode):
                bc = infixowl.CastClass(ec, graph=graph)
                if isinstance(bc, infixowl.BooleanClass):
                    for id_ in bc._rdfList:
                        if isinstance(id_, rdflib.URIRef):
                            yield id_  # it's one of our types

    # bug is that I am not wiping graphBase.knownClasses and swapping it for each config
    # OR the bug is that self.load_graph is persisting, either way the call to type()
    # below seems to be the primary suspect for the issue
    if not graphBase.ignore_existing:
        ogp = Path(graphBase.ng.filename)  # FIXME ng.filename <-> out_graph_path property ...
        if ogp.exists():
            from itertools import chain
            from rdflib import Graph  # FIXME
            self.load_graph = Graph().parse(graphBase.ng.filename, format='turtle')
            graphBase.load_graph = self.load_graph
            # FIXME memory inefficiency here ...
            _ = [graphBase.in_graph.add(t) for t in graphBase.load_graph]  # FIXME use conjunctive ...
            if len(graphBase.python_subclasses) == 2:  # FIXME magic number for Neuron and NeuronCUT
                ebms = [type(OntId(s).suffix, (NeuronCUT,), dict(owlClass=s))
                        for s in self.load_graph[:rdfs.subClassOf:NeuronEBM.owlClass]
                        if not graphBase.knownClasses.append(s)]
            else:
                ebms = []

            class_types = [(type, s) for s in self.load_graph[:rdf.type:owl.Class]
                           for type in getClassType(s) if type]
            sc = None
            for sc in chain(graphBase.python_subclasses, ebms):
                sc.owlClass
                iris = [s for type, s in class_types if type == sc.owlClass]
                if iris:
                    sc._load_existing(iris)
            if sc is None:
                raise ImportError(f'Failed to find any neurons to load in {graphBase.ng.filename}')
advanced usage allows loading multiple sets of neurons and using a config object to keep track of the different graphs
entailment
def configGraphIO(remote_base, local_base=None, branch='master',
                  core_graph_paths=tuple(), core_graph=None,
                  in_graph_paths=tuple(),
                  out_graph_path=None, out_imports=tuple(), out_graph=None,
                  prefixes=tuple(),
                  force_remote=False, checkout_ok=ont_checkout_ok,
                  scigraph=None, iri=None,
                  sources=tuple(), source_file=None,
                  use_local_import_paths=True,
                  compiled_location=(PPath('/tmp/neurondm/compiled')
                                     if working_dir is None else
                                     PPath(working_dir, 'neurondm/neurondm/compiled')),
                  ignore_existing=False,
                  local_conventions=False,):
    # FIXME suffixes seem like a bad way to have done this :/
    """ We set this up to work this way because we can't instantiate graphBase,
        it is a super class that needs to be configurable and it needs to do so
        globally. All the default values here are examples and not real.
        You should write a local `def config` function as part of your local
        setup that replicates the arguments of configGraphIO.

        Example:
        def config(remote_base=      'http://someurl.org/remote/ontology/',
                   local_base=       '/home/user/git/ontology/',
                   branch=           'master',
                   core_graph_paths= ['local/path/localCore.ttl',
                                      'local/path/localClasses.ttl'],
                   core_graph=       None,
                   in_graph_paths=   tuple(),
                   out_graph_path=   '/tmp/outputGraph.ttl',
                   out_imports=      ['local/path/localCore.ttl'],
                   out_graph=        None,
                   prefixes=         {'hello': 'http://world.org/'},
                   force_remote=     False,
                   checkout_ok=      False,
                   scigraph=         'http://scigraph.mydomain.org:9000/scigraph'):
            graphBase.configGraphIO(remote_base, local_base, branch,
                                    core_graph_paths, core_graph,
                                    in_graph_paths,
                                    out_graph_path, out_imports, out_graph,
                                    force_remote, checkout_ok, scigraph)
    """
    graphBase.local_conventions = local_conventions
    if local_base is None:
        local_base = devconfig.ontology_local_repo
    graphBase.local_base = Path(local_base).expanduser().resolve()
    graphBase.remote_base = remote_base

    def makeLocalRemote(suffixes):
        remote = [os.path.join(graphBase.remote_base, branch, s)
                  if '://' not in s else  # 'remote' is file:// or http[s]://
                  s for s in suffixes]
        # TODO the whole thing needs to be reworked to not use suffixes...
        local = [(graphBase.local_base / s).as_posix()
                 if '://' not in s else
                 ((graphBase.local_base / s.replace(graphBase.remote_base, '').strip('/')).as_uri()
                  if graphBase.remote_base in s else s)  # FIXME this breaks the semantics of local?
                 for s in suffixes]
        return remote, local

    # file location setup
    remote_core_paths, local_core_paths = makeLocalRemote(core_graph_paths)
    remote_in_paths, local_in_paths = makeLocalRemote(in_graph_paths)
    remote_out_imports, local_out_imports = makeLocalRemote(out_imports)
    out_graph_paths = [out_graph_path]
    remote_out_paths, local_out_paths = makeLocalRemote(out_graph_paths)  # XXX fail w/ tmp
    remote_out_paths = local_out_paths  # can't write to a remote server without magic

    if (not force_remote
            and graphBase.local_base == Path(devconfig.ontology_local_repo)
            and graphBase.local_base.exists()):
        repo = Repo(graphBase.local_base.as_posix())
        if repo.active_branch.name != branch and not checkout_ok:
            raise graphBase.GitRepoOnWrongBranch(
                'Local git repo not on %s branch!\n'
                'Please run `git checkout %s` in %s, '
                'set NIFSTD_CHECKOUT_OK= via export or '
                'at runtime, or set checkout_ok=True.'
                % (branch, branch, repo.working_dir))
        elif checkout_ok:
            graphBase.repo = repo
            graphBase.working_branch = next(h for h in repo.heads if h.name == branch)
            graphBase.original_branch = repo.active_branch
            graphBase.set_repo_state()
        use_core_paths = local_core_paths
        use_in_paths = local_in_paths
    else:
        if not force_remote and not graphBase.local_base.exists():
            log.warning(f'Warning local ontology path {local_base!r} not found!')
        use_core_paths = remote_core_paths
        use_in_paths = remote_in_paths
        if local_base is not None:
            log.warning(f'Warning local base has been set manually you are on your own!')
            try:
                repo = Repo(graphBase.local_base.as_posix())
            except (git.exc.InvalidGitRepositoryError, git.exc.NoSuchPathError) as e:
                local_working_dir = get_working_dir(graphBase.local_base)
                if local_working_dir is None:
                    raise e
                else:
                    msg = (f'{graphBase.local_base} is already contained in a git repository '
                           f'located in {local_working_dir} if you wish to use this repo please '
                           f'set local_base to {local_working_dir}.')
                    raise git.exc.InvalidGitRepositoryError(msg) from e
            graphBase.repo = repo
            # FIXME repo init when branch set still an issue
            # ideally remove _all_ of this code though because WOW
            # it is a mess
            graphBase.set_repo_state()

    # core graph setup
    if core_graph is None:
        core_graph = rdflib.ConjunctiveGraph()
    for cg in use_core_paths:
        try:
            core_graph.parse(cg, format='turtle')
        except (FileNotFoundError, HTTPError) as e:
            # TODO failover to local if we were remote?
            #print(tc.red('WARNING:'), f'no file found for core graph at {cg}')
            log.warning(f'no file found for core graph at {cg}')
    graphBase.core_graph = core_graph
    if RDFL not in [type(s) for s in OntTerm.query.services]:
        # FIXME ah subtle differences between graphs >_<
        # need a much more consistent way to handle the local graphs
        # switching everything out for a single RDFL instance seems
        # the most attractive ...
        OntTerm.query.ladd(RDFL(core_graph, OntId))  # ladd for higher priority

    # store prefixes
    if isinstance(prefixes, dict):
        graphBase.prefixes = prefixes
    else:
        graphBase.prefixes = makePrefixes(*prefixes)
    PREFIXES = {**graphBase.prefixes, **uPREFIXES}
    OntCuries(PREFIXES)

    # input graph setup
    in_graph = core_graph
    for ig in use_in_paths:
        in_graph.parse(ig, format='turtle')
    nin_graph = makeGraph('', prefixes=PREFIXES, graph=in_graph)
    graphBase.in_graph = in_graph
    graphBase.ignore_existing = ignore_existing

    # output graph setup
    if out_graph is None:
        _sources = sources
        _source_file = source_file

        class NeurOnt(Ont):  # FIXME this is super misleading wrt the source ...
            path = 'ttl/generated/neurons/'
            #filename = 'to-be-set-later'
            prefixes = PREFIXES
            sources = _sources
            source_file = _source_file
            # FIXME temp fix for issue with wgb in core
            #wasGeneratedBy = ('https://github.com/tgbugs/pyontutils/blob/'
                              #'{commit}/'
                              #'{filepath}'
                              #'{hash_L_line}')

        no = NeurOnt()
        out_graph = no.graph
        graphBase.ng = no._graph
        #out_graph = rdflib.Graph()
        # in this case we also want to wipe any existing python Neuron entries
        # that we use to serialize so that behavior is consistent
        NeuronBase.existing_pes = {}
        NeuronBase.existing_ids = {}
    else:
        no = None
        graphBase.ng = makeGraph('', prefixes=PREFIXES, graph=out_graph)
        #new_graph = makeGraph('', prefixes=PREFIXES, graph=out_graph)
    graphBase.out_graph = out_graph

    # python output setup
    graphBase.compiled_location = compiled_location

    # makeGraph setup
    new_graph = graphBase.ng  #= new_graph
    new_graph.filename = out_graph_path

    if iri is not None:
        ontid = rdflib.URIRef(iri)
    else:
        ontid = rdflib.URIRef('file://' + out_graph_path)  # do not use Path().absolute() it will leak
    if use_local_import_paths:
        new_graph.add_trip(ontid, rdf.type, owl.Ontology)
        for local_out_import in local_out_imports:
            # TODO flip switch between local and remote import behavior
            new_graph.add_trip(ontid, owl.imports, rdflib.URIRef(local_out_import))
            # core should be in the import closure
    else:
        new_graph.add_trip(ontid, rdf.type, owl.Ontology)
        for remote_out_import in remote_out_imports:
            # TODO flip switch between local and remote import behavior
            new_graph.add_trip(ontid, owl.imports, rdflib.URIRef(remote_out_import))
            # core should be in the import closure

    if no is not None:
        no()  # populate generated by info

    # set predicates
    graphBase._predicates = getPhenotypePredicates(graphBase.core_graph)

    # scigraph setup
    if scigraph is not None:
        graphBase._sgv = Vocabulary(cache=True, basePath=scigraph)
    else:
        graphBase._sgv = Vocabulary(cache=True)
We set this up to work this way because we can't instantiate graphBase, it is a super class that needs to be configurable and it needs to do so globally. All the default values here are examples and not real. You should write a local `def config` function as part of your local setup that replicates the arguments of configGraphIO. Example: def config(remote_base= 'http://someurl.org/remote/ontology/', local_base= '/home/user/git/ontology/', branch= 'master', core_graph_paths= ['local/path/localCore.ttl', 'local/path/localClasses.ttl'], core_graph= None, in_graph_paths= tuple(), out_graph_path= '/tmp/outputGraph.ttl', out_imports= ['local/path/localCore.ttl'], out_graph= None, prefixes= {'hello':'http://world.org/'}, force_remote= False, checkout_ok= False, scigraph= 'http://scigraph.mydomain.org:9000/scigraph'): graphBase.configGraphIO(remote_base, local_base, branch, core_graph_paths, core_graph, in_graph_paths, out_graph_path, out_imports, out_graph, force_remote, checkout_ok, scigraph)
entailment
def python_header(cls):
    out = '#!/usr/bin/env python3.6\n'
    out += f'from {cls.__import_name__} import *\n\n'
    all_types = set(type(n) for n in cls.neurons())
    _subs = [inspect.getsource(c) for c in subclasses(Neuron)
             if c in all_types and Path(inspect.getfile(c)).exists()]
    subs = '\n' + '\n\n'.join(_subs) + '\n\n' if _subs else ''
    #log.debug(str(all_types))
    #log.debug(f'python header for {cls.filename_python()}:\n{subs}')
    out += subs
    ind = '\n' + ' ' * len('config = Config(')
    _prefixes = {k: str(v) for k, v in cls.ng.namespaces.items()
                 if k not in uPREFIXES and k != 'xml' and k != 'xsd'}  # FIXME don't hardcode xml xsd
    len_thing = len(f'config = Config({cls.ng.name!r}, prefixes={{')
    '}}'  # bare string balances the {{ above for editors/highlighting
    prefixes = (f',{ind}prefixes={pformat(_prefixes, 0)}'.replace('\n', '\n' + ' ' * len_thing)
                if _prefixes else '')
    tel = ttl_export_dir = Path(cls.ng.filename).parent.as_posix()
    ttlexp = f',{ind}ttl_export_dir={tel!r}'
    # FIXME prefixes should be separate so they are accessible in the namespace
    # FIXME ilxtr needs to be detected as well
    # FIXME this doesn't trigger when run as an import?
    out += f'config = Config({cls.ng.name!r},{ind}file=__file__{ttlexp}{prefixes})\n\n'  # FIXME this is from neurons.lang
    return out
}}
entailment
def equivalentClass(self, *others):
    """ As implemented this acts as a permanent bag union operator and
        therefore should be used with extreme caution since in any given
        context the computed label/identifier will no longer reflect the
        entailed/reasoned bag.

        In a static context this means that we might want to have an
        ilxtr:assertedAlwaysImplies -> bag union neuron """
    # FIXME this makes the data model mutable!
    # TODO symmetry here
    # serious modelling issue
    # If you make two bags equivalent to each other
    # then in the set theory model it becomes impossible
    # to have a set that is _just_ one or the other of the bags
    # which I do not think that we want
    for other in others:
        if isinstance(other, self.__class__):
            #if isinstance(other, NegPhenotype):  # FIXME maybe this is the issue with neg equivs?
            otherid = other.id_
        else:
            otherid = other
        self.out_graph.add((self.id_, owl.equivalentClass, otherid))
        self._equivalent_bags_ids.add(otherid)
    return self
As implemented this acts as a permanent bag union operator and therefore should be used with extreme caution since in any given context the computed label/identifier will no longer reflect the entailed/reasoned bag. In a static context this means that we might want to have an ilxtr:assertedAlwaysImplies -> bag union neuron
entailment
def label_maker(self):
    """ needed to defer loading of local conventions
        to avoid circular dependency issue """
    if (not hasattr(graphBase, '_label_maker')
            or graphBase._label_maker.local_conventions != graphBase.local_conventions):
        graphBase._label_maker = LabelMaker(graphBase.local_conventions)
    return graphBase._label_maker
needed to defer loading of local conventions to avoid circular dependency issue
entailment
def _graphify(self, *args, graph=None):  # defined
    """ Lift phenotypeEdges to Restrictions """
    if graph is None:
        graph = self.out_graph
    ################## LABELS ARE DEFINED HERE ##################
    gl = self.genLabel
    ll = self.localLabel
    ol = self.origLabel
    graph.add((self.id_, ilxtr.genLabel, rdflib.Literal(gl)))
    if ll != gl:
        graph.add((self.id_, ilxtr.localLabel, rdflib.Literal(ll)))
    if ol and ol != gl:
        graph.add((self.id_, ilxtr.origLabel, rdflib.Literal(ol)))
    members = [self.expand(self.owlClass)]
    for pe in self.pes:
        target = pe._graphify(graph=graph)
        if isinstance(pe, NegPhenotype):  # isinstance will match NegPhenotype -> Phenotype
            #self.Class.disjointWith = [target]
            # FIXME for defined neurons this is what we need and I think it is
            # stronger than the complementOf version
            djc = infixowl.Class(graph=graph)  # TODO for generic neurons this is what we need
            djc.complementOf = target
            members.append(djc)
        else:
            members.append(target)  # FIXME negative logical phenotypes :/
    intersection = infixowl.BooleanClass(members=members, graph=graph)  # FIXME dupes
    #existing = list(self.Class.equivalentClass)
    #if existing or str(pe.pLabel) == 'Htr3a':
        #embed()
    ec = [intersection]
    self.Class.equivalentClass = ec
    return self.Class
Lift phenotypeEdges to Restrictions
entailment
def run(args):
    if args['--debug']:
        print(args)
    # ignoring bamboo sequencing for the moment...
    # check the build server to see if we have built the latest (or the specified commit)
    # if yes just scp those to services
    # if no check the web endpoint to see if latest matches build
        # if yes fetch
        # else build (push to web endpoint)
    'localhost:~/files/ontology-packages/scigraph/'
    'tom@orpheus:~/files/ontology-packages/graph/'
    #b = Builder(build_host, services_host, build_user, services_user,
                #graph_latest_url, scigraph_latest_url,
                #scp, sscp, git_local, repo_name,
                #services_folder, graph_folder, services_config_folder
               #)
    kwargs = {k.strip('--').strip('<').rstrip('>').replace('-', '_'): v
              for k, v in args.items()}
    b = Builder(args, **kwargs)
    if b.mode is not None:
        code = b.run()
    else:
        if args['--view-defaults']:
            for k, v in combined_defaults.items():
                print(f'{k:<22} {v}')
            return
    if b.debug:
        FILE = '/tmp/test.sh'
        with open(FILE, 'wt') as f:
            f.write('#!/usr/bin/env bash\n' + code)
        os.system(f"emacs -batch {FILE} "
                  "--eval '(indent-region (point-min) (point-max) nil)' "
                  "-f save-buffer")
        print()
        with open(FILE, 'rt') as f:
            print(f.read())
        #embed()
        if b.local:
            return
        #if b.check_built:
            #return
    else:
        print(code)
localhost:~/files/ontology-packages/scigraph/
entailment
def runOnExecutor(self, *commands, oper=ACCEPT, defer_shell_expansion=False):
    """ This runs in the executor of the current scope. You cannot magically
        back out since there are no guarantees that ssh keys will be in place
        (they shouldn't be). """
    return self.makeOutput(EXE, commands, oper=oper,
                           defer_shell_expansion=defer_shell_expansion)
This runs in the executor of the current scope. You cannot magically back out since there are no guarantees that ssh keys will be in place (they shouldn't be).
entailment
def build(self, mode=None, check=False):
    """ Just shuffle the current call off to the build server with --local attached """
    kwargs = {}
    if not self.build_only:  # don't try to deploy twice
        kwargs['--build-only'] = True
    if not self.local:
        kwargs['--local'] = True
    if check:
        kwargs['--check-built'] = True
    remote_args = self.formatted_args(self.args, mode, **kwargs)
    cmds = tuple() if self.check_built or self._updated else self.cmds_pyontutils()
    if not self._updated:
        self._updated = True
    return self.runOnBuild(*cmds, f'scigraph-deploy {remote_args}',
                           defer_shell_expansion=True, oper=AND)
Just shuffle the current call off to the build server with --local attached
entailment
def swanson():
    """ not really a parcellation scheme
        NOTE: the defining information up here is now deprecated
        it is kept around to keep the code further down happy """

    source = Path(devconfig.resources, 'swanson_aligned.txt').as_posix()
    ONT_PATH = 'http://ontology.neuinfo.org/NIF/ttl/generated/'
    filename = 'swanson_hierarchies'
    ontid = ONT_PATH + filename + '.ttl'
    PREFIXES = SwansonLabels.prefixes
    new_graph = makeGraph(filename, PREFIXES, writeloc='/tmp/')
    new_graph.add_ont(ontid,
                      'Swanson brain partomies',
                      'Swanson 2014 Partonomies',
                      'This file is automatically generated from ' + source + '.' + '**FIXME**',
                      'now')
    # FIXME citations should really go on the ... anatomy? scheme artifact
    definingCitation = ('Swanson, Larry W. Neuroanatomical Terminology: a lexicon of '
                        'classical origins and historical foundations. '
                        'Oxford University Press, USA, 2014.')
    definingCitationID = 'ISBN:9780195340624'
    new_graph.add_trip(ontid, 'NIFRID:definingCitation', definingCitation)
    new_graph.add_trip(ontid, 'NIFRID:definingCitationID', definingCitationID)

    with open(source, 'rt') as f:
        lines = [l.strip() for l in f.readlines()]

    # join header on page 794
    lines[635] += ' ' + lines.pop(636)
    # fix for capitalization since this header is reused
    fixed = ' or '.join([' ('.join([n.capitalize() for n in _.split(' (')])
                         for _ in lines[635].lower().split(' or ')]).replace('human', 'HUMAN')
    lines[635] = fixed

    data = []
    for l in lines:
        if not l.startswith('#'):
            level = l.count('.' * 5)
            l = l.strip('.')
            if ' (' in l:
                if ') or' in l:
                    n1, l = l.split(') or')
                    area_name, citationP = n1.strip().split(' (')
                    citation = citationP.rstrip(')')
                    d = (level, area_name, citation, 'NEXT SYN')
                    data.append(d)
                    #print(tc.red(tc.bold(repr(d))))
                area_name, citationP = l.strip().split(' (')
                citation = citationP.rstrip(')')
            else:
                area_name = l
                citation = None
            d = (level, area_name, citation, None)
            #print(d)
            data.append(d)

    results = async_getter(sgv.findByTerm, [(d[1],) for d in data])
    #results = [None] * len(data)
    curies = [[r['curie'] for r in _ if 'UBERON' in r['curie']] if _ else []
              for _ in results]
    output = [_[0] if _ else None for _ in curies]

    header = ['Depth', 'Name', 'Citation', 'NextSyn', 'Uberon']
    zoop = [header] + [r for r in zip(*zip(*data), output)] + \
           [(0, 'Appendix END None', None, None, None)]  # needed to add last appendix

    # TODO annotate the appendicies and the classes with these
    appendix_root_mapping = (1, 1, 1, 1, 30, 83, 69, 70, 74, 1)  # should generate?

    class SP(rowParse):
        def __init__(self):
            self.nodes = defaultdict(dict)
            self._appendix = 0
            self.appendicies = {}
            self._last_at_level = {}
            self.names = defaultdict(set)
            self.children = defaultdict(set)
            self.parents = defaultdict(set)
            self.next_syn = False
            super().__init__(zoop)

        def Depth(self, value):
            if self.next_syn:
                self.synonym = self.next_syn
            else:
                self.synonym = False
            self.depth = value

        def Name(self, value):
            self.name = value

        def Citation(self, value):
            self.citation = value

        def NextSyn(self, value):
            if value:
                self.next_syn = self._rowind
            else:
                self.next_syn = False

        def Uberon(self, value):
            self.uberon = value

        def _row_post(self):
            # check if we are in the next appendix
            # may want to xref ids between appendicies as well...
            if self.depth == 0:
                if self.name.startswith('Appendix'):
                    if self._appendix:
                        self.appendicies[self._appendix]['children'] = dict(self.children)
                        self.appendicies[self._appendix]['parents'] = dict(self.parents)
                        self._last_at_level = {}
                        self.children = defaultdict(set)
                        self.parents = defaultdict(set)
                    _, num, apname = self.name.split(' ', 2)
                    if num == 'END':
                        return
                    self._appendix = int(num)
                    self.appendicies[self._appendix] = {
                        'name': apname.capitalize(),
                        'type': self.citation.capitalize() if self.citation else None}
                    return
                else:
                    if ' [' in self.name:
                        name, taxonB = self.name.split(' [')
                        self.name = name
                        self.appendicies[self._appendix]['taxon'] = taxonB.rstrip(']').capitalize()
                    else:  # top level is animalia
                        self.appendicies[self._appendix]['taxon'] = 'ANIMALIA'.capitalize()
                    self.name = self.name.capitalize()
                    self.citation = self.citation.capitalize()
            # nodes
            if self.synonym:
                self.nodes[self.synonym]['synonym'] = self.name
                self.nodes[self.synonym]['syn-cite'] = self.citation
                self.nodes[self.synonym]['syn-uberon'] = self.uberon
                return
            else:
                if self.citation:  # Transverse Longitudinal etc all @ lvl4
                    self.names[self.name + ' ' + self.citation].add(self._rowind)
                else:
                    self.name += str(self._appendix) + self.nodes[self._last_at_level[self.depth - 1]]['label']
                    #print(level, self.name)
                    # can't return here because they are their own level
                # replace with actually doing something...
                self.nodes[self._rowind]['label'] = self.name
                self.nodes[self._rowind]['citation'] = self.citation
                self.nodes[self._rowind]['uberon'] = self.uberon
            # edges
            self._last_at_level[self.depth] = self._rowind
            # TODO will need something to deal with the Lateral/
            if self.depth > 0:
                try:
                    parent = self._last_at_level[self.depth - 1]
                except:
                    embed()
                self.children[parent].add(self._rowind)
                self.parents[self._rowind].add(parent)

        def _end(self):
            replace = {}
            for asdf in [sorted(n) for k, n in self.names.items() if len(n) > 1]:
                replace_with, to_replace = asdf[0], asdf[1:]
                for r in to_replace:
                    replace[r] = replace_with

            for r, rw in replace.items():
                #print(self.nodes[rw])
                o = self.nodes.pop(r)
                #print(o)

            for vals in self.appendicies.values():
                children = vals['children']
                parents = vals['parents']
                # need reversed so children are corrected before swap
                for r, rw in reversed(sorted(replace.items())):
                    if r in parents:
                        child = r
                        new_child = rw
                        parent = parents.pop(child)
                        parents[new_child] = parent
                        parent = list(parent)[0]
                        children[parent].remove(child)
                        children[parent].add(new_child)
                    if r in children:
                        parent = r
                        new_parent = rw
                        childs = children.pop(parent)
                        children[new_parent] = childs
                        for child in childs:
                            parents[child] = {new_parent}

            self.nodes = dict(self.nodes)

    sp = SP()
    tp = [_ for _ in sorted(['{: <50}'.format(n['label']) + n['uberon'] if n['uberon'] else n['label']
                             for n in sp.nodes.values()])]
    #print('\n'.join(tp))
    #print(sp.appendicies[1].keys())
    #print(sp.nodes[1].keys())

    nbase = PREFIXES['SWAN'] + '%s'
    json_ = {'nodes': [], 'edges': []}
    parent = ilxtr.swansonBrainRegionConcept
    for node, anns in sp.nodes.items():
        nid = nbase % node
        new_graph.add_class(nid, parent, label=anns['label'])
        new_graph.add_trip(nid, 'NIFRID:definingCitation', anns['citation'])
        json_['nodes'].append({'lbl': anns['label'], 'id': 'SWA:' + str(node)})
        #if anns['uberon']:
            #new_graph.add_trip(nid, owl.equivalentClass, anns['uberon'])  # issues arise here...

    for appendix, data in sp.appendicies.items():
        aid = PREFIXES['SWAA'] + str(appendix)
        new_graph.add_class(aid, label=data['name'].capitalize())
        new_graph.add_trip(aid, 'ilxtr:hasTaxonRank', data['taxon'])  # FIXME appendix is the data artifact...
        children = data['children']
        ahp = 'swanr:hasPart' + str(appendix)
        apo = 'swanr:partOf' + str(appendix)
        new_graph.add_op(ahp, transitive=True)
        new_graph.add_op(apo, inverse=ahp, transitive=True)
        for parent, childs in children.items():  # FIXME does this give complete coverage?
            pid = nbase % parent
            for child in childs:
                cid = nbase % child
                new_graph.add_restriction(pid, ahp, cid)  # note hierarchy inverts direction
                new_graph.add_restriction(cid, apo, pid)
                json_['edges'].append({'sub': 'SWA:' + str(child),
                                       'pred': apo,
                                       'obj': 'SWA:' + str(parent)})

    return new_graph
not really a parcellation scheme NOTE: the defining information up here is now deprecated it is kept around to keep the code further down happy
entailment
def relative_resources(pathstring, failover='nifstd/resources'):
    """ relative paths to resources in this repository
        `failover` matches the location relative to the
        github location (usually for prov purposes) """
    if working_dir is None:
        return Path(failover, pathstring).resolve()
    else:
        return Path(devconfig.resources, pathstring).resolve().relative_to(working_dir.resolve())
relative paths to resources in this repository `failover` matches the location relative to the github location (usually for prov purposes)
entailment
def build(*onts, fail=False, n_jobs=9, write=True):
    """ Set n_jobs=1 for debug or embed() will crash. """
    tail = lambda: tuple()
    lonts = len(onts)
    if lonts > 1:
        for i, ont in enumerate(onts):
            if ont.__name__ == 'parcBridge':
                onts = onts[:-1]
                def tail(o=ont):
                    return o.setup(),
                if i != lonts - 1:
                    raise ValueError('parcBridge should be built last to avoid weird errors!')
    # ont_setup must be run first on all ontologies
    # or we will get weird import errors
    if n_jobs == 1 or True:  # `or True` currently forces the serial path
        return tuple(ont.make(fail=fail, write=write)
                     for ont in tuple(ont.setup() for ont in onts) + tail())

    # have to use a listcomp so that all calls to setup()
    # finish before parallel goes to work
    return Parallel(n_jobs=n_jobs)(delayed(o.make)(fail=fail, write=write)
                                   for o in
                                   #[ont_setup(ont) for ont in onts])
                                   (tuple(Async()(deferred(ont.setup)() for ont in onts)) + tail()
                                    if n_jobs > 1 else
                                    [ont.setup() for ont in onts]))
Set n_jobs=1 for debug or embed() will crash.
entailment
def qname(uri, warning=False):
    """ compute qname from defaults """
    if warning:
        print(tc.red('WARNING:'),
              tc.yellow(f'qname({uri}) is deprecated! please use OntId({uri}).curie'))
    return __helper_graph.qname(uri)
compute qname from defaults
entailment
def cull_prefixes(graph,
                  prefixes={k: v for k, v in uPREFIXES.items() if k != 'NIFTTL'},
                  cleanup=lambda ps, graph: None,
                  keep=False):
    """ Remove unused curie prefixes and normalize to a standard set. """
    prefs = ['']
    if keep:
        prefixes.update({p: str(n) for p, n in graph.namespaces()})
    if '' not in prefixes:
        prefixes[''] = null_prefix  # null prefix

    pi = {v: k for k, v in prefixes.items()}
    asdf = {}  #{v:k for k, v in ps.items()}
    asdf.update(pi)

    # determine which prefixes we need
    for uri in set((e for t in graph for e in t)):
        if uri.endswith('.owl') or uri.endswith('.ttl') or uri.endswith('$$ID$$'):
            continue  # don't prefix imports or templates
        for rn, rp in sorted(asdf.items(), key=lambda a: -len(a[0])):  # make sure we get longest first
            lrn = len(rn)
            if type(uri) == rdflib.BNode:
                continue
            elif uri.startswith(rn) and '#' not in uri[lrn:] and '/' not in uri[lrn:]:
                # prevent prefixing when there is another sep
                prefs.append(rp)
                break

    ps = {p: prefixes[p] for p in prefs}
    cleanup(ps, graph)
    ng = makeGraph('', prefixes=ps)
    [ng.g.add(t) for t in graph]
    return ng
Remove unused curie prefixes and normalize to a standard set.
entailment
def displayTriples(triples, qname=qname):
    """ triples can also be an rdflib Graph instance """
    [print(*(e[:5] if isinstance(e, rdflib.BNode) else qname(e) for e in t), '.')
     for t in sorted(triples)]
triples can also be an rdflib Graph instance
entailment
def write(self, cull=False):
    """ Serialize self.g and write to self.filename,
        set cull to true to remove unwanted prefixes """
    if cull:
        cull_prefixes(self).write()
    else:
        ser = self.g.serialize(format='nifttl')
        with open(self.filename, 'wb') as f:
            f.write(ser)
Serialize self.g and write to self.filename, set cull to true to remove unwanted prefixes
entailment
def add_ap(self, id_, label=None, addPrefix=True):
    """ Add id_ as an owl:AnnotationProperty """
    self.add_trip(id_, rdf.type, owl.AnnotationProperty)
    if label:
        self.add_trip(id_, rdfs.label, label)
        if addPrefix:
            prefix = ''.join([s.capitalize() for s in label.split()])
            namespace = self.expand(id_)
            self.add_namespace(prefix, namespace)
Add id_ as an owl:AnnotationProperty
entailment
def add_op(self, id_, label=None, subPropertyOf=None, inverse=None,
           transitive=False, addPrefix=True):
    """ Add id_ as an owl:ObjectProperty """
    self.add_trip(id_, rdf.type, owl.ObjectProperty)
    if inverse:
        self.add_trip(id_, owl.inverseOf, inverse)
    if subPropertyOf:
        self.add_trip(id_, rdfs.subPropertyOf, subPropertyOf)
    if label:
        self.add_trip(id_, rdfs.label, label)
        if addPrefix:
            prefix = ''.join([s.capitalize() for s in label.split()])
            namespace = self.expand(id_)
            self.add_namespace(prefix, namespace)
    if transitive:
        self.add_trip(id_, rdf.type, owl.TransitiveProperty)
Add id_ as an owl:ObjectProperty
entailment
def add_hierarchy(self, parent, edge, child):  # XXX DEPRECATED
    """ Helper function to simplify the addition of part_of style
        objectProperties to graphs. FIXME make a method of makeGraph? """
    if type(parent) != rdflib.URIRef:
        parent = self.check_thing(parent)
    if type(edge) != rdflib.URIRef:
        edge = self.check_thing(edge)
    if type(child) != infixowl.Class:
        if type(child) != rdflib.URIRef:
            child = self.check_thing(child)
        child = infixowl.Class(child, graph=self.g)
    restriction = infixowl.Restriction(edge, graph=self.g, someValuesFrom=parent)
    child.subClassOf = [restriction] + [c for c in child.subClassOf]
Helper function to simplify the addition of part_of style objectProperties to graphs. FIXME make a method of makeGraph?
entailment
def add_restriction(self, subject, predicate, object_):
    """ Lift normal triples into restrictions using someValuesFrom. """
    if type(object_) != rdflib.URIRef:
        object_ = self.check_thing(object_)
    if type(predicate) != rdflib.URIRef:
        predicate = self.check_thing(predicate)
    if type(subject) != infixowl.Class:
        if type(subject) != rdflib.URIRef:
            subject = self.check_thing(subject)
        subject = infixowl.Class(subject, graph=self.g)
    restriction = infixowl.Restriction(predicate, graph=self.g, someValuesFrom=object_)
    subject.subClassOf = [restriction] + [c for c in subject.subClassOf]
Lift normal triples into restrictions using someValuesFrom.
entailment
def get_equiv_inter(self, curie):
    """ get equivalent classes where curie is in an intersection """
    start = self.qname(self.expand(curie))  # in case something is misaligned
    qstring = """
    SELECT DISTINCT ?match WHERE {
        ?match owl:equivalentClass/owl:intersectionOf/rdf:rest*/rdf:first %s .
    }""" % start
    return [_ for (_,) in self.g.query(qstring)]
get equivalent classes where curie is in an intersection
entailment
def qname(self, uri, generate=False):
    """ Given a uri return the qname if it exists, otherwise return the uri. """
    try:
        prefix, namespace, name = self.g.namespace_manager.compute_qname(uri, generate=generate)
        qname = ':'.join((prefix, name))
        return qname
    except (KeyError, ValueError) as e:
        return uri.toPython() if isinstance(uri, rdflib.URIRef) else uri
Given a uri return the qname if it exists, otherwise return the uri.
entailment
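A minimal standalone sketch of the rdflib call this wrapper relies on (assuming only rdflib; the graph and URI here are illustrative):

import rdflib

g = rdflib.Graph()  # rdflib binds rdf, rdfs, xsd, ... by default
uri = rdflib.URIRef('http://www.w3.org/2000/01/rdf-schema#label')
prefix, namespace, name = g.namespace_manager.compute_qname(uri, generate=False)
print(':'.join((prefix, name)))  # -> rdfs:label
# with generate=False an unbound namespace raises KeyError, which the
# qname() wrapper above catches, falling back to the plain uri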
def archive_history_item(item, destination, no_color):
    """
    Archive the log and results file for the given history item.

    Copy the files and update the results file in the destination directory.
    """
    log_src, description = split_history_item(item.strip())

    # Get relative path for log:
    # {provider}/{image}/{instance}/{timestamp}.log
    log_dest = os.path.sep.join(log_src.rsplit(os.path.sep, 4)[1:])

    # Get results src and destination based on log paths.
    results_src = log_src.rsplit('.', 1)[0] + '.results'
    results_dest = log_dest.rsplit('.', 1)[0] + '.results'

    destination_path = os.path.join(destination, log_dest)
    log_dir = os.path.dirname(destination_path)

    try:
        if not os.path.isdir(log_dir):
            os.makedirs(log_dir)

        # Copy results and log files to archive directory.
        shutil.copyfile(log_src, destination_path)
        shutil.copyfile(results_src, os.path.join(destination, results_dest))
    except Exception as error:
        echo_style(
            'Unable to archive history item: %s' % error,
            no_color,
            fg='red'
        )
        sys.exit(1)
    else:
        # Only update the archive results log if no errors occur.
        update_history_log(
            os.path.join(destination, '.history'),
            description=description,
            test_log=log_dest
        )
Archive the log and results file for the given history item. Copy the files and update the results file in the destination directory.
entailment
def echo_results(data, no_color, verbose=False): """Print test results in nagios style format.""" try: summary = data['summary'] except KeyError as error: click.secho( 'The results json is missing key: %s' % error, fg='red' ) sys.exit(1) if 'failed' in summary or 'error' in summary: fg = 'red' status = 'FAILED' else: fg = 'green' status = 'PASSED' results = '{} tests={}|pass={}|skip={}|fail={}|error={}'.format( status, str(summary.get('num_tests', 0)), str(summary.get('passed', 0)), str(summary.get('skipped', 0)), str(summary.get('failed', 0)), str(summary.get('error', 0)) ) echo_style(results, no_color, fg=fg) if verbose: echo_verbose_results(data, no_color)
Print test results in nagios style format.
entailment
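A hedged usage sketch with a made-up results payload (requires the module's click and echo_style helpers). Note that the status check tests for the presence of the 'failed'/'error' keys rather than their values, so a passing summary should simply omit them:

data = {'summary': {'num_tests': 3, 'passed': 3}}  # hypothetical payload
echo_results(data, no_color=True)
# prints: PASSED tests=3|pass=3|skip=0|fail=0|error=0  (.get defaults fill 0)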
def echo_results_file(results_file, no_color, verbose=False): """Print test results in nagios style format.""" try: data = collect_results(results_file) except ValueError: echo_style( 'The results file is not the proper json format.', no_color, fg='red' ) sys.exit(1) except Exception as error: echo_style( 'Unable to process results file: %s' % error, no_color, fg='red' ) sys.exit(1) echo_results(data, no_color, verbose)
Print test results in nagios style format.
entailment
def echo_verbose_results(data, no_color): """Print list of tests and result of each test.""" click.echo() click.echo( '\n'.join( '{}: {}'.format(key, val) for key, val in data['info'].items() ) ) click.echo() for test in data['tests']: if test['outcome'] == 'passed': fg = 'green' elif test['outcome'] == 'skipped': fg = 'yellow' else: fg = 'red' name = parse_test_name(test['name']) echo_style( '{} {}'.format(name, test['outcome'].upper()), no_color, fg=fg )
Print list of tests and result of each test.
entailment
def get_log_file_from_item(history): """ Return the log file based on provided history item. Description is optional. """ try: log_file, description = shlex.split(history) except ValueError: log_file = history.strip() return log_file
Return the log file based on provided history item. Description is optional.
entailment
def results_history(history_log, no_color): """Display a list of ipa test results history.""" try: with open(history_log, 'r') as f: lines = f.readlines() except Exception as error: echo_style( 'Unable to process results history log: %s' % error, no_color, fg='red' ) sys.exit(1) index = len(lines) for item in lines: click.echo('{} {}'.format(index, item), nl=False) index -= 1
Display a list of ipa test results history.
entailment
def split_history_item(history): """ Return the log file and optional description for item. """ try: log_file, description = shlex.split(history) except ValueError: log_file = history.strip() description = None return log_file, description
Return the log file and optional description for item.
entailment
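A quick sketch with a hypothetical history line; shlex.split yields two tokens when a quoted description follows the path and raises ValueError on a bare path, which the function turns into a None description:

item = 'ec2/sles/i-123/20190101.log "nightly run"'  # hypothetical entry
print(split_history_item(item))
# -> ('ec2/sles/i-123/20190101.log', 'nightly run')
print(split_history_item('ec2/sles/i-123/20190101.log'))
# -> ('ec2/sles/i-123/20190101.log', None)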
def get_working_dir(script__file__):
    """ Return the working directory for the script by walking up the
        filesystem to the enclosing git repository; returns None if the
        script is not inside one """
    start = Path(script__file__).resolve()
    _root = Path(start.root)
    working_dir = start
    while not list(working_dir.glob('.git')):
        if working_dir == _root:
            return
        working_dir = working_dir.parent
    return working_dir
Return the working directory for the script by walking up the filesystem to the enclosing git repository; returns None if the script is not inside one
entailment
def sysidpath(ignore_options=False):
    """ get a unique identifier for the machine running this function """
    # in the event we have to make our own
    # this should not be passed in as a parameter
    # since we need these definitions to be more or less static
    failover = Path('/tmp/machine-id')
    if not ignore_options:
        options = (
            Path('/etc/machine-id'),
            failover,  # always read to see if we somehow managed to persist this
        )
        for option in options:
            if (option.exists() and
                os.access(option, os.R_OK) and
                option.stat().st_size > 0):
                return option

    uuid = uuid4()
    with open(failover, 'wt') as f:
        f.write(uuid.hex)

    return failover
get a unique identifier for the machine running this function
entailment
def chunk_list(list_, size): """ Split a list list_ into sublists of length size. NOTE: len(chunks[-1]) <= size. """ ll = len(list_) if ll <= size: return [list_] elif size == 0: return list_ # or None ?? elif size == 1: return [[l] for l in list_] else: chunks = [] for start, stop in zip(range(0, ll, size), range(size, ll, size)): chunks.append(list_[start:stop]) chunks.append(list_[stop:]) # snag unaligned chunks from last stop return chunks
Split a list list_ into sublists of length size. NOTE: len(chunks[-1]) <= size.
entailment
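Usage examples showing the edge cases the docstring notes:

print(chunk_list(list(range(7)), 3))  # [[0, 1, 2], [3, 4, 5], [6]]
print(chunk_list([1, 2], 5))          # [[1, 2]]  (input shorter than size)
print(chunk_list([1, 2, 3], 1))       # [[1], [2], [3]]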
def NICKNAME(selfPARAMSDEFAULT_OUTPUT): """ DOCSTRING """ {params_conditional} kwargs = {param_rest} kwargs = {dict_comp} param_rest = self._make_rest({required}, **kwargs) url = self._basePath + ('{path}').format(**kwargs) requests_params = {dict_comp2} output = self._get('{method}', url, requests_params, {output}) return output if output else {empty_return_type}
DOCSTRING
entailment
def gencode(self): """ Run this to generate the code """ ledict = requests.get(self.api_url).json() ledict = self.dotopdict(ledict) out = self.dodict(ledict) self._code = out
Run this to generate the code
entailment
def dotopdict(self, dict_): """ Rewrite the 2.0 json to match what we feed the code for 1.2 """ mlookup = {'get':'GET', 'post':'POST'} def rearrange(path, method_dict, method): oid = method_dict['operationId'] self._paths[oid] = path method_dict['nickname'] = oid method_dict['method'] = mlookup[method] paths = dict_['paths'] for path, path_dict in paths.items(): if self.path_prefix and self.path_prefix not in path: continue path_dict['operations'] = [] for method, method_dict in sorted(path_dict.items()): if method == 'operations': continue rearrange(path, method_dict, method) #print(self.operation(method_dict)) path_dict['operations'].append(method_dict) path_dict['path'] = path def setp(v, lenp=len(self.path_prefix)): v['path'] = v['path'][lenp:] return v dict_['apis'] = [] for tag_dict in dict_['tags']: path = '/' + tag_dict['name'] d = {'path':path, 'description':tag_dict['description'], 'class_json':{ 'docstring':tag_dict['description'], 'resourcePath':path, 'apis':[setp(v) for k, v in paths.items() if k.startswith(self.path_prefix + path)]}, } dict_['apis'].append(d) # make sure this is run first so we don't get key errors self._swagger(dict_['swagger']) self._info(dict_['info']) self._definitions(dict_['definitions']) return dict_
Rewrite the 2.0 json to match what we feed the code for 1.2
entailment
def local_imports(remote_base, local_base, ontologies, local_versions=tuple(), readonly=False, dobig=False, revert=False):
    """ Read the import closure and use the local versions of the files. """
    done = []
    triples = set()
    imported_iri_vs_ontology_iri = {}
    p = owl.imports
    oi = b'owl:imports'
    oo = b'owl:Ontology'
    def inner(local_filepath, remote=False):
        if noneMembers(local_filepath, *bigleaves) or dobig:
            ext = os.path.splitext(local_filepath)[-1]
            if ext == '.ttl':
                infmt = 'turtle'
            else:
                print(ext, local_filepath)
                infmt = None
            if remote:
                resp = requests.get(local_filepath)  # TODO nonblocking pull these out, fetch, run inner again until done
                raw = resp.text.encode()
            else:
                try:
                    with open(local_filepath, 'rb') as f:
                        raw = f.read()
                except FileNotFoundError as e:
                    if local_filepath.startswith('file://'):
                        print('local_imports has already been run, skipping', local_filepath)
                        return
                        #raise ValueError('local_imports has already been run') from e
                    else:
                        print(e)
                        # TODO raise a warning if the file cannot be matched
                        # seems like good practice to have any imported ontology under
                        # version control so all imports are guaranteed to have good
                        # provenance and not split the prior information between the
                        # scigraph config and the repository, the repository remains
                        # the source of truth, load.yaml files can then pick a subset
                        # of the properly tracked files to load as they see fit, but
                        # not add to them (at least in pyontutils land)
                        raw = b''
            if oo in raw:  # we only care if there are imports or an ontology iri
                scratch = rdflib.Graph()
                if infmt == 'turtle':
                    data, rest = raw.split(b'###', 1)
                elif infmt is None:  # assume xml
                    xml_tree = etree.parse(BytesIO(raw))
                    xml_root = xml_tree.getroot()
                    xml_ontology = xml_tree.xpath("/*[local-name()='RDF']/*[local-name()='Ontology']")
                    xml_root.clear()
                    xml_root.append(xml_ontology[0])
                    data = etree.tostring(xml_root)
                scratch.parse(data=data, format=infmt)
                for s in scratch.subjects(rdf.type, owl.Ontology):
                    triples.add((s, owl.sameAs, rdflib.URIRef(local_filepath)))
                    # somehow this breaks computing the chain
                    #for p in (rdfs.comment, skos.definition, definition, dc.title, rdfs.label):
                        #for o in scratch[s:p]:
                            #triples.add((s, p, o))
                for s, o in sorted(scratch.subject_objects(p)):
                    if revert:
                        raise NotImplementedError('TODO')
                    nlfp = o.replace(remote_base, local_base)
                    triples.add((s, p, o))
                    if 'http://' in local_filepath or 'external' in local_filepath:
                        # FIXME what to do about https used inconsistently :/
                        if 'external' in local_filepath:
                            imported_iri = rdflib.URIRef(local_filepath.replace(local_base, remote_base))  # inefficient
                        else:
                            imported_iri = rdflib.URIRef(local_filepath)
                        if s != imported_iri:
                            imported_iri_vs_ontology_iri[imported_iri] = s  # kept for the record
                            triples.add((imported_iri, p, s))  # bridge imported != ontology iri
                    if local_base in nlfp and 'file://' not in o:
                        # FIXME file:// should not be slipping through here...
                        scratch.add((s, p, rdflib.URIRef('file://' + nlfp)))
                        scratch.remove((s, p, o))
                    if nlfp not in done:
                        done.append(nlfp)
                        if local_base in nlfp and 'external' not in nlfp:  # skip externals TODO
                            inner(nlfp)
                        elif readonly:  # read external imports
                            if 'external' in nlfp:
                                inner(nlfp)
                            else:
                                inner(nlfp, remote=True)
                if not readonly:
                    ttl = scratch.serialize(format='nifttl')
                    ndata, comment = ttl.split(b'###', 1)
                    out = ndata + b'###' + rest
                    with open(local_filepath, 'wb') as f:
                        f.write(out)

    for start in ontologies:
        print('START', start)
        done.append(start)
        inner(start)
    return sorted(triples)
Read the import closure and use the local versions of the files.
entailment
def id_fix(value): """ fix @prefix values for ttl """ if value.startswith('KSC_M'): pass else: value = value.replace(':','_') if value.startswith('ERO') or value.startswith('OBI') or value.startswith('GO') or value.startswith('UBERON') or value.startswith('IAO'): value = 'obo:' + value elif value.startswith('birnlex') or value.startswith('nlx'): value = 'NIFSTD:' + value elif value.startswith('MESH'): value = ':'.join(value.split('_')) else: value = ':' + value return OntId(value).URIRef
fix @prefix values for ttl
entailment
def write(self, filename, type_='obo'):  # FIXME this is bugged
    """ Write file; will not overwrite files with the same name.
        Outputs to obo by default but can also output to ttl
        if passed type_='ttl' when called. """
    if os.path.exists(filename):
        name, ext = filename.rsplit('.', 1)
        try:
            prefix, num = name.rsplit('_', 1)
            n = int(num)
            n += 1
            filename = prefix + '_' + str(n) + '.' + ext
        except ValueError:
            filename = name + '_1.' + ext
        print('file exists, renaming to %s' % filename)
        self.write(filename, type_)
    else:
        with open(filename, 'wt', encoding='utf-8') as f:
            if type_ == 'obo':
                f.write(str(self))  # FIXME this is incredibly slow for big files :/
            elif type_ == 'ttl':
                f.write(self.__ttl__())
            else:
                raise TypeError('No exporter for file type %s!' % type_)
Write file; will not overwrite files with the same name. Outputs to obo by default but can also output to ttl if passed type_='ttl' when called.
entailment
def string_profiler(string, start_delimiter='(', end_delimiter=')', remove=True):
    ''' Split a string into whitespace tokens while treating spans enclosed
        by the delimiters as single units; delimited spans are dropped when
        remove=True, otherwise kept whole.

    long = '(life is is good) love world "(blah) blah" "here I am" once again "yes" blah '
    print(string_profiler(long))
    null = ''
    print(string_profiler(null))
    short = '(life love) yes(and much more)'
    print(string_profiler(short))
    short = 'yes "life love"'
    print(string_profiler(short))
    '''
    mark = 0
    string_list = []
    tmp_string = ''
    for i in range(len(string)):
        curr_index = i + mark
        if curr_index == len(string):
            break
        flag = string[curr_index] == start_delimiter
        if flag:
            if tmp_string:
                string_list.extend(tmp_string.strip().split())
                tmp_string = ''
            quoted_string = ''
            for j in range(curr_index + 1, len(string)):
                mark += 1
                if string[j] == end_delimiter:
                    break
                quoted_string += string[j]
            if not remove:
                string_list.append(quoted_string)
        else:
            tmp_string += string[curr_index]
    if tmp_string:
        string_list.extend(tmp_string.strip().split())
    return string_list
Split a string into whitespace tokens while treating spans enclosed by the delimiters as single units; delimited spans are dropped when remove=True, otherwise kept whole. long = '(life is is good) love world "(blah) blah" "here I am" once again "yes" blah ' print(string_profiler(long)) null = '' print(string_profiler(null)) short = '(life love) yes(and much more)' print(string_profiler(short)) short = 'yes "life love"' print(string_profiler(short))
entailment
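A traced demo of the delimiter handling (outputs derived from reading the code, not from the source document):

print(string_profiler('(life love) yes(and much more)'))
# -> ['yes']  (delimited spans dropped when remove=True)
print(string_profiler('(life love) yes(and much more)', remove=False))
# -> ['life love', 'yes', 'and much more']  (spans kept as single tokens)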
def main():
    from IPython import embed
    """
    Python 3.6.6
    libttl 2.605194091796875
    ttl 3.8316309452056885
    diff lt - ttl -1.2264368534088135
    librdfxml 31.267616748809814
    rdfxml 58.25124502182007
    diff lr - rl -26.983628273010254
    simple time 17.405116319656372
    """
    """
    Python 3.5.3 (pypy3)
    libttl 2.387338638305664
    ttl 1.3430471420288086
    diff lt - ttl 1.0442914962768555
    librdfxml 24.70371127128601
    rdfxml 17.85916304588318
    diff lr - rl 6.844548225402832
    simple time 18.32300615310669
    """
    # well I guess that answers that question ...
    # librdf much faster for cpython, not for pypy3
    from time import time
    rdflib.plugin.register('librdfxml', rdflib.parser.Parser, 'librdflib', 'libRdfxmlParser')
    rdflib.plugin.register('libttl', rdflib.parser.Parser, 'librdflib', 'libTurtleParser')
    p1 = Path('~/git/NIF-Ontology/ttl/NIF-Molecule.ttl').expanduser()
    start = time()
    graph = rdflib.Graph().parse(p1.as_posix(), format='libttl')
    stop = time()
    lttime = stop - start
    print('libttl', lttime)
    #serialize(graph)
    start = time()
    graph = rdflib.Graph().parse(p1.as_posix(), format='turtle')
    stop = time()
    ttltime = stop - start
    print('ttl', ttltime)
    print('diff lt - ttl', lttime - ttltime)
    p2 = Path('~/git/NIF-Ontology/ttl/external/uberon.owl').expanduser()
    start = time()
    graph2 = rdflib.Graph().parse(p2.as_posix(), format='librdfxml')
    stop = time()
    lrtime = stop - start
    print('librdfxml', lrtime)
    if True:
        start = time()
        graph2 = rdflib.Graph().parse(p2.as_posix(), format='xml')
        stop = time()
        rltime = stop - start
        print('rdfxml', rltime)
        print('diff lr - rl', lrtime - rltime)
    if True:
        file_uri = p2.as_uri()
        parser = RDF.Parser(name='rdfxml')
        stream = parser.parse_as_stream(file_uri)
        start = time()
        # t = list(stream)
        t = tuple(statement_to_tuple(statement) for statement in stream)
        stop = time()
        stime = stop - start
        print('simple time', stime)
    embed()
Python 3.6.6 libttl 2.605194091796875 ttl 3.8316309452056885 diff lt - ttl -1.2264368534088135 librdfxml 31.267616748809814 rdfxml 58.25124502182007 diff lr - rl -26.983628273010254 simple time 17.405116319656372
entailment
def make_predicate_object_combinator(function, p, o): """ Combinator to hold predicate object pairs until a subject is supplied and then call a function that accepts a subject, predicate, and object. Create a combinator to defer production of a triple until the missing pieces are supplied. Note that the naming here tells you what is stored IN the combinator. The argument to the combinator is the piece that is missing. """ def predicate_object_combinator(subject): return function(subject, p, o) return predicate_object_combinator
Combinator to hold predicate object pairs until a subject is supplied and then call a function that accepts a subject, predicate, and object. Create a combinator to defer production of a triple until the missing pieces are supplied. Note that the naming here tells you what is stored IN the combinator. The argument to the combinator is the piece that is missing.
entailment
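A minimal sketch of the deferred triple production; emit stands in for any (subject, predicate, object) consumer and is not from the source:

def emit(s, p, o):  # illustrative consumer
    return (s, p, o)

has_label = make_predicate_object_combinator(emit, 'rdfs:label', 'neuron')
print(has_label('ILX:0101431'))
# -> ('ILX:0101431', 'rdfs:label', 'neuron')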
def serialize(self, subject, *objects_or_combinators): """ object_combinators may also be URIRefs or Literals """ ec_s = rdflib.BNode() if self.operator is not None: if subject is not None: yield subject, self.predicate, ec_s yield from oc(ec_s) yield from self._list.serialize(ec_s, self.operator, *objects_or_combinators) else: for thing in objects_or_combinators: if isinstance(thing, Combinator): object = rdflib.BNode() #anything = list(thing(object)) #if anything: #[print(_) for _ in anything] hasType = False for t in thing(object): if t[1] == rdf.type: hasType = True yield t if not hasType: yield object, rdf.type, owl.Class else: object = thing yield subject, self.predicate, object
object_combinators may also be URIRefs or Literals
entailment
def update_with(self, **query): """ secure update, mass assignment protected """ for k, v in self._filter_attrs(query).items(): setattr(self, k, v) return self.save()
secure update, mass assignment protected
entailment
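A hedged usage sketch with a hypothetical model class; only attributes that survive _filter_attrs are assigned before save() runs:

user = User.find(1)  # hypothetical ORM lookup
user.update_with(name='Ada', role='admin')  # 'role' is dropped if protected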
def _filter_attrs(cls, attrs): """ attrs: { attr_name: attr_value } if __attr_whitelist__ is True: only attr in __attr_accessible__ AND not in __attr_protected__ will pass else: only attr not in __attr_protected__ OR in __attr_accessible__ will pass """ if cls.__attr_whitelist__: whitelist = cls.__attr_accessible__ - cls.__attr_protected__ return {k: v for k, v in attrs.items() if k in whitelist} else: blacklist = cls.__attr_protected__ - cls.__attr_accessible__ return {k: v for k, v in attrs.items() if k not in blacklist}
attrs: { attr_name: attr_value } if __attr_whitelist__ is True: only attr in __attr_accessible__ AND not in __attr_protected__ will pass else: only attr not in __attr_protected__ OR in __attr_accessible__ will pass
entailment
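A standalone illustration of the two filtering modes using made-up attribute sets:

accessible = {'name', 'email'}   # stand-in for __attr_accessible__
protected = {'email'}            # stand-in for __attr_protected__
attrs = {'name': 'Ada', 'email': 'a@b.co', 'admin': True}

whitelist = accessible - protected
print({k: v for k, v in attrs.items() if k in whitelist})
# {'name': 'Ada'}

blacklist = protected - accessible  # empty: accessible overrides protected
print({k: v for k, v in attrs.items() if k not in blacklist})
# {'name': 'Ada', 'email': 'a@b.co', 'admin': True}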
def _validate(self): """Validate model data and save errors """ errors = {} for name, validator in self._validators.items(): value = getattr(self, name) try: validator(self, value) except ValidationError as e: errors[name] = str(e) self._validate_errors = errors
Validate model data and save errors
entailment
def penn_to_wn(tag):
    """ Convert a Penn Treebank tag to a simplified WordNet tag """
    if tag.startswith('N'):
        return 'n'
    if tag.startswith('V'):
        return 'v'
    if tag.startswith('J'):
        return 'a'
    if tag.startswith('R'):
        return 'r'
    return None
Convert a Penn Treebank tag to a simplified WordNet tag
entailment
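A quick demo of the tag mapping:

for tag in ('NN', 'VBZ', 'JJ', 'RB', 'DT'):
    print(tag, '->', penn_to_wn(tag))
# NN -> n, VBZ -> v, JJ -> a, RB -> r, DT -> None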
def sentence_similarity(sentence1, sentence2): """ compute the sentence similarity using Wordnet """ # Tokenize and tag sentence1 = pos_tag(word_tokenize(sentence1)) sentence2 = pos_tag(word_tokenize(sentence2)) # Get the synsets for the tagged words synsets1 = [tagged_to_synset(*tagged_word) for tagged_word in sentence1] synsets2 = [tagged_to_synset(*tagged_word) for tagged_word in sentence2] # Filter out the Nones synsets1 = [ss for ss in synsets1 if ss] synsets2 = [ss for ss in synsets2 if ss] #print(synsets1) #print(synsets2) score, count = 0.0, 0.0 # For each word in the first sentence for synset in synsets1: # Get the similarity value of the most similar word in the other sentence best_score=[synset.path_similarity(ss) for ss in synsets2 if synset.path_similarity(ss)] # Check that the similarity could have been computed if best_score: score += max(best_score) count += 1 # Average the values if count > 0: score /= count else: score = 0 return score
compute the sentence similarity using Wordnet
entailment
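A hedged usage sketch; it assumes nltk with the punkt, tagger, and wordnet data installed plus the module's tagged_to_synset helper, and since the score is asymmetric both directions are often averaged:

s1 = 'Cats are beautiful animals.'
s2 = 'Dogs are awesome.'
print(sentence_similarity(s1, s2))
print(sentence_similarity(s2, s1))  # generally differs from the line above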
def command_line(): ''' If you want to use the command line ''' from docopt import docopt doc = docopt( __doc__, version=VERSION ) args = pd.Series({k.replace('--', ''): v for k, v in doc.items()}) if args.all: graph = Graph2Pandas(args.file, _type='all') elif args.type: graph = Graph2Pandas(args.file, _type=args.type) else: graph = Graph2Pandas(args.file) graph.save(args.output)
If you want to use the command line
entailment
def save(self, foldername: str, path_to_folder: str=None) -> None:
    ''' Saves entities into multiple files within the same folder because of
        pickle-recursive errors that would happen if squeezed into one '''
    output = p(path_to_folder or '.') / foldername  # assumed destination for the dataframe pickle
    self.create_pickle((self.g.namespaces, ))
    self.df.to_pickle(output)
Saves entities into multiple files within the same folder because of pickle-recursive errors that would happen if squeezed into one
entailment
def qname(self, uri: str) -> str: ''' Returns qname of uri in rdflib graph while also saving it ''' try: prefix, namespace, name = self.g.compute_qname(uri) qname = prefix + ':' + name return qname except: try: print('prefix:', prefix) print('namespace:', namespace) print('name:', name) except: print('Could not print from compute_qname') exit('No qname for ' + uri)
Returns qname of uri in rdflib graph while also saving it
entailment
def Graph2Pandas_converter(self):
    '''Updates self.g or self.path because you can only choose one'''
    if isinstance(self.path, str) or isinstance(self.path, p):
        self.path = str(self.path)
        filetype = p(self.path).suffix
        if filetype == '.pickle':
            self.g = pickle.load(open(self.path, 'rb'))
            if isinstance(self.g, rdflib.graph.Graph):
                return self.get_sparql_dataframe()
            else:
                print('WARNING:: function df() wont work unless an ontology source is loaded')
                return self.g
        elif filetype == '.ttl' or filetype == '.rdf':
            self.g = rdflib.Graph()
            self.g.parse(self.path, format='turtle')
            return self.get_sparql_dataframe()
        elif filetype == '.nt':
            self.g = rdflib.Graph()
            self.g.parse(self.path, format='nt')
            return self.get_sparql_dataframe()
        elif filetype == '.owl' or filetype == '.xrdf':
            self.g = rdflib.Graph()
            try:
                self.g.parse(self.path, format='xml')
            except Exception:  # some owl formats are more rdf than owl
                self.g.parse(self.path, format='turtle')
            return self.get_sparql_dataframe()
        else:
            exit('Format options: owl, ttl, df_pickle, rdflib.Graph()')
    elif isinstance(self.g, rdflib.graph.Graph):
        self.path = None
        return self.get_sparql_dataframe()
    else:
        exit('Obj given is not str, pathlib obj, or an rdflib.Graph()')
Updates self.g or self.path because you can only choose one
entailment
def get_sparql_dataframe(self):
    ''' Iterates through the sparql table and condenses it into a Pandas DataFrame '''
    self.result = self.g.query(self.query)
    cols = set()  # set(['qname'])
    indx = set()
    data = {}
    curr_subj = None  # place marker for first subj to be processed
    bindings = []

    for i, binding in enumerate(self.result.bindings):
        subj_binding = binding[rdflib.term.Variable('subj')]
        pred_binding = binding[rdflib.term.Variable('pred')]
        obj_binding = binding[rdflib.term.Variable('obj')]

        subj = subj_binding
        pred = pred_binding  # self.qname(pred_binding) if self.predicate_as_qname else pred_binding
        obj = obj_binding

        # stops at BNodes; could be expanded here
        if isinstance(subj, BNode):
            continue
        elif isinstance(pred, BNode):
            continue
        elif isinstance(obj, BNode) and obj:
            continue
        # else:
        #     subj = str(subj)
        #     pred = str(pred)
        #     obj = str(obj)

        cols.add(pred)
        indx.add(subj)
        bindings.append(binding)

    bindings = sorted(bindings, key=lambda k: k[rdflib.term.Variable('subj')])

    df = pd.DataFrame(columns=cols, index=indx)
    for i, binding in enumerate(bindings):
        subj_binding = binding[rdflib.term.Variable('subj')]
        pred_binding = binding[rdflib.term.Variable('pred')]
        obj_binding = binding[rdflib.term.Variable('obj')]

        subj = subj_binding
        pred = pred_binding  # self.qname(pred_binding) if self.qname_predicates else pred_binding
        obj = obj_binding

        # stops at BNodes; could be expanded here
        if isinstance(subj, BNode):
            continue
        elif isinstance(pred, BNode):
            continue
        elif isinstance(obj, BNode) and obj:
            continue
        # elif self.value_type:
        #     subj = str(subj)
        #     pred = str(pred)
        #     obj = str(obj)

        if curr_subj is None:
            curr_subj = subj
            if not data.get(subj):  # Prepare defaultdict home if it doesn't exist
                data[subj] = defaultdict(list)
            data[subj][pred].append(obj)
        elif curr_subj != subj:
            curr_subj = subj
            for data_subj, data_pred_objs in data.items():
                for data_pred, data_objs in data_pred_objs.items():
                    if len(data_objs) == 1:  # clean lists of just 1 value
                        data_pred_objs[data_pred] = data_objs[0]
                df.loc[data_subj] = pd.Series(data_pred_objs)
            data = {}
            if not data.get(subj):  # Prepare defaultdict home if it doesn't exist
                data[subj] = defaultdict(list)
            data[subj][pred].append(obj)
        else:
            if not data.get(subj):  # Prepare defaultdict home if it doesn't exist
                data[subj] = defaultdict(list)
            data[subj][pred].append(obj)

    for data_subj, data_pred_objs in data.items():
        for data_pred, data_objs in data_pred_objs.items():
            if len(data_objs) == 1:  # clean lists of just 1 value
                data_pred_objs[data_pred] = data_objs[0]
        df.loc[data_subj] = pd.Series(data_pred_objs)

    df = df.where((pd.notnull(df)), None)  # default Null is fricken Float NaN
    return df
Iterates through the sparql table and condenses it into a Pandas DataFrame
entailment
def df(self, qname_predicates: bool=False, keep_variable_type: bool=True) -> pd.DataFrame:
    ''' Multifunctional DataFrame with settings '''
    local_df = self.df.copy()
    if qname_predicates:
        for col in local_df.columns:
            local_df = local_df.rename(columns={col: self.qname(col)})
    if not keep_variable_type:
        pass  # convert all to strings, watch out for lists
    return local_df
Multifunctional DataFrame with settings
entailment
def add_namespaces(self, namespaces: Dict[str, str]) -> None:
    """ Adds a prefix to uri mapping (namespace) in bulk

    Adds namespaces to replace any uris in iris with shortened prefixes
    in order to make the file more readable. Not technically necessary.

    Args:
        namespaces: prefix to uri mappings

    Example:
        add_namespaces(
            namespaces = {
                'my_prefix': 'http://myurl.org/',
                'memo': 'http://memolibrary.org/memo#',
            }
        )
    """
    for prefix, uri in namespaces.items():
        self.add_namespace(prefix=prefix, uri=uri)
Adds a prefix to uri mapping (namespace) in bulk Adds namespaces to replace any uris in iris with shortened prefixes in order to make the file more readable. Not technically necessary. Args: namespaces: prefix to uri mappings Example: add_namespaces( namespaces = { 'my_prefix': 'http://myurl.org/', 'memo': 'http://memolibrary.org/memo#', } )
entailment
def qname(self, iri: str) -> str:
    """ Get the qualified name of an iri in the rdflib graph while also saving it

    Args:
        iri: The iri whose uri should be replaced with a known prefix

    Returns:
        qualified name of the iri to be used as the new predicate
    """
    prefix, namespace, name = self.g.compute_qname(iri)
    qname = prefix + ':' + name
    self.rqname[qname] = iri
    return qname
Get the qualified name of an iri in the rdflib graph while also saving it Args: iri: The iri whose uri should be replaced with a known prefix Returns: qualified name of the iri to be used as the new predicate
entailment
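A usage sketch assuming a hypothetical instance gr of this class with the 'rdfs' prefix already bound:

gr.add_namespace('rdfs', 'http://www.w3.org/2000/01/rdf-schema#')
print(gr.qname('http://www.w3.org/2000/01/rdf-schema#label'))  # 'rdfs:label'
print(gr.rqname['rdfs:label'])  # cached reverse mapping back to the iri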
def add_namespace(self, prefix: str, uri: str) -> Namespace:
    """ Adds a prefix to uri mapping (namespace)

    Adds a namespace to replace any uris in iris with shortened prefixes
    in order to make the file more readable. Not technically necessary.

    Args:
        prefix: prefix that will substitute the uri in the iri
        uri: the uri in the iri to be substituted by the prefix

    Returns:
        A namespace of the uri

    Example:
        add_namespace(
            prefix = "rdfs",
            uri = 'http://www.w3.org/2000/01/rdf-schema#',
        )
        makes
            "http://www.w3.org/2000/01/rdf-schema#label 'neuron'@en ;",
        become
            "rdfs:label 'neuron'@en ;"
    """
    ns = Namespace(uri)
    if not self.namespaces.get(prefix):
        self.namespaces[prefix] = ns
        self.g.bind(prefix, uri)
    return ns
Adds a prefix to uri mapping (namespace) Adds a namespace to replace any uris in iris with shortened prefixes in order to make the file more readable. Not technically necessary. Args: prefix: prefix that will substitute the uri in the iri uri: the uri in the iri to be substituted by the prefix Returns: A namespace of the uri Example: add_namespace( prefix = "rdfs", uri = 'http://www.w3.org/2000/01/rdf-schema#', ) makes "http://www.w3.org/2000/01/rdf-schema#label 'neuron'@en ;", become "rdfs:label 'neuron'@en ;"
entailment
def find_prefix(self, iri: Union[URIRef, Literal, str]) -> Union[None, str]:
    """ Finds if a uri in common_namespaces appears in the given iri

    Auto adds the prefix if the incoming iri has a uri in common_namespaces.
    If it's not in the local library, the iri is just spit back out as-is
    and not saved/condensed into a qualified name. The max tracking finds
    the longest string match, which avoids accidentally matching an iri
    against a short uri when a more complete uri is the real match.

    Args:
        iri: iri to be searched to find a known uri in it.

    Example:
        In [1]: print(find_prefix("http://www.w3.org/2000/01/rdf-schema#label"))
        Out [1]: "rdfs"
        In [2]: print(find_prefix("http://made_up_uri/label"))
        Out [2]: None
    """
    iri = str(iri)
    max_iri_len = 0
    max_prefix = None
    for prefix, uri in common_namespaces.items():
        if uri in iri and max_iri_len < len(uri):  # a longer matching uri wins
            max_prefix = prefix
            max_iri_len = len(uri)
    return max_prefix
Finds if a uri in common_namespaces appears in the given iri Auto adds the prefix if the incoming iri has a uri in common_namespaces. If it's not in the local library, the iri is just spit back out as-is and not saved/condensed into a qualified name. The max tracking finds the longest string match, which avoids accidentally matching an iri against a short uri when a more complete uri is the real match. Args: iri: iri to be searched to find a known uri in it. Example: In [1]: print(find_prefix("http://www.w3.org/2000/01/rdf-schema#label")) Out [1]: "rdfs" In [2]: print(find_prefix("http://made_up_uri/label")) Out [2]: None
entailment
def add_annotation(
    self,
    subj: URIRef,
    pred: URIRef,
    obj: Union[Literal, URIRef],
    a_p: URIRef,
    a_o: Union[Literal, URIRef],
) -> BNode:
    """ Adds an annotation to the rdflib graph.

    The annotation axiom will be filled in if this is a new annotation for the triple.

    Args:
        subj: Entity subject to be annotated
        pred: Entity predicate anchor to be annotated
        obj: Entity object anchor to be annotated
        a_p: Annotation predicate
        a_o: Annotation object

    Returns:
        A BNode which is an address to the location in the RDF graph that
        is storing the annotation information.
    """
    a_s: BNode = self.triple2annotation_bnode.get((subj, pred, obj))
    if not a_s:
        a_s = BNode()
        self.triple2annotation_bnode[(subj, pred, obj)] = a_s
        self.g.add((a_s, RDF.type, OWL.Axiom))
        self.g.add((a_s, OWL.annotatedSource, self.process_subj_or_pred(subj)))
        self.g.add((a_s, OWL.annotatedProperty, self.process_subj_or_pred(pred)))
        self.g.add((a_s, OWL.annotatedTarget, self.process_obj(obj)))
    self.g.add((a_s, self.process_subj_or_pred(a_p), self.process_obj(a_o)))
    return a_s
Adds an annotation to the rdflib graph. The annotation axiom will be filled in if this is a new annotation for the triple. Args: subj: Entity subject to be annotated pred: Entity predicate anchor to be annotated obj: Entity object anchor to be annotated a_p: Annotation predicate a_o: Annotation object Returns: A BNode which is an address to the location in the RDF graph that is storing the annotation information.
entailment
def add_triple(
    self,
    subj: Union[URIRef, str],
    pred: Union[URIRef, str],
    obj: Union[URIRef, Literal, str]
) -> None:
    """ Adds a triple to the rdflib Graph

    The triple can take any subject, predicate, and object for the entity;
    components do not need to be added in any particular order.

    Args:
        subj: Entity subject
        pred: Entity predicate
        obj: Entity object

    Example:
        In [1]: add_triple(
           ...:     'http://uri.interlex.org/base/ilx_0101431',
           ...:     RDF.type,
           ...:     'http://www.w3.org/2002/07/owl#Class'
           ...: )
    """
    if obj in [None, "", " "]:
        return  # Empty objects are bad practice

    _subj = self.process_subj_or_pred(subj)
    _pred = self.process_subj_or_pred(pred)
    _obj = self.process_obj(obj)
    self.g.add((_subj, _pred, _obj))
Adds a triple to the rdflib Graph The triple can take any subject, predicate, and object for the entity; components do not need to be added in any particular order. Args: subj: Entity subject pred: Entity predicate obj: Entity object Example: In [1]: add_triple( ...: 'http://uri.interlex.org/base/ilx_0101431', ...: RDF.type, ...: 'http://www.w3.org/2002/07/owl#Class' ...: )
entailment
def process_prefix(self, prefix: str) -> Union[Namespace, None]:
    """ Add namespace to graph if it has a local match

    This allows qnames to be used without adding their respective namespaces
    if they are in the common_namespaces local dict. This is to save a
    butt-ton of time trying to see what the ontology has as far as uris go.

    Args:
        prefix: prefix of the uri in the rdflib namespace to be checked
            if it exists in the local dict of common_namespaces.

    Returns:
        Namespace of uri if added or already exists; else None
    """
    if self.namespaces.get(prefix):
        return self.namespaces[prefix]
    iri: str = common_namespaces.get(prefix)
    if iri:
        return self.add_namespace(prefix, iri)
Add namespace to graph if it has a local match This allows qnames to be used without adding their respective namespaces if they are in the common_namespaces local dict. This is to save a butt-ton of time trying to see what the ontology has as far as uris go. Args: prefix: prefix of the uri in the rdflib namespace to be checked if it exists in the local dict of common_namespaces. Returns: Namespace of uri if added or already exists; else None
entailment
def process_subj_or_pred(self, component: Union[URIRef, str]) -> URIRef:
    """ Adds a viable uri from an iri, or expands a viable qname to an iri, to be triple ready

    You need a viable qualified name (qname) in order to use a qname. You can
    make it viable either by adding the namespace beforehand with
    add_namespace(s), or if it's already preloaded in the local
    common_namespaces.

    Args:
        component: entity subject or predicate to be expanded or have its uri saved.

    Returns:
        rdflib URIRef ready subject or predicate to be put into a triple.

    Raises:
        SystemExit: When expecting a qname to be expanded, but it is not valid,
            or if the component is not a qualified name or an iri.
    """
    if 'http' in component:
        prefix = self.find_prefix(component)  # Find uri in iri based on common_namespaces
        if prefix:
            self.process_prefix(prefix)  # if match, will add to Graph namespaces
        return URIRef(component)
    elif ':' in component:
        presumed_prefix, info = component.split(':', 1)
        namespace: Union[Namespace, None] = self.process_prefix(presumed_prefix)
        if not namespace:
            exit(component + ": qname namespace doesn't exist yet.")
        return namespace[info]
    exit(component + ': is not a valid subject or predicate')
Adds a viable uri from an iri, or expands a viable qname to an iri, to be triple ready You need a viable qualified name (qname) in order to use a qname. You can make it viable either by adding the namespace beforehand with add_namespace(s), or if it's already preloaded in the local common_namespaces. Args: component: entity subject or predicate to be expanded or have its uri saved. Returns: rdflib URIRef ready subject or predicate to be put into a triple. Raises: SystemExit: When expecting a qname to be expanded, but it is not valid, or if the component is not a qualified name or an iri.
entailment
def process_obj(self, obj: Union[URIRef, Literal, str]) -> Union[URIRef, Literal]: """ Gives component the proper node type Args: obj: Entity object to be converted to its appropriate node type Returns: URIRef or Literal type of the object provided. Raises: SystemExit: If object is a dict or list it becomes str with broken data. Needs to come in one object at a time. """ if isinstance(obj, dict) or isinstance(obj, list): exit(str(obj) + ': should be str or intended to be a URIRef or Literal.') if isinstance(obj, Literal) or isinstance(obj, URIRef): prefix = self.find_prefix(obj) if prefix: self.process_prefix(prefix) return obj if len(obj) > 8: if 'http' == obj[:4] and '://' in obj and ' ' not in obj: prefix = self.find_prefix(obj) if prefix: self.process_prefix(prefix) return URIRef(obj) if ':' in str(obj): presumed_prefix, info = obj.split(':', 1) namespace: Union[Namespace, None] = self.process_prefix(presumed_prefix) if namespace: return namespace[info] return Literal(obj)
Gives component the proper node type Args: obj: Entity object to be converted to its appropriate node type Returns: URIRef or Literal type of the object provided. Raises: SystemExit: If object is a dict or list it becomes str with broken data. Needs to come in one object at a time.
entailment
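A sketch of the dispatch with a hypothetical instance gr, assuming 'rdfs' is present in common_namespaces:

print(type(gr.process_obj('http://example.org/thing')))  # rdflib URIRef
print(type(gr.process_obj('rdfs:label')))   # URIRef via the expanded qname
print(type(gr.process_obj('just a label'))) # rdflib Literal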
def serialize(self, **kwargs) -> str:
    """ rdflib.Graph().serialize wrapper

    The original serialize cannot handle PosixPath from pathlib. You should
    ignore everything but destination and format. format is a must, but if
    you don't include a destination, it will just return the formatted graph
    as a str output.

    Args:
        destination: Output file path
        format: format for the triples to be put together as: 'xml', 'n3',
            'turtle', 'nt', 'pretty-xml', 'trix', 'trig' and 'nquads' are
            built in; json-ld in rdflib_jsonld
        base: None
        encoding: None
        **args: None
    """
    kwargs = {key: str(value) for key, value in kwargs.items()}
    return self.g.serialize(**kwargs)
rdflib.Graph().serialize wrapper The original serialize cannot handle PosixPath from pathlib. You should ignore everything but destination and format. format is a must, but if you don't include a destination, it will just return the formatted graph as a str output. Args: destination: Output file path format: format for the triples to be put together as: 'xml', 'n3', 'turtle', 'nt', 'pretty-xml', 'trix', 'trig' and 'nquads' are built in; json-ld in rdflib_jsonld base: None encoding: None **args: None
entailment
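A usage sketch with a hypothetical instance gr; str-casting the kwargs is what lets PosixPath destinations work:

from pathlib import Path

ttl = gr.serialize(format='turtle')  # no destination -> returns the text (bytes in older rdflib)
gr.serialize(destination=Path('out.ttl'), format='turtle')  # writes the file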