sentence1 (stringlengths: 52 to 3.87M)
sentence2 (stringlengths: 1 to 47.2k)
label (stringclasses: 1 value)
def remove_triple( self, subj: URIRef, pred: URIRef, obj: Union[URIRef, Literal] ) -> None: """ Removes a triple from the rdflib Graph. You must provide each node of the triple in its URIRef or Literal form exactly as it was originally entered, or the triple will not be deleted. Args: subj: Entity subject to be removed if it is the only node with this subject; otherwise this only deletes one description, i.e. a single predicate-object pair of this entity. pred: Entity predicate to be removed obj: Entity object to be removed """ self.g.remove( (subj, pred, obj) )
Removes a triple from the rdflib Graph. You must provide each node of the triple in its URIRef or Literal form exactly as it was originally entered, or the triple will not be deleted. Args: subj: Entity subject to be removed if it is the only node with this subject; otherwise this only deletes one description, i.e. a single predicate-object pair of this entity. pred: Entity predicate to be removed obj: Entity object to be removed
entailment
def print_graph(self, format: str = 'turtle') -> None: """ Prints the serialized, formatted rdflib Graph. """ print(self.g.serialize(format=format).decode('utf-8'))
Prints the serialized, formatted rdflib Graph.
entailment
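A minimal usage sketch for the two graph helpers above; it works directly on a bare rdflib.Graph rather than on the wrapper class that owns self.g, and the IRI shown is a placeholder:

from rdflib import Graph, URIRef, Literal, RDFS

g = Graph()
subj = URIRef('http://uri.interlex.org/base/ilx_0101431')  # placeholder id
g.add((subj, RDFS.label, Literal('brain')))

# remove_triple only works when every node is passed exactly as it was added
g.remove((subj, RDFS.label, Literal('brain')))

# print_graph wraps this serialize call; recent rdflib versions return str,
# so the .decode('utf-8') is only needed on older releases that return bytes
print(g.serialize(format='turtle'))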
def identifierSearches(self, ids=None, LIMIT=25, _print=True, crawl=False): """parameters( data = "list of term_ids" )""" url_base = self.base_url + '/api/1/term/view/{id}' + '?key=' + self.api_key urls = [url_base.format(id=str(_id)) for _id in ids] return self.get( urls=urls, LIMIT=LIMIT, action='Searching For Terms', crawl=crawl, _print=_print)
parameters( data = "list of term_ids" )
entailment
def ilxSearches(self, ilx_ids=None, LIMIT=25, _print=True, crawl=False): """parameters( data = "list of ilx_ids" )""" url_base = self.base_url + "/api/1/ilx/search/identifier/{identifier}?key={APIKEY}" urls = [url_base.format(identifier=ilx_id.replace('ILX:', 'ilx_'), APIKEY=self.api_key) for ilx_id in ilx_ids] return self.get( urls=urls, LIMIT=LIMIT, action='Searching For Terms', crawl=crawl, _print=_print)
parameters( data = "list of ilx_ids" )
entailment
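For orientation, a sketch of the URLs the two search helpers above construct, assuming base_url and api_key attributes like the client's; the values here are placeholders:

base_url = 'https://scicrunch.org'  # placeholder
api_key = 'MY_API_KEY'              # placeholder

# identifierSearches builds one term-view URL per numeric term id
ids = [304713, 304714]
view_urls = [base_url + '/api/1/term/view/{id}'.format(id=str(_id)) + '?key=' + api_key
             for _id in ids]

# ilxSearches normalizes ILX curies to the ilx_ fragment form before querying
ilx_ids = ['ILX:0101431']
search_urls = [base_url + '/api/1/ilx/search/identifier/{identifier}?key={APIKEY}'.format(
                   identifier=ilx_id.replace('ILX:', 'ilx_'), APIKEY=api_key)
               for ilx_id in ilx_ids]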
def updateTerms(self, data:list, LIMIT:int=20, _print:bool=True, crawl:bool=False,) -> list: """ Updates existing entities Args: data: needs: id <str> ilx_id <str> options: definition <str> #bug with quotations superclasses [{'id':<int>}] type term, cde, annotation, or relationship <str> synonyms {'literal':<str>} existing_ids {'iri':<str>,'curie':<str>,'change':<bool>, 'delete':<bool>} LIMIT: limit of concurrent requests _print: prints label of data presented crawl: True: Uses linear requests. False: Uses concurrent requests from the asyncio and aiohttp modules Returns: List of filled-in data, parallel with the input data. If any entity failed for an ignorable reason, its item in the returned list will be empty. """ url_base = self.base_url + '/api/1/term/edit/{id}' merged_data = [] # PHP on the server is LOADED with bugs. Best to just duplicate entity data and change # what you need in it before re-upserting the data. old_data = self.identifierSearches( [d['id'] for d in data], # just need the ids LIMIT = LIMIT, _print = _print, crawl = crawl, ) for d in data: # d for dictionary url = url_base.format(id=str(d['id'])) # This check exists to avoid contradictions in case you are using a local reference if d['ilx'] != old_data[int(d['id'])]['ilx']: print(d['ilx'], old_data[int(d['id'])]['ilx']) exit('You might be using beta instead of production!') merged = scicrunch_client_helper.merge(new=d, old=old_data[int(d['id'])]) merged = scicrunch_client_helper.superclasses_bug_fix(merged) # BUG: superclass output differs from the input needed merged_data.append((url, merged)) resp = self.post( merged_data, LIMIT = LIMIT, action = 'Updating Terms', # forced input from each function _print = _print, crawl = crawl, ) return resp
Updates existing entities Args: data: needs: id <str> ilx_id <str> options: definition <str> #bug with quotations superclasses [{'id':<int>}] type term, cde, annotation, or relationship <str> synonyms {'literal':<str>} existing_ids {'iri':<str>,'curie':<str>,'change':<bool>, 'delete':<bool>} LIMIT: limit of concurrent requests _print: prints label of data presented crawl: True: Uses linear requests. False: Uses concurrent requests from the asyncio and aiohttp modules Returns: List of filled-in data, parallel with the input data. If any entity failed for an ignorable reason, its item in the returned list will be empty.
entailment
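A hedged usage sketch for updateTerms, assuming a client instance here called sci (hypothetical name) and ids that already exist on the server:

data = [{
    'id': '304713',           # placeholder term id
    'ilx': 'ilx_0101431',     # must match the server record, per the beta/production check above
    'definition': 'Updated definition text.',
    'synonyms': [{'literal': 'encephalon'}],
}]
updated = sci.updateTerms(data, LIMIT=20, _print=True, crawl=False)
# updated is parallel with data; entities that failed for an ignorable reason come back empty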
def addTerms(self, data, LIMIT=25, _print=True, crawl=False): """ need: label <str> type term, cde, anntation, or relationship <str> options: definition <str> #bug with qutations superclasses [{'id':<int>}] synonyms [{'literal':<str>}] existing_ids [{'iri':<str>,'curie':<str>'}] ontologies [{'id':<int>}] [{'type':'term', 'label':'brain'}] """ needed = set([ 'label', 'type', ]) url_base = self.base_url + '/api/1/ilx/add' terms = [] for d in data: if (set(list(d)) & needed) != needed: exit('You need keys: '+ str(needed - set(list(d)))) if not d.get('label') or not d.get('type'): # php wont catch empty type! exit('=== Data is missing label or type! ===') d['term'] = d.pop('label') # ilx only accepts term, will need to replaced back #d['batch-elastic'] = 'True' # term/add and edit should be ready now terms.append((url_base, d)) primer_responses = self.post( terms, action='Priming Terms', LIMIT=LIMIT, _print=_print, crawl=crawl) ilx = {} for primer_response in primer_responses: primer_response['term'] = primer_response['term'].replace('&#39;', "'") primer_response['term'] = primer_response['term'].replace('&#34;', '"') primer_response['label'] = primer_response.pop('term') ilx[primer_response['label'].lower()] = primer_response url_base = self.base_url + '/api/1/term/add' terms = [] for d in data: d['label'] = d.pop('term') d = scicrunch_client_helper.superclasses_bug_fix(d) if not ilx.get(d['label'].lower()): # ilx can be incomplete if errored term continue try: d.update({'ilx': ilx[d['label'].lower()]['ilx']}) except: d.update({'ilx': ilx[d['label'].lower()]['fragment']}) terms.append((url_base, d)) return self.post( terms, action='Adding Terms', LIMIT=LIMIT, _print=_print, crawl=crawl)
need: label <str> type term, cde, anntation, or relationship <str> options: definition <str> #bug with qutations superclasses [{'id':<int>}] synonyms [{'literal':<str>}] existing_ids [{'iri':<str>,'curie':<str>'}] ontologies [{'id':<int>}] [{'type':'term', 'label':'brain'}]
entailment
def getAnnotations_via_tid(self, tids, LIMIT=25, _print=True, crawl=False): """ tids = list of term ids that possess the annotations """ url_base = self.base_url + \ '/api/1/term/get-annotations/{tid}?key=' + self.api_key urls = [url_base.format(tid=str(tid)) for tid in tids] return self.get(urls, LIMIT=LIMIT, _print=_print, crawl=crawl)
tids = list of term ids that possess the annotations
entailment
def getAnnotations_via_id(self, annotation_ids, LIMIT=25, _print=True, crawl=False): """annotation_ids = list of strings or ints that are the ids of the annotations themselves""" url_base = self.base_url + \ '/api/1/term/get-annotation/{id}?key=' + self.api_key urls = [ url_base.format(id=str(annotation_id)) for annotation_id in annotation_ids ] return self.get(urls, LIMIT=LIMIT, _print=_print, crawl=crawl)
annotation_ids = list of strings or ints that are the ids of the annotations themselves
entailment
def updateAnnotations(self, data, LIMIT=25, _print=True, crawl=False,): """data = [{'id', 'tid', 'annotation_tid', 'value', 'comment', 'upvote', 'downvote', 'curator_status', 'withdrawn', 'term_version', 'annotation_term_version', 'orig_uid', 'orig_time'}] """ url_base = self.base_url + \ '/api/1/term/edit-annotation/{id}' # id of annotation not term id annotations = self.getAnnotations_via_id([d['id'] for d in data], LIMIT=LIMIT, _print=_print, crawl=crawl) annotations_to_update = [] for d in data: annotation = annotations[int(d['id'])] annotation.update({**d}) url = url_base.format(id=annotation['id']) annotations_to_update.append((url, annotation)) self.post(annotations_to_update, LIMIT=LIMIT, action='Updating Annotations', _print=_print, crawl=crawl)
data = [{'id', 'tid', 'annotation_tid', 'value', 'comment', 'upvote', 'downvote', 'curator_status', 'withdrawn', 'term_version', 'annotation_term_version', 'orig_uid', 'orig_time'}]
entailment
def deleteAnnotations(self, annotation_ids, LIMIT=25, _print=True, crawl=False,): """data = list of ids""" url_base = self.base_url + \ '/api/1/term/edit-annotation/{annotation_id}' # id of annotation not term id; thx past troy! annotations = self.getAnnotations_via_id(annotation_ids, LIMIT=LIMIT, _print=_print, crawl=crawl) annotations_to_delete = [] for annotation_id in annotation_ids: annotation = annotations[int(annotation_id)] params = { 'value': ' ', # for delete 'annotation_tid': ' ', # for delete 'tid': ' ', # for delete 'term_version': '1', 'annotation_term_version': '1', } url = url_base.format(annotation_id=annotation_id) annotation.update({**params}) annotations_to_delete.append((url, annotation)) return self.post(annotations_to_delete, LIMIT=LIMIT, _print=_print, crawl=crawl)
data = list of ids
entailment
def addRelationships( self, data: list, LIMIT: int = 20, _print: bool = True, crawl: bool = False, ) -> list: """ data = [{ "term1_id", "term2_id", "relationship_tid", "term1_version", "term2_version", "relationship_term_version",}] """ url_base = self.base_url + '/api/1/term/add-relationship' relationships = [] for relationship in data: relationship.update({ 'term1_version': relationship['term1_version'], 'term2_version': relationship['term2_version'], 'relationship_term_version': relationship['relationship_term_version'] }) relationships.append((url_base, relationship)) return self.post( relationships, LIMIT = LIMIT, action = 'Adding Relationships', _print = _print, crawl = crawl, )
data = [{ "term1_id", "term2_id", "relationship_tid", "term1_version", "term2_version", "relationship_term_version",}]
entailment
def deprecate_entity( self, ilx_id: str, note = None, ) -> None: """ Tags a term in InterLex to warn that this term is no longer used. There isn't a proper way to delete a term, so we have to mark it instead so that can be extrapolated in mysql/ttl loads. Args: ilx_id: ILX id of the term to be deprecated note: optional editor note explaining the deprecation Example: deprecate_entity('ilx_0101431') """ term_id, term_version = [(d['id'], d['version']) for d in self.ilxSearches([ilx_id], crawl=True, _print=False).values()][0] annotations = [{ 'tid': term_id, 'annotation_tid': '306375', # id for annotation "deprecated" 'value': 'True', 'term_version': term_version, 'annotation_term_version': '1', # term version for annotation "deprecated" }] if note: editor_note = { 'tid': term_id, 'annotation_tid': '306378', # id for annotation "editorNote" 'value': note, 'term_version': term_version, 'annotation_term_version': '1', # term version for annotation "editorNote" } annotations.append(editor_note) self.addAnnotations(annotations, crawl=True, _print=False) print(annotations)
Tags a term in InterLex to warn that this term is no longer used. There isn't a proper way to delete a term, so we have to mark it instead so that can be extrapolated in mysql/ttl loads. Args: ilx_id: ILX id of the term to be deprecated note: optional editor note explaining the deprecation Example: deprecate_entity('ilx_0101431')
entailment
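Usage sketch for deprecate_entity, again assuming a client instance named sci (hypothetical); the id and note are placeholders:

# Adds the "deprecated" annotation, plus an "editorNote" annotation when a note is given
sci.deprecate_entity('ilx_0101431', note='superseded by a newer term')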
def force_add_term(self, entity: dict): """ Need to add an entity that already has a label existing in InterLex? Well this is the function for you! entity: need: label <str> type term, cde, pde, fde, anntation, or relationship <str> options: definition <str> superclasses [{'id':<int>}] synonyms [{'literal':<str>}] existing_ids [{'iri':<str>,'curie':<str>'}] ontologies [{'id':<int>}] example: entity = [{ 'type':'term', 'label':'brain', 'existing_ids': [{ 'iri':'http://ncbi.org/123', 'curie':'NCBI:123' }] }] """ needed = set([ 'label', 'type', ]) url_ilx_add = self.base_url + '/api/1/ilx/add' url_term_add = self.base_url + '/api/1/term/add' url_term_update = self.base_url + '/api/1/term/edit/{id}' if (set(list(entity)) & needed) != needed: exit('You need keys: '+ str(needed - set(list(d)))) # to ensure uniqueness random_string = ''.join(random.choices(string.ascii_uppercase + string.digits, k=25)) real_label = entity['label'] entity['label'] = entity['label'] + '_' + random_string entity['term'] = entity.pop('label') # ilx only accepts term, will need to replaced back primer_response = self.post([(url_ilx_add, entity.copy())], _print=False, crawl=True)[0] entity['label'] = entity.pop('term') entity['ilx'] = primer_response['fragment'] if primer_response.get('fragment') else primer_response['ilx'] entity = scicrunch_client_helper.superclasses_bug_fix(entity) response = self.post([(url_term_add, entity.copy())], _print=False, crawl=True)[0] old_data = self.identifierSearches( [response['id']], # just need the ids _print = False, crawl = True, )[response['id']] old_data['label'] = real_label entity = old_data.copy() url_term_update = url_term_update.format(id=entity['id']) return self.post([(url_term_update, entity)], _print=False, crawl=True)
Need to add an entity that already has a label existing in InterLex? Well this is the function for you! entity: need: label <str> type term, cde, pde, fde, anntation, or relationship <str> options: definition <str> superclasses [{'id':<int>}] synonyms [{'literal':<str>}] existing_ids [{'iri':<str>,'curie':<str>'}] ontologies [{'id':<int>}] example: entity = [{ 'type':'term', 'label':'brain', 'existing_ids': [{ 'iri':'http://ncbi.org/123', 'curie':'NCBI:123' }] }]
entailment
def create_df_file_with_query(self, query, output): """ Dumps the DataFrame in chunks to avoid crashes. """ chunk_size = 100000 offset = 0 with open(output, 'wb') as outfile: query = query.replace(';', '') query += """ LIMIT {chunk_size} OFFSET {offset};""" while True: print(offset) chunk_query = query.format( chunk_size=chunk_size, offset=offset ) df = pd.read_sql(chunk_query, self.engine) pickle.dump(df, outfile) offset += chunk_size if len(df) < chunk_size: break
Dumps the DataFrame in chunks to avoid crashes.
entailment
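create_df_file_with_query writes several pickled DataFrames back to back into one file, so reading it back means looping pickle.load until EOF. A small reader sketch (the file name is a placeholder):

import pickle
import pandas as pd

chunks = []
with open('dump.pkl', 'rb') as infile:  # placeholder path
    while True:
        try:
            chunks.append(pickle.load(infile))
        except EOFError:
            break
df = pd.concat(chunks, ignore_index=True)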
def diff(s1, s2): ''' --word-diff=porcelain clone''' delta = difflib.Differ().compare(s1.split(), s2.split()) difflist = [] fullline = '' for line in delta: if line[0] == '?': continue elif line[0] == ' ': fullline += line.strip() + ' ' else: if fullline: difflist.append(fullline[:-1]) fullline = '' difflist.append(line) if fullline: difflist.append(fullline[:-1]) return [l[:] for l in '\n'.join(difflist).splitlines() if l]
--word-diff=porcelain clone
entailment
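A quick example of the porcelain-style output diff produces; unchanged words are joined onto one line while added and removed words keep their +/- prefixes:

s1 = 'the quick brown fox jumps'
s2 = 'the slow brown fox jumps high'
for line in diff(s1, s2):
    print(line)
# the
# - quick
# + slow
# brown fox jumps
# + high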
def diffcolor(s1, s2): ''' --word-diff=color clone ''' string = '' for line in diff(s1, s2): if line[0] == '-': string += ' ' + TermColors.red(line[2:]) elif line[0] == '+': string += ' ' + TermColors.green(line[2:]) else: string += ' ' + line return string[1:]
--word-diff=color clone
entailment
def create_html(s1, s2, output='test.html'): ''' creates basic html based on the diff of 2 strings ''' html = difflib.HtmlDiff().make_file(s1.split(), s2.split()) with open(output, 'w') as f: f.write(html)
creates basic html based on the diff of 2 strings
entailment
def traverse_data(obj, key_target): ''' will traverse nested lists and dicts until key_target equals the current dict key ''' if isinstance(obj, str) and '.json' in str(obj): obj = json.load(open(obj, 'r')) if isinstance(obj, list): queue = obj.copy() elif isinstance(obj, dict): queue = [obj.copy()] else: sys.exit('obj needs to be a list or dict') count = 0 ''' BFS ''' while queue and count < 1000: count += 1 curr_obj = queue.pop() if isinstance(curr_obj, dict): for key, value in curr_obj.items(): if key == key_target: return curr_obj else: queue.append(curr_obj[key]) elif isinstance(curr_obj, list): for co in curr_obj: queue.append(co) if count == 1000: sys.exit('traverse_data needs to be updated...') return False
will traverse nested lists and dicts until key_target equals the current dict key
entailment
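traverse_data walks nested lists and dicts and returns the first dict that contains the target key. For example, with made-up data:

record = {'data': [{'term': {'label': 'brain', 'definition': 'part of the CNS'}}]}
traverse_data(record, 'label')
# -> {'label': 'brain', 'definition': 'part of the CNS'}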
def json_diff(json1, json2, key_target, get_just_diff=True, porcelain=False): ''' creates a (keyname + diff) key within the json of the same layer which key_target resides. Ex: json1={'definition':'data of key_target'}, json2={'definition':'data of key_target'} key_target = 'definition' Usage: json_diff ( json_data1, json_data1 can be both [{..}] and {[..]} or json file path json_data2, json_data2 can be both [{..}] and {[..]} or json file path key_target, <str> of a key within a dict that holds the string data for comparison; EX: 'definition' get_just_diff=True, default=True; will return just the color diff of the 2 strings porcelain=False default=False; porcelain clone as output only as optional ) ''' json1 = json_secretary(json1) json2 = json_secretary(json2) obj1 = traverse_data(json1, key_target) obj2 = traverse_data(json2, key_target) output = diffcolor(obj1[key_target], obj2[key_target]) if porcelain: return diff(obj1[key_target], obj2[key_target]) if get_just_diff: return output obj1[key_target + '_diff'] = output obj2[key_target + '_diff'] = output return json1, json2, output
creates a (keyname + diff) key within the json of the same layer which key_target resides. Ex: json1={'definition':'data of key_target'}, json2={'definition':'data of key_target'} key_target = 'definition' Usage: json_diff ( json_data1, json_data1 can be both [{..}] and {[..]} or json file path json_data2, json_data2 can be both [{..}] and {[..]} or json file path key_target, <str> of a key within a dict that holds the string data for comparison; EX: 'definition' get_just_diff=True, default=True; will return just the color diff of the 2 strings porcelain=False default=False; porcelain clone as output only as optional )
entailment
def memoryCheck(vms_max_kb): """ Lookup vms_max using getCurrentVMSKb """ safety_factor = 1.2 vms_max = vms_max_kb vms_gigs = vms_max / 1024 ** 2 buffer = safety_factor * vms_max buffer_gigs = buffer / 1024 ** 2 vm = psutil.virtual_memory() free_gigs = vm.available / 1024 ** 2 if vm.available < buffer: raise MemoryError('Running this requires quite a bit of memory ~ ' f'{vms_gigs:.2f}, you have {free_gigs:.2f} of the ' f'{buffer_gigs:.2f} needed')
Lookup vms_max using getCurrentVMSKb
entailment
def _sequence_query(self): """ query all sequence rows """ klass = self.__class__ query = klass.select().where(klass.sequence.is_null(False)) seq_scope_field_names =\ (self.__seq_scope_field_name__ or '').split(',') for name in seq_scope_field_names: seq_scope_field = getattr(klass, name, None) if seq_scope_field: seq_scope_field_value = getattr(self, name) query = query.where(seq_scope_field == seq_scope_field_value) return query
query all sequence rows
entailment
def update_sheet_values(spreadsheet_name, sheet_name, values, spreadsheet_service=None): SPREADSHEET_ID = devconfig.secrets(spreadsheet_name) if spreadsheet_service is None: service = get_oauth_service(readonly=False) ss = service.spreadsheets() else: ss = spreadsheet_service """ requests = [ {'updateCells': { 'start': {'sheetId': TODO, 'rowIndex': 0, 'columnIndex': 0} 'rows': {'values'} } }] response = ss.batchUpdate( spreadsheetId=SPREADSHEET_ID, range=sheet_name, body=body).execute() """ body = {'values': values} response = ss.values().update( spreadsheetId=SPREADSHEET_ID, range=sheet_name, valueInputOption='USER_ENTERED', body=body).execute() return response
requests = [ {'updateCells': { 'start': {'sheetId': TODO, 'rowIndex': 0, 'columnIndex': 0} 'rows': {'values'} } }] response = ss.batchUpdate( spreadsheetId=SPREADSHEET_ID, range=sheet_name, body=body).execute()
entailment
def fetch(self, fetch_notes=None): """ update remote values (called automatically at __init__) """ if fetch_notes is None: fetch_notes = self.fetch_notes values, notes_index = get_sheet_values(self.name, self.sheet_name, spreadsheet_service=self._spreadsheet_service, get_notes=fetch_notes) self.raw_values = values self.values = [list(r) for r in zip(*itertools.zip_longest(*self.raw_values, fillvalue=''))] self.byCol = byCol(self.values, to_index=self.index_columns) self.notes_index = notes_index
update remote values (called automatically at __init__)
entailment
def add_types(graph, phenotypes): # TODO missing expression phenotypes! also basket type somehow :( """ Add disjoint union classes so that it is possible to see the invariants associated with individual phenotypes """ collect = defaultdict(set) def recurse(id_, start, level=0): #print(level) for t in graph.g.triples((None, None, id_)): if level == 0: if t[1] != rdflib.term.URIRef('http://www.w3.org/2002/07/owl#someValuesFrom'): continue if type_check(t, (rdflib.term.URIRef, rdflib.term.URIRef, rdflib.term.BNode)): #print(start, t[0]) collect[start].add(t[0]) return # we're done here, otherwise we hit instantiated subclasses if level > 1: if t[1] == rdflib.URIRef('http://www.w3.org/1999/02/22-rdf-syntax-ns#first') or \ t[1] == rdflib.URIRef('http://www.w3.org/1999/02/22-rdf-syntax-ns#rest'): continue recurse(t[0], start, level + 1) for phenotype in phenotypes: recurse(phenotype, phenotype) return collect
Add disjoint union classes so that it is possible to see the invariants associated with individual phenotypes
entailment
def _rest_make_phenotypes(): #phenotype sources neuroner = Path(devconfig.git_local_base, 'neuroNER/resources/bluima/neuroner/hbp_morphology_ontology.obo').as_posix() neuroner1 = Path(devconfig.git_local_base, 'neuroNER/resources/bluima/neuroner/hbp_electrophysiology_ontology.obo').as_posix() neuroner2 = Path(devconfig.git_local_base, 'neuroNER/resources/bluima/neuroner/hbp_electrophysiology-triggers_ontology.obo').as_posix() nif_qual = Path(devconfig.ontology_local_repo, 'ttl/NIF-Quality.ttl').as_posix() mo = OboFile(os.path.expanduser(neuroner)) mo1 = OboFile(os.path.expanduser(neuroner1)) mo2 = OboFile(os.path.expanduser(neuroner2)) mo_ttl = mo.__ttl__() + mo1.__ttl__() + mo2.__ttl__() mo_ttl = """\ @prefix : <http://FIXME.org/> . @prefix nsu: <http://www.FIXME.org/nsupper#> . @prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> . @prefix owl: <http://www.w3.org/2002/07/owl#> . @prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> . """ + mo_ttl #sio = io.StringIO() #sio.write(mo_ttl) ng = rdflib.Graph() ng.parse(data=mo_ttl, format='turtle') ng.parse(os.path.expanduser(nif_qual), format='turtle') #ng.namespace_manager.bind('default1', None, override=False, replace=True) ng.remove((None, rdflib.OWL.imports, None)) bad_match = { 'http://ontology.neuinfo.org/NIF/BiomaterialEntities/NIF-Quality.owl#nlx_qual_20090505', 'http://ontology.neuinfo.org/NIF/BiomaterialEntities/NIF-Quality.owl#sao1693353776', 'http://ontology.neuinfo.org/NIF/BiomaterialEntities/NIF-Quality.owl#sao1288413465', 'http://ontology.neuinfo.org/NIF/BiomaterialEntities/NIF-Quality.owl#sao4459136323', 'http://ontology.neuinfo.org/NIF/BiomaterialEntities/NIF-Quality.owl#nlx_qual_20090507', } exact = [] similar = [] quals = [] s2 = {} for subject, label in sorted(ng.subject_objects(rdflib.RDFS.label)): syns = set([a for a in ng.objects(subject, rdflib.URIRef('http://www.FIXME.org/nsupper#synonym'))]) syns.update(set([a for a in ng.objects(subject, rdflib.URIRef('http://ontology.neuinfo.org/NIF/Backend/OBO_annotation_properties.owl#synonym'))])) #if syns: #print(syns) #print(subject) #print(label.lower()) if 'quality' in label.lower(): quals.append((subject, label)) subpre = ng.namespace_manager.compute_qname(subject)[1] llower = rdflib.Literal(label.lower(), lang='en') for s in ng.subjects(rdflib.RDFS.label, llower): if s != subject: exact.append((subject, s, label, llower)) for s, p, o in sorted(ng.triples((None, rdflib.RDFS.label, None))): spre = ng.namespace_manager.compute_qname(s)[1] if subject != s and label.lower() in o.lower().split(' ') and spre != subpre: if s.toPython() in bad_match or subject.toPython() in bad_match: continue #print() #print(spre, subpre) similar.append((subject, s, label, o)) if subpre.toPython() == 'http://FIXME.org/': print('YAY') print(label, ',', o) print(subject, s) subject, s = s, subject label, o = o, label if subject in s2: #print('YES IT EXISTS') #print(syns, label, [subject, s]) s2[subject]['syns'].update(syns) s2[subject]['syns'].add(label) s2[subject]['xrefs'] += [subject, s] else: s2[subject] = {'label': label.toPython(), 'o': o.toPython(), 'xrefs':[subject, s], 'syns':syns} # FIXME overwrites pprint(quals) """ print stuff print('matches') pprint(exact) pprint(similar) #print('EXACT', exact) print() for k, v in s2.items(): print(k) for k, v2 in sorted(v.items()): print(' ', k, ':', v2) #""" desired_nif_terms = set() #{ #'NIFQUAL:sao1959705051', # dendrite #'NIFQUAL:sao2088691397', # axon #'NIFQUAL:sao1057800815', # morphological #'NIFQUAL:sao-1126011106', # soma #'NIFQUAL:', 
#'NIFQUAL:', #} starts = [ #"NIFQUAL:sao2088691397", #"NIFQUAL:sao1278200674", #"NIFQUAL:sao2088691397", #"NIFQUAL:sao-1126011106", # FIXME WTF IS THIS NONSENSE (scigraph bug?) quote("http://ontology.neuinfo.org/NIF/BiomaterialEntities/NIF-Quality.owl#sao1959705051").replace('/','%2F'), quote("http://ontology.neuinfo.org/NIF/BiomaterialEntities/NIF-Quality.owl#sao2088691397").replace('/','%2F'), quote("http://ontology.neuinfo.org/NIF/BiomaterialEntities/NIF-Quality.owl#sao1278200674").replace('/','%2F'), quote("http://ontology.neuinfo.org/NIF/BiomaterialEntities/NIF-Quality.owl#sao2088691397").replace('/','%2F'), quote("http://ontology.neuinfo.org/NIF/BiomaterialEntities/NIF-Quality.owl#sao-1126011106").replace('/','%2F'), ] for id_ in starts: want = sgg.getNeighbors(id_, relationshipType='subClassOf', direction='INCOMING', depth=5) #print(id_, want) desired_nif_terms.update([n['id'] for n in want['nodes']]) print(desired_nif_terms) ilx_start = 50114 print(ilx_base.format(ilx_start)) new_terms = {} dg = makeGraph('uwotm8', prefixes=PREFIXES) xr = makeGraph('xrefs', prefixes=PREFIXES) for s, o in sorted(ng.subject_objects(rdflib.RDFS.label))[::-1]: spre = ng.namespace_manager.compute_qname(s)[1] #if spre.toPython() == g.namespaces['NIFQUAL']: #print('skipping', s) #continue # TODO if s in new_terms: print(s, 'already in as xref probably') continue #elif spre.toPython() != 'http://uri.interlex.org/base/ilx_' or spre.toPython() != 'http://FIXME.org/' and s.toPython() not in desired_nif_terms: #elif spre.toPython() != 'http://FIXME.org/' and s.toPython() not in desired_nif_terms: #print('DO NOT WANT', s, spre) #continue syns = set([s for s in ng.objects(s, dg.namespaces['nsu']['synonym'])]) #data['syns'] += syns data = {} id_ = ilx_base.format(ilx_start) ilx_start += 1 if s in s2: d = s2[s] syns.update(d['syns']) new_terms[d['xrefs'][0]] = {'replaced_by':id_} xr.add_trip(d['xrefs'][0], 'oboInOwl:replacedBy', id_) #dg.add_trip(d['xrefs'][0], 'oboInOwl:replacedBy', id_) new_terms[d['xrefs'][1]] = {'replaced_by':id_} xr.add_trip(d['xrefs'][1], 'oboInOwl:replacedBy', id_) #dg.add_trip(d['xrefs'][1], 'oboInOwl:replacedBy', id_) data['labels'] = [d['label'], d['o']] #dg.add_trip(id_, rdflib.RDFS.label, d['label']) dg.add_trip(id_, rdflib.RDFS.label, d['o']) data['xrefs'] = d['xrefs'] for x in d['xrefs']: # FIXME... expecting order of evaluation errors here... 
dg.add_trip(id_, 'oboInOwl:hasDbXref', x) # xr xr.add_trip(id_, 'oboInOwl:hasDbXref', x) # x elif spre.toPython() != 'http://ontology.neuinfo.org/NIF/BiomaterialEntities/NIF-Quality.owl#' or ng.namespace_manager.qname(s).replace('default1','NIFQUAL') in desired_nif_terms: # skip non-xref quals #print(ng.namespace_manager.qname(s).replace('default1','NIFQUAL')) new_terms[s] = {'replaced_by':id_} xr.add_trip(s, 'oboInOwl:replacedBy', id_) data['labels'] = [o.toPython()] dg.add_trip(id_, rdflib.RDFS.label, o.toPython()) data['xrefs'] = [s] dg.add_trip(id_, 'oboInOwl:hasDbXref', s) # xr xr.add_trip(id_, 'oboInOwl:hasDbXref', s) # xr else: ilx_start -= 1 continue new_terms[id_] = data dg.add_trip(id_, rdflib.RDF.type, rdflib.OWL.Class) xr.add_trip(id_, rdflib.RDF.type, rdflib.OWL.Class) for syn in syns: if syn.toPython() not in data['labels']: if len(syn) > 3: dg.add_trip(id_, 'NIFRID:synonym', syn) elif syn: dg.add_trip(id_, 'NIFRID:abbrev', syn) if 'EPHYS' in s or any(['EPHYS' in x for x in data['xrefs']]): dg.add_trip(id_, rdflib.RDFS.subClassOf, ephys_phenotype) elif 'MORPHOLOGY' in s or any(['MORPHOLOGY' in x for x in data['xrefs']]): dg.add_trip(id_, rdflib.RDFS.subClassOf, morpho_phenotype) #dg.write(convert=False) xr.write(convert=False) #skip this for now, we can use DG to do lookups later #for t in dg.g.triples((None, None, None)): #g.add_trip(*t) # only way to clean prefixes :/ add_phenotypes(g) g.write(convert=False) g2 = makeGraph('pheno-comp', PREFIXES) for t in ng.triples((None, None, None)): g2.add_trip(*t) # only way to clean prefixes :/ g2.write(convert=False) syn_mappings = {} for sub, syn in [_ for _ in g.g.subject_objects(g.expand('NIFRID:synonym'))] + [_ for _ in g.g.subject_objects(rdflib.RDFS.label)]: syn = syn.toPython() if syn in syn_mappings: log.error(f'duplicate synonym! {syn} {sub}') syn_mappings[syn] = sub #embed() return syn_mappings, pedges, ilx_start
print stuff print('matches') pprint(exact) pprint(similar) #print('EXACT', exact) print() for k, v in s2.items(): print(k) for k, v2 in sorted(v.items()): print(' ', k, ':', v2) #
entailment
def config(self): """ Allows changing the config on the fly """ # TODO more efficient to read once and put watch on the file config = {} if self.config_file.exists(): with open(self.config_file.as_posix(), 'rt') as f: # 3.5/pypy3 can't open Path directly config = {k:self._override[k] if k in self._override else v for k, v in yaml.safe_load(f).items()} return config
Allows changing the config on the fly
entailment
def _get_service_account_info(self): """Retrieve json dict from service account file.""" with open(self.service_account_file, 'r') as f: info = json.load(f) self.service_account_email = info.get('client_email') if not self.service_account_email: raise GCECloudException( 'Service account JSON file is invalid for GCE. ' 'client_email key is expected. See getting started ' 'docs for information on GCE configuration.' ) self.service_account_project = info.get('project_id') if not self.service_account_project: raise GCECloudException( 'Service account JSON file is invalid for GCE. ' 'project_id key is expected. See getting started ' 'docs for information on GCE configuration.' )
Retrieve json dict from service account file.
entailment
def _get_driver(self): """Get authenticated GCE driver.""" ComputeEngine = get_driver(Provider.GCE) return ComputeEngine( self.service_account_email, self.service_account_file, project=self.service_account_project )
Get authenticated GCE driver.
entailment
def _get_instance(self): """Retrieve instance matching instance_id.""" try: instance = self.compute_driver.ex_get_node( self.running_instance_id, zone=self.region ) except ResourceNotFoundError as e: raise GCECloudException( 'Instance with id: {id} cannot be found: {error}'.format( id=self.running_instance_id, error=e ) ) return instance
Retrieve instance matching instance_id.
entailment
def _get_ssh_public_key(self): """Generate SSH public key from private key.""" key = ipa_utils.generate_public_ssh_key(self.ssh_private_key_file) return '{user}:{key} {user}'.format( user=self.ssh_user, key=key.decode() )
Generate SSH public key from private key.
entailment
def _launch_instance(self): """Launch an instance of the given image.""" metadata = {'key': 'ssh-keys', 'value': self.ssh_public_key} self.running_instance_id = ipa_utils.generate_instance_name( 'gce-ipa-test' ) self.logger.debug('ID of instance: %s' % self.running_instance_id) kwargs = { 'location': self.region, 'ex_metadata': metadata, 'ex_service_accounts': [{ 'email': self.service_account_email, 'scopes': ['storage-ro'] }] } if self.subnet_id: kwargs['ex_subnetwork'] = self._get_subnet(self.subnet_id) kwargs['ex_network'] = kwargs['ex_subnetwork'].network try: instance = self.compute_driver.create_node( self.running_instance_id, self.instance_type or GCE_DEFAULT_TYPE, self.image_id, **kwargs ) except ResourceNotFoundError as error: try: message = error.value['message'] except TypeError: message = error raise GCECloudException( 'An error occurred launching instance: {message}.'.format( message=message ) ) self.compute_driver.wait_until_running( [instance], timeout=self.timeout )
Launch an instance of the given image.
entailment
def _validate_region(self): """Validate region was passed in and is a valid GCE zone.""" if not self.region: raise GCECloudException( 'Zone is required for GCE cloud framework: ' 'Example: us-west1-a' ) try: zone = self.compute_driver.ex_get_zone(self.region) except Exception: zone = None if not zone: raise GCECloudException( '{region} is not a valid GCE zone. ' 'Example: us-west1-a'.format( region=self.region ) )
Validate region was passed in and is a valid GCE zone.
entailment
def _set_instance_ip(self): """Retrieve and set the instance ip address.""" instance = self._get_instance() if instance.public_ips: self.instance_ip = instance.public_ips[0] elif instance.private_ips: self.instance_ip = instance.private_ips[0] else: raise GCECloudException( 'IP address for instance: %s cannot be found.' % self.running_instance_id )
Retrieve and set the instance ip address.
entailment
def _start_instance(self): """Start the instance.""" instance = self._get_instance() self.compute_driver.ex_start_node(instance) self.compute_driver.wait_until_running( [instance], timeout=self.timeout )
Start the instance.
entailment
def _stop_instance(self): """Stop the instance.""" instance = self._get_instance() self.compute_driver.ex_stop_node(instance) self._wait_on_instance('stopped', timeout=self.timeout)
Stop the instance.
entailment
def grab_rdflib_graph_version(g: Graph) -> str: ''' Crap-shoot for the ontology version IRI, if it's properly in the header and correctly formatted ''' version = g.subject_objects( predicate = URIRef( OWL.versionIRI ) ) version = [o for s, o in version] if len(version) != 1: print('versioning isn\'t correct') else: version = str(version[0]) return version
Crap-shoot for the ontology version IRI, if it's properly in the header and correctly formatted
entailment
def fix_ilx(self, ilx_id: str) -> str: ''' Database only accepts the lower case, underscore version of the ID ''' ilx_id = ilx_id.replace('http://uri.interlex.org/base/', '') if ilx_id[:4] not in ['TMP:', 'tmp_', 'ILX:', 'ilx_']: raise ValueError( 'Need to provide ilx ID with format ilx_# or ILX:# for given ID ' + ilx_id) return ilx_id.replace('ILX:', 'ilx_').replace('TMP:', 'tmp_')
Database only accepts the lower case, underscore version of the ID
entailment
def pull_int_tail(self, string: str) -> str: ''' Useful for IDs that have gibberish in front of the real ID ''' int_tail = '' for element in string[::-1]: try: int(element) int_tail = element + int_tail except ValueError: continue return int_tail
Useful for IDs that have gibberish in front of the real ID
entailment
def extract_fragment(self, iri: str) -> str: ''' Pulls only the code/ID from the iri. The str() conversion is there because rdflib objects need to be converted. ''' fragment = str(iri).rsplit('/')[-1].split(':', 1)[-1].split('#', 1)[-1].split('_', 1)[-1] return fragment
Pulls only the code/ID from the iri. The str() conversion is there because rdflib objects need to be converted.
entailment
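The three ID helpers above compose as follows, assuming an instance of the class here called ilx_cl (hypothetical name):

ilx_cl.fix_ilx('http://uri.interlex.org/base/ILX:0101431')
# -> 'ilx_0101431'

ilx_cl.pull_int_tail('GO:0008150')
# -> '0008150' (every digit, gathered in order, with the non-numeric prefix dropped)

ilx_cl.extract_fragment('http://purl.obolibrary.org/obo/UBERON_0000955')
# -> '0000955' (last path segment, split on ':', '#', and '_')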
def curie_search(self, curie:str) -> dict: ''' Returns the row in InterLex associated with the curie Note: Presumed to not have duplicate curies in InterLex Args: curie: The "prefix:fragment_id" of the existing_id pertaining to the ontology Returns: None or dict ''' ilx_row = self.curie2row.get(curie) if not ilx_row: return None else: return ilx_row
Returns the row in InterLex associated with the curie Note: Presumed to not have duplicate curies in InterLex Args: curie: The "prefix:fragment_id" of the existing_id pertaining to the ontology Returns: None or dict
entailment
def fragment_search(self, fragment:str) -> List[dict]: ''' Returns the rows in InterLex associated with the fragment Note: Presumed to have duplicate fragments in InterLex Args: fragment: The fragment_id of the curie pertaining to the ontology Returns: None or List[dict] ''' fragment = self.extract_fragment(fragment) ilx_rows = self.fragment2rows.get(fragment) if not ilx_rows: return None else: return ilx_rows
Returns the rows in InterLex associated with the fragment Note: Presumed to have duplicate fragments in InterLex Args: fragment: The fragment_id of the curie pertaining to the ontology Returns: None or List[dict]
entailment
def label_search(self, label:str) -> List[dict]: ''' Returns the rows in InterLex associated with that label Note: Presumed to have duplicate labels in InterLex Args: label: label of the entity you want to find Returns: None or List[dict] ''' ilx_rows = self.label2rows.get(self.local_degrade(label)) if not ilx_rows: return None else: return ilx_rows
Returns the rows in InterLex associated with that label Note: Presumed to have duplicate labels in InterLex Args: label: label of the entity you want to find Returns: None or List[dict]
entailment
def readyup_entity( self, label: str, type: str, uid: Union[int, str] = None, comment: str = None, definition: str = None, superclass: str = None, synonyms: list = None, existing_ids: List[dict] = None, ) -> dict: ''' Sets up the entity to be InterLex ready Args: label: name of entity type: entity's type Can be any of the following: term, cde, fde, pde, annotation, relationship uid: usually fine and auto-completes to the api user ID, but if you provide one with a clearance higher than 0 you can set your own. Good for mass imports by one person to avoid label collisions. definition: entity's definition comment: a footnote regarding either the interpretation of the data or the data itself superclass: entity is a sub-part of this entity Example: Organ is a superclass to Brain synonyms: entity synonyms existing_ids: existing curies/iris that link the data Returns: dict ''' entity = dict( label = label, type = type, ) if uid: entity['uid'] = uid if definition: entity['definition'] = definition if comment: entity['comment'] = comment if superclass: entity['superclass'] = {'ilx_id':self.fix_ilx(superclass)} if synonyms: entity['synonyms'] = [{'literal': syn} for syn in synonyms] if existing_ids: if existing_ids[0].get('curie') and existing_ids[0].get('iri'): pass else: exit('Need curie and iri for existing_ids in List[dict] form') entity['existing_ids'] = existing_ids return entity
Sets up the entity to be InterLex ready Args: label: name of entity type: entity's type Can be any of the following: term, cde, fde, pde, annotation, relationship uid: usually fine and auto-completes to the api user ID, but if you provide one with a clearance higher than 0 you can set your own. Good for mass imports by one person to avoid label collisions. definition: entity's definition comment: a footnote regarding either the interpretation of the data or the data itself superclass: entity is a sub-part of this entity Example: Organ is a superclass to Brain synonyms: entity synonyms existing_ids: existing curies/iris that link the data Returns: dict
entailment
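A sketch of readyup_entity in use, with placeholder values and a hypothetical instance name ilx_cl:

entity = ilx_cl.readyup_entity(
    label='brain',
    type='term',
    definition='The part of the central nervous system contained in the cranium.',
    superclass='ILX:0108124',   # placeholder curie; normalized via fix_ilx
    synonyms=['encephalon'],
    existing_ids=[{'iri': 'http://purl.obolibrary.org/obo/UBERON_0000955',
                   'curie': 'UBERON:0000955'}],
)
# entity['superclass'] -> {'ilx_id': 'ilx_0108124'}
# entity['synonyms']   -> [{'literal': 'encephalon'}]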
def __exhaustive_diff(self, check_list:List[dict]) -> List[List[dict]]: ''' Helper for exhaustive checks to see if there any matches at all besides the anchor OUTPUT: [ { 'external_ontology_row' : {}, 'interlex_row' : {}, 'same': {}, }, ... ], ''' def compare_rows(external_row:dict, ilx_row:dict) -> List[dict]: ''' dictionary comparator ''' def compare_values(string1:Union[str, None], string2:Union[str, None]) -> bool: ''' string comparator ''' if string1 is None or string2 is None: return False elif not isinstance(string1, str) or not isinstance(string2, str): return False elif string1.lower().strip() != string2.lower().strip(): return False else: return True accepted_ilx_keys = ['label', 'definition'] local_diff = set() for external_key, external_value in external_row.items(): if not external_value: continue if isinstance(external_value, list): external_values = external_value for external_value in external_values: for ilx_key, ilx_value in ilx_row.items(): if ilx_key not in accepted_ilx_keys: continue if compare_values(external_value, ilx_value): local_diff.add( #((external_key, external_value), (ilx_key, ilx_value)) ilx_key # best to just have what you need and infer the rest :) ) else: for ilx_key, ilx_value in ilx_row.items(): if ilx_key not in accepted_ilx_keys: continue if compare_values(external_value, ilx_value): local_diff.add( #((external_key, external_value), (ilx_key, ilx_value)) ilx_key # best to just have what you need and infer the rest :) ) local_diff = list(local_diff) diff = { 'external_ontology_row': external_row, 'ilx_row': ilx_row, 'same': local_diff, } return diff diff = [] for check_dict in check_list: external_ontology_row = check_dict['external_ontology_row'] diff.append( [compare_rows(external_ontology_row, ilx_row) for ilx_row in check_dict['ilx_rows']] ) return diff
Helper for exhaustive checks to see if there any matches at all besides the anchor OUTPUT: [ { 'external_ontology_row' : {}, 'interlex_row' : {}, 'same': {}, }, ... ],
entailment
def exhaustive_label_check( self, ontology:pd.DataFrame, label_predicate='rdfs:label', diff:bool=True, ) -> Tuple[list]: ''' All entities with conflicting labels gets a full diff Args: ontology: pandas DataFrame created from an ontology where the colnames are predicates and if classes exist it is also thrown into a the colnames. label_predicate: usually in qname form and is the colname of the DataFrame for the label diff: complete exhaustive diff if between curie matches... will take FOREVER if there are a lot -> n^2 Returns: inside: entities that are inside of InterLex outside: entities NOT in InterLex diff (optional): List[List[dict]]... so complicated but usefull diff between matches only ''' inside, outside = [], [] header = ['Index'] + list(ontology.columns) for row in ontology.itertuples(): row = {header[i]:val for i, val in enumerate(row)} label_obj = row[label_predicate] if isinstance(label_obj, list): if len(label_obj) != 1: exit('Need to have only 1 label in the cell from the onotology.') else: label_obj = label_obj[0] entity_label = self.local_degrade(label_obj) ilx_rows = self.label2rows.get(entity_label) if ilx_rows: inside.append({ 'external_ontology_row': row, 'ilx_rows': ilx_rows, }) else: outside.append(row) if diff: diff = self.__exhaustive_diff(inside) return inside, outside, diff return inside, outside
All entities with conflicting labels gets a full diff Args: ontology: pandas DataFrame created from an ontology where the colnames are predicates and if classes exist it is also thrown into a the colnames. label_predicate: usually in qname form and is the colname of the DataFrame for the label diff: complete exhaustive diff if between curie matches... will take FOREVER if there are a lot -> n^2 Returns: inside: entities that are inside of InterLex outside: entities NOT in InterLex diff (optional): List[List[dict]]... so complicated but usefull diff between matches only
entailment
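A usage sketch for exhaustive_label_check, assuming a checker instance named ilx_cmp (hypothetical) and a tiny graph2pandas-style DataFrame in which cells hold lists:

import pandas as pd

onto = pd.DataFrame([
    {'iri': 'http://purl.obolibrary.org/obo/UBERON_0000955', 'rdfs:label': ['Brain']},
    {'iri': 'http://example.org/XYZ_1', 'rdfs:label': ['made-up structure']},
])
inside, outside, diff = ilx_cmp.exhaustive_label_check(onto, label_predicate='rdfs:label')
# inside:  rows whose degraded label already exists in InterLex, paired with the matching ilx rows
# outside: rows with no label match
# diff:    per-pair field agreement computed by the __exhaustive_diff helper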
def exhaustive_iri_check( self, ontology:pd.DataFrame, iri_predicate:str, diff:bool=True, ) -> Tuple[list]: ''' All entities with conflicting iris gets a full diff to see if they belong Args: ontology: pandas DataFrame created from an ontology where the colnames are predicates and if classes exist it is also thrown into a the colnames. iri_predicate: usually in qname form and is the colname of the DataFrame for iri Default is "iri" for graph2pandas module diff: complete exhaustive diff if between curie matches... will take FOREVER if there are a lot -> n^2 Returns: inside: entities that are inside of InterLex outside: entities NOT in InterLex diff (optional): List[List[dict]]... so complicated but usefull diff between matches only ''' inside, outside = [], [] header = ['Index'] + list(ontology.columns) for row in ontology.itertuples(): row = {header[i]:val for i, val in enumerate(row)} entity_iri = row[iri_predicate] if isinstance(entity_iri, list): if len(entity_iri) != 0: exit('Need to have only 1 iri in the cell from the onotology.') else: entity_iri = entity_iri[0] ilx_row = self.iri2row.get(entity_iri) if ilx_row: inside.append({ 'external_ontology_row': row, 'ilx_rows': [ilx_row], }) else: outside.append(row) if diff: diff = self.__exhaustive_diff(inside) return inside, outside, diff return inside, outside
All entities with conflicting iris gets a full diff to see if they belong Args: ontology: pandas DataFrame created from an ontology where the colnames are predicates and if classes exist it is also thrown into a the colnames. iri_predicate: usually in qname form and is the colname of the DataFrame for iri Default is "iri" for graph2pandas module diff: complete exhaustive diff if between curie matches... will take FOREVER if there are a lot -> n^2 Returns: inside: entities that are inside of InterLex outside: entities NOT in InterLex diff (optional): List[List[dict]]... so complicated but usefull diff between matches only
entailment
def exhaustive_curie_check( self, ontology:pd.DataFrame, curie_predicate:str, curie_prefix:str, diff:bool=True, ) -> Tuple[list]: ''' All entities with conflicting curies gets a full diff to see if they belong Args: ontology: pandas DataFrame created from an ontology where the colnames are predicates and if classes exist it is also thrown into a the colnames. curie_predicate: usually in qname form and is the colname of the DataFrame curie_prefix: Not all cells in the DataFrame will have complete curies so we extract the fragement from the cell and use the prefix to complete it. diff: complete exhaustive diff if between curie matches... will take FOREVER if there are a lot -> n^2 Returns: inside: entities that are inside of InterLex outside: entities NOT in InterLex diff (optional): List[List[dict]]... so complicated but usefull diff between matches only ''' inside, outside = [], [] curie_prefix = curie_prefix.replace(':', '') # just in case I forget a colon isnt in a prefix header = ['Index'] + list(ontology.columns) for row in ontology.itertuples(): row = {header[i]:val for i, val in enumerate(row)} entity_curie = row[curie_predicate] if isinstance(entity_curie, list): if len(entity_curie) != 0: exit('Need to have only 1 iri in the cell from the onotology.') else: entity_curie = entity_curie[0] entity_curie = curie_prefix + ':' + self.extract_fragment(entity_curie) ilx_row = self.curie2row.get(entity_curie) if ilx_row: inside.append({ 'external_ontology_row': row, 'ilx_rows': [ilx_row], }) else: outside.append(row) if diff: diff = self.__exhaustive_diff(inside) return inside, outside, diff return inside, outside
All entities with conflicting curies gets a full diff to see if they belong Args: ontology: pandas DataFrame created from an ontology where the colnames are predicates and if classes exist it is also thrown into a the colnames. curie_predicate: usually in qname form and is the colname of the DataFrame curie_prefix: Not all cells in the DataFrame will have complete curies so we extract the fragement from the cell and use the prefix to complete it. diff: complete exhaustive diff if between curie matches... will take FOREVER if there are a lot -> n^2 Returns: inside: entities that are inside of InterLex outside: entities NOT in InterLex diff (optional): List[List[dict]]... so complicated but usefull diff between matches only
entailment
def exhaustive_fragment_check( self, ontology:pd.DataFrame, iri_curie_fragment_predicate:str = 'iri', cross_reference_iris:bool = False, cross_reference_fragments:bool = False, diff:bool = True, ) -> Tuple[list]: ''' All entities with conflicting fragments gets a full diff to see if they belong Args: ontology: pandas DataFrame created from an ontology where the colnames are predicates and if classes exist it is also thrown into a the colnames. iri_curie_fragment_predicate: usually in qname form and is the colname of the DataFrame for iri Default is "iri" for graph2pandas module diff: complete exhaustive diff if between curie matches... will take FOREVER if there are a lot -> n^2 Returns: inside: entities that are inside of InterLex outside: entities NOT in InterLex diff (optional): List[List[dict]]... so complicated but usefull diff between matches only ''' inside, outside = [], [] header = ['Index'] + list(ontology.columns) for row in ontology.itertuples(): row = {header[i]:val for i, val in enumerate(row)} entity_suffix = row[iri_curie_fragment_predicate] if isinstance(entity_suffix, list): if len(entity_suffix) != 0: exit('Need to have only 1 iri in the cell from the onotology.') else: entity_suffix = entity_suffix[0] entity_fragment = self.extract_fragment(entity_suffix) ilx_rows = self.fragment2rows.get(entity_fragment) if cross_reference_fragments and ilx_rows: ilx_rows = [row for row in ilx_rows if entity_fragment.lower() in row['iri'].lower()] if cross_reference_iris and ilx_rows: # true suffix of iris ilx_rows = [row for row in ilx_rows if entity_suffix.rsplit('/', 1)[-1].lower() in row['iri'].lower()] if ilx_rows: inside.append({ 'external_ontology_row': row, 'ilx_rows': ilx_rows, }) else: outside.append(row) if diff: diff = self.__exhaustive_diff(inside) return inside, outside, diff return inside, outside
All entities with conflicting fragments gets a full diff to see if they belong Args: ontology: pandas DataFrame created from an ontology where the colnames are predicates and if classes exist it is also thrown into a the colnames. iri_curie_fragment_predicate: usually in qname form and is the colname of the DataFrame for iri Default is "iri" for graph2pandas module diff: complete exhaustive diff if between curie matches... will take FOREVER if there are a lot -> n^2 Returns: inside: entities that are inside of InterLex outside: entities NOT in InterLex diff (optional): List[List[dict]]... so complicated but usefull diff between matches only
entailment
def exhaustive_ontology_ilx_diff_row_only( self, ontology_row: dict ) -> List[dict]: ''' WARNING: RUNTIME IS AWFUL ''' results = [] header = ['Index'] + list(self.existing_ids.columns) for row in self.existing_ids.itertuples(): row = {header[i]:val for i, val in enumerate(row)} check_list = [ { 'external_ontology_row': ontology_row, 'ilx_rows': [row], }, ] # First layer is for each external row; second is for each potential ilx row. It's simply 1-1 here. result = self.__exhaustive_diff(check_list)[0][0] if result['same']: results.append(result) return results
WARNING: RUNTIME IS AWFUL
entailment
def combo_exhaustive_label_definition_check( self, ontology: pd.DataFrame, label_predicate:str, definition_predicates:str, diff = True) -> List[List[dict]]: ''' Combo of label & definition exhaustive check out of convenience Args: ontology: pandas DataFrame created from an ontology where the colnames are predicates and if classes exist it is also thrown into a the colnames. label_predicate: usually in qname form and is the colname of the DataFrame for the label diff: complete exhaustive diff if between curie matches... will take FOREVER if there are a lot -> n^2 Returns: inside: entities that are inside of InterLex outside: entities NOT in InterLex diff (optional): List[List[dict]]... so complicated but usefull diff between matches only ''' inside, outside = [], [] header = ['Index'] + list(ontology.columns) for row in ontology.itertuples(): row = {header[i]:val for i, val in enumerate(row)} label_obj = row[label_predicate] if isinstance(label_obj, list): if len(label_obj) != 1: exit('Need to have only 1 label in the cell from the onotology.') else: label_obj = label_obj[0] entity_label = self.local_degrade(label_obj) label_search_results = self.label2rows.get(entity_label) label_ilx_rows = label_search_results if label_search_results else [] definition_ilx_rows = [] for definition_predicate in definition_predicates: definition_objs = row[definition_predicate] if not definition_objs: continue definition_objs = [definition_objs] if not isinstance(definition_objs, list) else definition_objs for definition_obj in definition_objs: definition_obj = self.local_degrade(definition_obj) definition_search_results = self.definition2rows.get(definition_obj) if definition_search_results: definition_ilx_rows.extend(definition_search_results) ilx_rows = [dict(t) for t in {tuple(d.items()) for d in (label_ilx_rows + definition_ilx_rows)}] if ilx_rows: inside.append({ 'external_ontology_row': row, 'ilx_rows': ilx_rows, }) else: outside.append(row) if diff: diff = self.__exhaustive_diff(inside) return inside, outside, diff return inside, outside
Combo of label & definition exhaustive check out of convenience Args: ontology: pandas DataFrame created from an ontology where the colnames are predicates and if classes exist it is also thrown into a the colnames. label_predicate: usually in qname form and is the colname of the DataFrame for the label diff: complete exhaustive diff if between curie matches... will take FOREVER if there are a lot -> n^2 Returns: inside: entities that are inside of InterLex outside: entities NOT in InterLex diff (optional): List[List[dict]]... so complicated but usefull diff between matches only
entailment
def clear_cache(ip=None): """Clear the client cache or remove key matching the given ip.""" if ip: with ignored(Exception): client = CLIENT_CACHE[ip] del CLIENT_CACHE[ip] client.close() else: for client in CLIENT_CACHE.values(): with ignored(Exception): client.close() CLIENT_CACHE.clear()
Clear the client cache or remove key matching the given ip.
entailment
def establish_ssh_connection(ip, ssh_private_key_file, ssh_user, port, attempts=5, timeout=None): """ Establish ssh connection and return paramiko client. Raises: IpaSSHException: If connection cannot be established in given number of attempts. """ client = paramiko.SSHClient() client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) while attempts: try: client.connect( ip, port=port, username=ssh_user, key_filename=ssh_private_key_file, timeout=timeout ) except: # noqa: E722 attempts -= 1 time.sleep(10) else: return client raise IpaSSHException( 'Failed to establish SSH connection to instance.' )
Establish ssh connection and return paramiko client. Raises: IpaSSHException: If connection cannot be established in given number of attempts.
entailment
def execute_ssh_command(client, cmd): """ Execute given command using paramiko. Returns: String output of cmd execution. Raises: IpaSSHException: If stderr returns a non-empty string. """ try: stdin, stdout, stderr = client.exec_command(cmd) err = stderr.read() out = stdout.read() if err: raise IpaSSHException(out.decode() + err.decode()) except: # noqa: E722 raise return out.decode()
Execute given command using paramiko. Returns: String output of cmd execution. Raises: IpaSSHException: If stderr returns a non-empty string.
entailment
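A small end-to-end sketch of the SSH helpers above; the IP, key path, and command are placeholders:

client = establish_ssh_connection(
    '203.0.113.10',              # placeholder IP
    '/home/user/.ssh/id_rsa',    # placeholder private key path
    'root',
    22,
    timeout=30,
)
out = execute_ssh_command(client, 'cat /etc/os-release')
print(out)
client.close()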
def extract_archive(client, archive_path, extract_path=None): """ Extract the archive in current path using the provided client. If extract_path is provided extract the archive there. """ command = 'tar -xf {path}'.format(path=archive_path) if extract_path: command += ' -C {extract_path}'.format(extract_path=extract_path) out = execute_ssh_command(client, command) return out
Extract the archive in current path using the provided client. If extract_path is provided extract the archive there.
entailment
def generate_public_ssh_key(ssh_private_key_file): """Generate SSH public key from private key file.""" try: with open(ssh_private_key_file, "rb") as key_file: key = key_file.read() except FileNotFoundError: raise IpaUtilsException( 'SSH private key file: %s cannot be found.' % ssh_private_key_file ) try: private_key = serialization.load_pem_private_key( key, password=None, backend=default_backend() ) except ValueError: raise IpaUtilsException( 'SSH private key file: %s is not a valid key file.' % ssh_private_key_file ) return private_key.public_key().public_bytes( serialization.Encoding.OpenSSH, serialization.PublicFormat.OpenSSH )
Generate SSH public key from private key file.
entailment
def get_config_values(config_path, section, default='default'): """ Parse ini config file and return a dict of values. The provided section overrides any values in default section. """ values = {} if not os.path.isfile(config_path): raise IpaUtilsException( 'Config file not found: %s' % config_path ) config = configparser.ConfigParser() try: config.read(config_path) except Exception: raise IpaUtilsException( 'Config file format invalid.' ) try: values.update(config.items(default)) except Exception: pass try: values.update(config.items(section)) except Exception: pass return values
Parse ini config file and return a dict of values. The provided section overrides any values in default section.
entailment
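Illustrative only, not part of the dataset pair above: a minimal usage sketch for get_config_values. The file name and section names are hypothetical; keys found in the named section override keys of the same name in [default].

# Hypothetical example; 'config.ini' and the 'ec2' section are made up.
values = get_config_values('config.ini', section='ec2')
region = values.get('region')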
def get_ssh_client(ip,
                   ssh_private_key_file,
                   ssh_user='root',
                   port=22,
                   timeout=600,
                   wait_period=10):
    """Attempt to establish and test ssh connection."""
    if ip in CLIENT_CACHE:
        return CLIENT_CACHE[ip]

    start = time.time()
    end = start + timeout
    client = None

    while time.time() < end:
        try:
            client = establish_ssh_connection(
                ip,
                ssh_private_key_file,
                ssh_user,
                port,
                timeout=wait_period
            )
            execute_ssh_command(client, 'ls')
        except:  # noqa: E722
            if client:
                client.close()
            wait_period += wait_period
        else:
            CLIENT_CACHE[ip] = client
            return client

    raise IpaSSHException(
        'Attempt to establish SSH connection failed.'
    )
Attempt to establish and test ssh connection.
entailment
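Illustrative only: one way these SSH helpers might be combined, assuming a reachable host. The IP address, key path, user, and command are hypothetical.

# Hypothetical usage sketch; values are made up.
client = get_ssh_client('10.0.0.5', '~/.ssh/id_rsa', ssh_user='ec2-user')
output = execute_ssh_command(client, 'uname -r')
clear_cache('10.0.0.5')  # close and drop the cached client when done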
def get_yaml_config(config_path):
    """
    Load yaml config file and return dictionary.

    Todo:
        * This will need refactoring similar to the test search.
    """
    config_path = os.path.expanduser(config_path)
    if not os.path.isfile(config_path):
        raise IpaUtilsException(
            'Config file not found: %s' % config_path
        )

    with open(config_path, 'r') as f:
        config = yaml.safe_load(f)
    return config
Load yaml config file and return dictionary. Todo: * This will need refactoring similar to the test search.
entailment
def parse_sync_points(names, tests):
    """
    Slice list of test names on sync points.

    If test is test file find full path to file.

    Returns:
        A list of test file sets and sync point strings.

    Examples:
        ['test_hard_reboot']
        [set('test1', 'test2')]
        [set('test1', 'test2'), 'test_soft_reboot']
        [set('test1', 'test2'), 'test_soft_reboot', set('test3')]
    """
    test_files = []
    section = set()

    for name in names:
        if name in SYNC_POINTS:
            if section:
                test_files.append(section)

            test_files.append(name)
            section = set()
        else:
            section.add(find_test_file(name, tests))

    if section:
        test_files.append(section)

    return test_files
Slice list of test names on sync points. If test is test file find full path to file. Returns: A list of test file sets and sync point strings. Examples: ['test_hard_reboot'] [set('test1', 'test2')] [set('test1', 'test2'), 'test_soft_reboot'] [set('test1', 'test2'), 'test_soft_reboot', set('test3')]
entailment
def put_file(client, source_file, destination_file):
    """
    Copy file to instance using Paramiko client connection.
    """
    try:
        sftp_client = client.open_sftp()
        sftp_client.put(source_file, destination_file)
    except Exception as error:
        raise IpaUtilsException(
            'Error copying file to instance: {0}.'.format(error)
        )
    finally:
        with ignored(Exception):
            sftp_client.close()
Copy file to instance using Paramiko client connection.
entailment
def redirect_output(fileobj):
    """Redirect standard out to file."""
    old = sys.stdout
    sys.stdout = fileobj
    try:
        yield fileobj
    finally:
        sys.stdout = old
Redirect standard out to file.
entailment
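Illustrative sketch, assuming redirect_output is registered as a context manager (for example via contextlib.contextmanager, which is not shown in the row above); the output file name is hypothetical.

with open('capture.out', 'w') as out, redirect_output(out):
    print('this line lands in capture.out instead of the console')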
def ssh_config(ssh_user, ssh_private_key_file):
    """Create temporary ssh config file."""
    try:
        ssh_file = NamedTemporaryFile(delete=False, mode='w+')
        ssh_file.write('Host *\n')
        ssh_file.write(' IdentityFile %s\n' % ssh_private_key_file)
        ssh_file.write(' User %s' % ssh_user)
        ssh_file.close()

        yield ssh_file.name
    finally:
        with ignored(OSError):
            os.remove(ssh_file.name)
Create temporary ssh config file.
entailment
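Illustrative sketch, assuming ssh_config is likewise wrapped as a context manager (decorator not shown above); the user and key path are hypothetical. The yielded path points at a temporary ssh config that is removed when the block exits.

with ssh_config('root', '~/.ssh/id_rsa') as config_path:
    print(config_path)  # e.g. pass to a tool that accepts an ssh config file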
def update_history_log(history_log,
                       clear=False,
                       description=None,
                       test_log=None):
    """
    Update the history log file with item.

    If clear flag is provided the log file is deleted.
    """
    if not test_log and not clear:
        raise IpaUtilsException(
            'A test log or clear flag must be provided.'
        )

    if clear:
        with ignored(OSError):
            os.remove(history_log)
    else:
        history_dir = os.path.dirname(history_log)
        if not os.path.isdir(history_dir):
            try:
                os.makedirs(history_dir)
            except OSError as error:
                raise IpaUtilsException(
                    'Unable to create directory: %s' % error
                )

        with open(history_log, 'a+') as f:
            # Using append mode creates file if it does not exist
            if description:
                description = '"%s"' % description

            out = '{} {}'.format(
                test_log,
                description or ''
            )
            f.write(out.strip() + '\n')
Update the history log file with item. If clear flag is provided the log file is deleted.
entailment
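Illustrative only: a minimal sketch of appending an entry and then clearing the log. The paths and description are hypothetical.

# Append one entry (creates the directory and file if needed).
update_history_log(
    '/tmp/ipa/history.log',
    test_log='/tmp/ipa/results/azure/test.log',
    description='nightly run'
)

# Remove the history log entirely.
update_history_log('/tmp/ipa/history.log', clear=True)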
def validate(self, value):
    """Validate string by regex

    :param value: str
    :return:
    """
    if not self._compiled_regex.match(value):
        raise ValidationError(
            'value {:s} not match r"{:s}"'.format(value, self._regex))
Validate string by regex :param value: str :return:
entailment
def ontology2df(self):
    '''Updates self.g or self.path because you can only choose one'''
    if isinstance(self.path, str) or isinstance(self.path, p):
        self.path = str(self.path)
        filetype = p(self.path).suffix
        if filetype == '.json':
            self.g = None
            try:
                records = open_json(self.path)
                return pd.DataFrame(records)
            except:
                exit('Json file is not in records format.')
        if filetype == '.pickle':
            self.g = None
            return pickle.load(open(self.path, 'rb'))
        elif filetype == '.ttl' or filetype == '.rdf':
            self.g = rdflib.Graph()
            self.g.parse(self.path, format='turtle')
            return self.get_sparql_dataframe()
        elif filetype == '.nt':
            self.g = rdflib.Graph()
            self.g.parse(self.path, format='nt')
            return self.get_sparql_dataframe()
        elif filetype == '.owl' or filetype == '.xrdf':
            self.g = rdflib.Graph()
            try:
                self.g.parse(self.path, format='xml')
            except:  # some owl formats are more rdf than owl
                self.g.parse(self.path, format='turtle')
            return self.get_sparql_dataframe()
        else:
            exit('Format options: owl, ttl, df_pickle, rdflib.Graph()')
        try:
            return self.get_sparql_dataframe()
            self.path = None
        except:
            exit('Format options: owl, ttl, df_pickle, rdflib.Graph()')
    elif isinstance(self.g, rdflib.graph.Graph):
        self.path = None
        return self.get_sparql_dataframe()
    else:
        exit('Obj given is not str, pathlib obj, or an rdflib.Graph()')
Updates self.g or self.path because you can only choose one
entailment
def create_pred2common(self):
    ''' Takes list linked to common name and maps common name to accepted
        predicate and their respective suffixes to decrease sensitivity.
    '''
    self.pred2common = {}
    for common_name, ext_preds in self.common2preds.items():
        for pred in ext_preds:
            pred = pred.lower().strip()
            self.pred2common[pred] = common_name
Takes list linked to common name and maps common name to accepted predicate and their respective suffixes to decrease sensitivity.
entailment
def clean_pred(self, pred, ignore_warning=False):
    ''' Takes the predicate and returns the suffix, lower case, stripped
        version
    '''
    original_pred = pred
    pred = pred.lower().strip()
    if 'http' in pred:
        pred = pred.split('/')[-1]
    elif ':' in pred:
        if pred[-1] != ':':  # some matches are "prefix:" only
            pred = pred.split(':')[-1]
    else:
        if not ignore_warning:
            exit('Not a valid predicate: ' + original_pred
                 + '. Needs to be an iri "/" or curie ":".')
    return pred
Takes the predicate and returns the suffix, lower case, stripped version
entailment
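Illustrative only: the expected normalization for an iri and a curie, assuming an instance named translator; the inputs are hypothetical.

# iri input -> last path segment, lower-cased
translator.clean_pred('http://purl.obolibrary.org/obo/IAO_0000115')  # 'iao_0000115'
# curie input -> suffix after the colon
translator.clean_pred('rdfs:label')  # 'label'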
def get_common_pred(self, pred):
    ''' Gets version of predicate and sees if we have a translation to a
        common relation.

    INPUT:
        pred = predicate from the triple
    OUTPUT:
        Common relationship or None
    '''
    pred = self.clean_pred(pred)
    common_pred = self.pred2common.get(pred)
    return common_pred
Gets version of predicate and sees if we have a translation to a common relation. INPUT: pred = predicate from the triple OUTPUT: Common relationship or None
entailment
def _create_network_interface(
    self, ip_config_name, nic_name, public_ip, region,
    resource_group_name, subnet, accelerated_networking=False
):
    """
    Create a network interface in the resource group.

    Attach NIC to the subnet and public IP provided.
    """
    nic_config = {
        'location': region,
        'ip_configurations': [{
            'name': ip_config_name,
            'private_ip_allocation_method': 'Dynamic',
            'subnet': {
                'id': subnet.id
            },
            'public_ip_address': {
                'id': public_ip.id
            },
        }]
    }

    if accelerated_networking:
        nic_config['enable_accelerated_networking'] = True

    try:
        nic_setup = self.network.network_interfaces.create_or_update(
            resource_group_name,
            nic_name,
            nic_config
        )
    except Exception as error:
        raise AzureCloudException(
            'Unable to create network interface: {0}.'.format(
                error
            )
        )

    return nic_setup.result()
Create a network interface in the resource group. Attach NIC to the subnet and public IP provided.
entailment
def _create_public_ip(self, public_ip_name, resource_group_name, region):
    """
    Create dynamic public IP address in the resource group.
    """
    public_ip_config = {
        'location': region,
        'public_ip_allocation_method': 'Dynamic'
    }

    try:
        public_ip_setup = \
            self.network.public_ip_addresses.create_or_update(
                resource_group_name,
                public_ip_name,
                public_ip_config
            )
    except Exception as error:
        raise AzureCloudException(
            'Unable to create public IP: {0}.'.format(error)
        )

    return public_ip_setup.result()
Create dynamic public IP address in the resource group.
entailment
def _create_resource_group(self, region, resource_group_name):
    """
    Create resource group if it does not exist.
    """
    resource_group_config = {'location': region}

    try:
        self.resource.resource_groups.create_or_update(
            resource_group_name,
            resource_group_config
        )
    except Exception as error:
        raise AzureCloudException(
            'Unable to create resource group: {0}.'.format(error)
        )
Create resource group if it does not exist.
entailment
def _create_storage_profile(self):
    """
    Create the storage profile for the instance.

    Image reference can be a custom image name or a published urn.
    """
    if self.image_publisher:
        storage_profile = {
            'image_reference': {
                'publisher': self.image_publisher,
                'offer': self.image_offer,
                'sku': self.image_sku,
                'version': self.image_version
            },
        }
    else:
        for image in self.compute.images.list():
            if image.name == self.image_id:
                image_id = image.id
                break
        else:
            raise AzureCloudException(
                'Image with name {0} not found.'.format(self.image_id)
            )

        storage_profile = {
            'image_reference': {
                'id': image_id
            }
        }

    return storage_profile
Create the storage profile for the instance. Image reference can be a custom image name or a published urn.
entailment
def _create_subnet(self, resource_group_name, subnet_id, vnet_name):
    """
    Create a subnet in the provided vnet and resource group.
    """
    subnet_config = {'address_prefix': '10.0.0.0/29'}

    try:
        subnet_setup = self.network.subnets.create_or_update(
            resource_group_name,
            vnet_name,
            subnet_id,
            subnet_config
        )
    except Exception as error:
        raise AzureCloudException(
            'Unable to create subnet: {0}.'.format(error)
        )

    return subnet_setup.result()
Create a subnet in the provided vnet and resource group.
entailment
def _create_virtual_network(self, region, resource_group_name, vnet_name):
    """
    Create a vnet in the given resource group with default address space.
    """
    vnet_config = {
        'location': region,
        'address_space': {
            'address_prefixes': ['10.0.0.0/27']
        }
    }

    try:
        vnet_setup = self.network.virtual_networks.create_or_update(
            resource_group_name, vnet_name, vnet_config
        )
    except Exception as error:
        raise AzureCloudException(
            'Unable to create vnet: {0}.'.format(error)
        )

    vnet_setup.wait()
Create a vnet in the given resource group with default address space.
entailment
def _create_vm(self, vm_config):
    """
    Attempt to create or update VM instance based on vm_parameters config.
    """
    try:
        vm_setup = self.compute.virtual_machines.create_or_update(
            self.running_instance_id,
            self.running_instance_id,
            vm_config
        )
    except Exception as error:
        raise AzureCloudException(
            'An exception occurred creating virtual machine: {0}'.format(
                error
            )
        )

    vm_setup.wait()
Attempt to create or update VM instance based on vm_parameters config.
entailment
def _create_vm_config(self, interface):
    """
    Create the VM config dictionary.

    Requires an existing network interface object.
    """
    # Split image ID into its components.
    self._process_image_id()

    hardware_profile = {
        'vm_size': self.instance_type or AZURE_DEFAULT_TYPE
    }

    network_profile = {
        'network_interfaces': [{
            'id': interface.id,
            'primary': True
        }]
    }

    storage_profile = self._create_storage_profile()

    os_profile = {
        'computer_name': self.running_instance_id,
        'admin_username': self.ssh_user,
        'linux_configuration': {
            'disable_password_authentication': True,
            'ssh': {
                'public_keys': [{
                    'path': '/home/{0}/.ssh/authorized_keys'.format(
                        self.ssh_user
                    ),
                    'key_data': self.ssh_public_key
                }]
            }
        }
    }

    vm_config = {
        'location': self.region,
        'os_profile': os_profile,
        'hardware_profile': hardware_profile,
        'storage_profile': storage_profile,
        'network_profile': network_profile
    }

    return vm_config
Create the VM config dictionary. Requires an existing network interface object.
entailment
def _get_instance(self):
    """
    Return the instance matching the running_instance_id.
    """
    try:
        instance = self.compute.virtual_machines.get(
            self.running_instance_id,
            self.running_instance_id,
            expand='instanceView'
        )
    except Exception as error:
        raise AzureCloudException(
            'Unable to retrieve instance: {0}'.format(error)
        )

    return instance
Return the instance matching the running_instance_id.
entailment
def _get_instance_state(self):
    """
    Retrieve state of instance.
    """
    instance = self._get_instance()
    statuses = instance.instance_view.statuses

    for status in statuses:
        if status.code.startswith('PowerState'):
            return status.display_status
Retrieve state of instance.
entailment
def _get_management_client(self, client_class):
    """
    Return instance of resource management client.
    """
    try:
        client = get_client_from_auth_file(
            client_class, auth_path=self.service_account_file
        )
    except ValueError as error:
        raise AzureCloudException(
            'Service account file format is invalid: {0}.'.format(error)
        )
    except KeyError as error:
        raise AzureCloudException(
            'Service account file missing key: {0}.'.format(error)
        )
    except Exception as error:
        raise AzureCloudException(
            'Unable to create resource management client: '
            '{0}.'.format(error)
        )

    return client
Return instance of resource management client.
entailment
def _launch_instance(self):
    """
    Create new test instance in a resource group with the same name.
    """
    self.running_instance_id = ipa_utils.generate_instance_name(
        'azure-ipa-test'
    )
    self.logger.debug('ID of instance: %s' % self.running_instance_id)
    self._set_default_resource_names()

    try:
        # Try block acts as a transaction. If an exception is raised
        # attempt to cleanup the resource group and all created resources.

        # Create resource group.
        self._create_resource_group(self.region, self.running_instance_id)

        if self.subnet_id:
            # Use existing vnet/subnet.
            subnet = self.network.subnets.get(
                self.vnet_resource_group,
                self.vnet_name,
                self.subnet_id
            )
        else:
            self.subnet_id = ''.join([self.running_instance_id, '-subnet'])
            self.vnet_name = ''.join([self.running_instance_id, '-vnet'])

            # Create new vnet
            self._create_virtual_network(
                self.region,
                self.running_instance_id,
                self.vnet_name
            )

            # Create new subnet in new vnet
            subnet = self._create_subnet(
                self.running_instance_id,
                self.subnet_id,
                self.vnet_name
            )

        # Setup interface and public ip in resource group.
        public_ip = self._create_public_ip(
            self.public_ip_name,
            self.running_instance_id,
            self.region
        )
        interface = self._create_network_interface(
            self.ip_config_name,
            self.nic_name,
            public_ip,
            self.region,
            self.running_instance_id,
            subnet,
            self.accelerated_networking
        )

        # Get dictionary of VM parameters and create instance.
        vm_config = self._create_vm_config(interface)
        self._create_vm(vm_config)
    except Exception:
        try:
            self._terminate_instance()
        except Exception:
            pass
        raise
    else:
        # Ensure VM is in the running state.
        self._wait_on_instance('VM running', timeout=self.timeout)
Create new test instance in a resource group with the same name.
entailment
def _process_image_id(self):
    """
    Split image id into component values.

    Example:
        SUSE:SLES:12-SP3:2018.01.04
        Publisher:Offer:Sku:Version

    Raises:
        If image_id is not a valid format.
    """
    try:
        image_info = self.image_id.strip().split(':')
        self.image_publisher = image_info[0]
        self.image_offer = image_info[1]
        self.image_sku = image_info[2]
        self.image_version = image_info[3]
    except Exception:
        self.image_publisher = None
Split image id into component values. Example: SUSE:SLES:12-SP3:2018.01.04 Publisher:Offer:Sku:Version Raises: If image_id is not a valid format.
entailment
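Illustrative only: the same split performed standalone on the urn format quoted in the docstring above.

publisher, offer, sku, version = 'SUSE:SLES:12-SP3:2018.01.04'.split(':')
# publisher='SUSE', offer='SLES', sku='12-SP3', version='2018.01.04'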
def _set_default_resource_names(self):
    """
    Generate names for resources based on the running_instance_id.
    """
    self.ip_config_name = ''.join([
        self.running_instance_id, '-ip-config'
    ])
    self.nic_name = ''.join([self.running_instance_id, '-nic'])
    self.public_ip_name = ''.join([self.running_instance_id, '-public-ip'])
Generate names for resources based on the running_instance_id.
entailment
def _set_image_id(self):
    """
    If an existing instance is used get image id from deployment.
    """
    instance = self._get_instance()
    image_info = instance.storage_profile.image_reference

    if image_info.publisher:
        self.image_id = ':'.join([
            image_info.publisher,
            image_info.offer,
            image_info.sku,
            image_info.version
        ])
    else:
        self.image_id = image_info.id.rsplit('/', maxsplit=1)[1]
If an existing instance is used get image id from deployment.
entailment
def _set_instance_ip(self):
    """
    Get the IP address based on instance ID.

    If public IP address not found attempt to get private IP.
    """
    try:
        ip_address = self.network.public_ip_addresses.get(
            self.running_instance_id,
            self.public_ip_name
        ).ip_address
    except Exception:
        try:
            ip_address = self.network.network_interfaces.get(
                self.running_instance_id,
                self.nic_name
            ).ip_configurations[0].private_ip_address
        except Exception as error:
            raise AzureCloudException(
                'Unable to retrieve instance IP address: {0}.'.format(
                    error
                )
            )

    self.instance_ip = ip_address
Get the IP address based on instance ID. If public IP address not found attempt to get private IP.
entailment
def _start_instance(self):
    """
    Start the instance.
    """
    try:
        vm_start = self.compute.virtual_machines.start(
            self.running_instance_id,
            self.running_instance_id
        )
    except Exception as error:
        raise AzureCloudException(
            'Unable to start instance: {0}.'.format(error)
        )

    vm_start.wait()
Start the instance.
entailment
def _stop_instance(self):
    """
    Stop the instance.
    """
    try:
        vm_stop = self.compute.virtual_machines.power_off(
            self.running_instance_id,
            self.running_instance_id
        )
    except Exception as error:
        raise AzureCloudException(
            'Unable to stop instance: {0}.'.format(error)
        )

    vm_stop.wait()
Stop the instance.
entailment
def _terminate_instance(self):
    """
    Terminate the resource group and instance.
    """
    try:
        self.resource.resource_groups.delete(self.running_instance_id)
    except Exception as error:
        raise AzureCloudException(
            'Unable to terminate resource group: {0}.'.format(error)
        )
Terminate the resource group and instance.
entailment
def preferred_change(data):
    ''' Determines preferred existing id based on curie prefix in the
        ranking list
    '''
    ranking = [
        'CHEBI',
        'NCBITaxon',
        'COGPO',
        'CAO',
        'DICOM',
        'UBERON',
        'NLX',
        'NLXANAT',
        'NLXCELL',
        'NLXFUNC',
        'NLXINV',
        'NLXORG',
        'NLXRES',
        'NLXSUB',
        'BIRNLEX',
        'SAO',
        'NDA.CDE',
        'PR',
        'IAO',
        'NIFEXT',
        'OEN',
        'ILX',
    ]
    mock_rank = ranking[::-1]
    score = []
    old_pref_index = None
    for i, d in enumerate(data['existing_ids']):
        if not d.get('preferred'):
            # db allows None or '' which will cause a problem
            d['preferred'] = 0
        if int(d['preferred']) == 1:
            old_pref_index = i
        if d.get('curie'):
            pref = d['curie'].split(':')[0]
            if pref in mock_rank:
                score.append(mock_rank.index(pref))
            else:
                score.append(-1)
        else:
            score.append(-1)

    new_pref_index = score.index(max(score))
    new_pref_iri = data['existing_ids'][new_pref_index]['iri']
    if new_pref_iri.rsplit('/', 1)[0] == 'http://uri.interlex.org/base':
        if old_pref_index:
            if old_pref_index != new_pref_index:
                return data

    for e in data['existing_ids']:
        e['preferred'] = 0
    data['existing_ids'][new_pref_index]['preferred'] = 1

    return data
Determines preferred existing id based on curie prefix in the ranking list
entailment
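Illustrative sketch; the iris and curies are hypothetical but show which existing id ends up marked preferred (UBERON sits higher in the ranking list than ILX, so it wins).

data = {
    'existing_ids': [
        {'iri': 'http://uri.interlex.org/base/ilx_0101431',
         'curie': 'ILX:0101431', 'preferred': 1},
        {'iri': 'http://purl.obolibrary.org/obo/UBERON_0000955',
         'curie': 'UBERON:0000955', 'preferred': 0},
    ]
}
data = preferred_change(data)
# the UBERON entry now carries preferred == 1; the ILX entry is reset to 0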
def merge(new, old):
    ''' synonyms and existing_ids are part of an object bug that can create
        duplicates if in the same batch
    '''
    for k, vals in new.items():
        if k == 'synonyms':
            new_synonyms = vals
            if old['synonyms']:
                old_literals = [syn['literal'].lower().strip()
                                for syn in old['synonyms']]
                for new_synonym in new_synonyms:
                    if new_synonym['literal'].lower().strip() not in old_literals:
                        # default is a list in SciCrunch, that's why this
                        # works without initing old['synonyms']
                        old['synonyms'].append(new_synonym)
            else:
                old['synonyms'].extend(new['synonyms'])
        elif k == 'existing_ids':
            iris = [e['iri'] for e in old['existing_ids']]
            for new_existing_id in vals:
                new_existing_id['preferred'] = 0
                if 'change' not in list(new_existing_id):
                    # notion that you want to add it
                    new_existing_id['change'] = False
                if new_existing_id.get('delete') == True:
                    if new_existing_id['iri'] in iris:
                        new_existing_ids = []
                        for e in old['existing_ids']:
                            if e['iri'] != new_existing_id['iri']:
                                new_existing_ids.append(e)
                        old['existing_ids'] = new_existing_ids
                    else:
                        print(new_existing_id)
                        sys.exit("You want to delete an iri that doesn't exist")
                elif new_existing_id.get('replace') == True:
                    if not new_existing_id.get('old_iri'):
                        sys.exit(
                            'Need to have old_iri as a key to have a ref for replace'
                        )
                    old_iri = new_existing_id.pop('old_iri')
                    if old_iri in iris:
                        new_existing_ids = []
                        for e in old['existing_ids']:
                            if e['iri'] == old_iri:
                                if new_existing_id.get('curie'):
                                    e['curie'] = new_existing_id['curie']
                                if new_existing_id.get('iri'):
                                    e['iri'] = new_existing_id['iri']
                            new_existing_ids.append(e)
                        old['existing_ids'] = new_existing_ids
                    else:
                        print(new_existing_id)
                        sys.exit("You want to replace an iri that doesn't exist"
                                 + '\n' + str(new))
                else:
                    if new_existing_id['iri'] not in iris and new_existing_id['change'] == True:
                        sys.exit('You want to change iri that doesnt exist ' + str(new))
                    elif new_existing_id['iri'] not in iris and new_existing_id['change'] == False:
                        old['existing_ids'].append(new_existing_id)
                    elif new_existing_id['iri'] in iris and new_existing_id['change'] == True:
                        new_existing_ids = []
                        for e in old['existing_ids']:
                            if e['iri'] == new_existing_id['iri']:
                                if not new_existing_id.get('curie'):
                                    new_existing_id['curie'] = e['curie']
                                new_existing_ids.append(new_existing_id)
                            else:
                                new_existing_ids.append(e)
                        old['existing_ids'] = new_existing_ids
                    elif new_existing_id['iri'] in iris and new_existing_id['change'] == False:
                        pass  # for sanity readability
                    else:
                        sys.exit('Something broke while merging in existing_ids')
        elif k in ['definition', 'superclasses', 'id', 'type', 'comment',
                   'label', 'uid', 'ontologies']:
            old[k] = vals

    # TODO: still need to mark them... but when batch elastic for update works
    # old['uid'] = 34142 # DEBUG: need to mark as mine manually until all Old terms are fixed

    ''' REMOVE REPEATS; needs to exist due to server overloads '''
    if old.get('synonyms'):
        visited = {}
        new_synonyms = []
        for synonym in old['synonyms']:
            if not visited.get(synonym.get('literal')):
                new_synonyms.append(synonym)
                visited[synonym['literal']] = True
        old['synonyms'] = new_synonyms

    visited = {}
    new_existing_ids = []
    for e in old['existing_ids']:
        if not visited.get(e['iri']):
            new_existing_ids.append(e)
            visited[e['iri']] = True
    old['existing_ids'] = new_existing_ids

    old = preferred_change(old)
    return old
synonyms and existing_ids are part of an object bug that can create duplicates if in the same batch
entailment
def main(context, no_color):
    """
    Ipa provides a Python API and command line utility for testing images.

    It can be used to test images in the Public Cloud (AWS, Azure, GCE,
    etc.).
    """
    if context.obj is None:
        context.obj = {}
    context.obj['no_color'] = no_color
Ipa provides a Python API and command line utility for testing images. It can be used to test images in the Public Cloud (AWS, Azure, GCE, etc.).
entailment
def results(context, history_log):
    """Process provided history log and results files."""
    if context.obj is None:
        context.obj = {}
    context.obj['history_log'] = history_log

    if context.invoked_subcommand is None:
        context.invoke(show, item=1)
Process provided history log and results files.
entailment
def archive(context, clear_log, items, path, name):
    """
    Archive the history log and all results/log files.

    After archive is created optionally clear the history log.
    """
    history_log = context.obj['history_log']
    no_color = context.obj['no_color']

    with open(history_log, 'r') as f:
        # Get history items
        history_items = f.readlines()

    if items:
        # Split comma separated list and cast indices to integer.
        items = [int(item) for item in items.split(',')]

        lines = []
        for index in items:
            lines.append(history_items[len(history_items) - index])
        history_items = lines

    with tempfile.TemporaryDirectory() as temp_dir:
        for item in history_items:
            # Copy log and results file,
            # update results file with relative path.
            archive_history_item(item, temp_dir, no_color)

        file_name = ''.join([name, '.tar.gz'])
        archive_path = os.path.join(path, file_name)
        with tarfile.open(archive_path, "w:gz") as tar:
            # Create tar archive
            tar.add(temp_dir, arcname='results')

    if clear_log:
        if items:
            # Remove duplicates to prevent unwanted deletion.
            items = list(set(items))

            # Must delete items from bottom to top of history file
            # to preserve indices. (Index 0 is last item in file)
            items.sort()

            for index in items:
                context.invoke(delete, item=index)
        else:
            context.invoke(clear)

    click.echo(
        'Exported results history to archive: {0}'.format(archive_path)
    )
Archive the history log and all results/log files. After archive is created optionally clear the history log.
entailment
def delete(context, item):
    """
    Delete the specified history item from the history log.
    """
    history_log = context.obj['history_log']
    no_color = context.obj['no_color']

    try:
        with open(history_log, 'r+') as f:
            lines = f.readlines()
            history = lines.pop(len(lines) - item)
            f.seek(0)
            f.write(''.join(lines))
            f.flush()
            f.truncate()
    except IndexError:
        echo_style(
            'History result at index %s does not exist.' % item,
            no_color,
            fg='red'
        )
        sys.exit(1)
    except Exception as error:
        echo_style(
            'Unable to delete result item {0}. {1}'.format(item, error),
            no_color,
            fg='red'
        )
        sys.exit(1)

    log_file = get_log_file_from_item(history)
    try:
        os.remove(log_file)
    except Exception:
        echo_style(
            'Unable to delete results file for item {0}.'.format(item),
            no_color,
            fg='red'
        )

    try:
        os.remove(log_file.rsplit('.', 1)[0] + '.results')
    except Exception:
        echo_style(
            'Unable to delete log file for item {0}.'.format(item),
            no_color,
            fg='red'
        )
Delete the specified history item from the history log.
entailment
def show(context, log, results_file, verbose, item):
    """
    Print test results info from provided results json file.

    If no results file is supplied echo results from most recent test
    in history if it exists.

    If verbose option selected, echo all test cases.

    If log option selected echo test log.
    """
    history_log = context.obj['history_log']
    no_color = context.obj['no_color']

    if not results_file:
        # Find results/log file from history
        # Default -1 is most recent test run
        try:
            with open(history_log, 'r') as f:
                lines = f.readlines()
            history = lines[len(lines) - item]
        except IndexError:
            echo_style(
                'History result at index %s does not exist.' % item,
                no_color,
                fg='red'
            )
            sys.exit(1)
        except Exception:
            echo_style(
                'Unable to retrieve results history, '
                'provide results file or re-run test.',
                no_color,
                fg='red'
            )
            sys.exit(1)

        log_file = get_log_file_from_item(history)
        if log:
            echo_log(log_file, no_color)
        else:
            echo_results_file(
                log_file.rsplit('.', 1)[0] + '.results',
                no_color,
                verbose
            )
    elif log:
        # Log file provided
        echo_log(results_file, no_color)
    else:
        # Results file provided
        echo_results_file(results_file, no_color, verbose)
Print test results info from provided results json file. If no results file is supplied echo results from most recent test in history if it exists. If verbose option selected, echo all test cases. If log option selected echo test log.
entailment
def _get_ssh_client(self):
    """Return a new or existing SSH client for given ip."""
    return ipa_utils.get_ssh_client(
        self.instance_ip,
        self.ssh_private_key_file,
        self.ssh_user,
        timeout=self.timeout
    )
Return a new or existing SSH client for given ip.
entailment
def _log_info(self):
    """Output test run information to top of log file."""
    if self.cloud == 'ssh':
        self.results['info'] = {
            'platform': self.cloud,
            'distro': self.distro_name,
            'image': self.instance_ip,
            'timestamp': self.time_stamp,
            'log_file': self.log_file,
            'results_file': self.results_file
        }
    else:
        self.results['info'] = {
            'platform': self.cloud,
            'region': self.region,
            'distro': self.distro_name,
            'image': self.image_id,
            'instance': self.running_instance_id,
            'timestamp': self.time_stamp,
            'log_file': self.log_file,
            'results_file': self.results_file
        }

    self._write_to_log(
        '\n'.join(
            '%s: %s' % (key, val)
            for key, val in self.results['info'].items()
        )
    )
Output test run information to top of log file.
entailment
def _write_to_log(self, output):
    """Write the output string to the log file."""
    with open(self.log_file, 'a') as log_file:
        log_file.write('\n')
        log_file.write(output)
        log_file.write('\n')
Write the output string to the log file.
entailment
def _merge_results(self, results):
    """Combine results of test run with existing dict."""
    self.results['tests'] += results['tests']

    for key, value in results['summary'].items():
        self.results['summary'][key] += value
Combine results of test run with existing dict.
entailment
def _save_results(self):
    """Save results dictionary to json file."""
    with open(self.results_file, 'w') as results_file:
        json.dump(self.results, results_file)
Save results dictionary to json file.
entailment
def _set_distro(self):
    """Determine distro for image and create instance of class."""
    if self.distro_name == 'sles':
        self.distro = SLES()
    elif self.distro_name == 'opensuse_leap':
        self.distro = openSUSE_Leap()
    else:
        raise IpaCloudException(
            'Distribution: %s, not supported.' % self.distro_name
        )
Determine distro for image and create instance of class.
entailment