code string | signature string | docstring string | loss_without_docstring float64 | loss_with_docstring float64 | factor float64 |
|---|---|---|---|---|---|
prefix, namespace, name = self.g.compute_qname(iri)
qname = prefix + ':' + name
self.rqname[qname] = iri
return qname | def qname(self, iri: str) -> str | Get qualified name of uri in rdflib graph while also saving it
Args: iri: The iri whose uri you want replaced with a known prefix
Returns: qualified name of the iri to be used as the new predicate | 7.812081 | 8.012969 | 0.97493 |
ns = Namespace(uri)
if not self.namespaces.get(prefix):
self.namespaces[prefix] = ns
self.g.bind(prefix, uri)
return ns | def add_namespace(self, prefix: str, uri: str) -> Namespace | Adds a prefix to uri mapping (namespace)
Adds a namespace to replace any uris in iris with shortened prefixes
in order to make the file more readable. Not technically necessary.
Args:
prefix: prefix that will substitute the uri in the iri
uri: the uri in the iri to be substituted by the prefix
Returns:
A namespace of the uri
Example:
add_namespace(
prefix = "rdfs",
uri = 'http://www.w3.org/2000/01/rdf-schema#',
)
makes
"http://www.w3.org/2000/01/rdf-schema#label 'neuron'@en ;",
become
"rdfs:label 'neuron'@en ;" | 3.276084 | 5.79328 | 0.565497 |
iri = str(iri)
max_iri_len = 0
max_prefix = None
for prefix, uri in common_namespaces.items():
if uri in iri and max_iri_len < len(uri):  # if the matched uri is longer, it wins (longest match)
max_prefix = prefix
max_iri_len = len(uri)
return max_prefix | def find_prefix(self, iri: Union[URIRef, Literal, str]) -> Union[None, str] | Finds if uri is in common_namespaces
Auto adds prefix if the incoming iri has a uri in common_namespaces. If it's not in the local
library, then it will just be spit out as the iri and not saved/condensed into a qualified
name.
The reason for the maxes is to find the longest string match. This is to avoid accidentally
matching iris with short uris when a longer, more complete uri is the real match.
Args: iri: iri to be searched to find a known uri in it.
Example:
In [1]: print(find_prefix("http://www.w3.org/2000/01/rdf-schema#label"))
Out [1]: "rdfs"  # the prefix whose uri "http://www.w3.org/2000/01/rdf-schema#" was matched
In [2]: print(find_prefix("http://made_up_uri/label"))
Out [2]: None | 5.459621 | 4.516794 | 1.208738 |
bnode: BNode = self.triple2annotation_bnode.get( (subj, pred, obj) )
if not bnode:
a_s: BNode = BNode()
self.triple2annotation_bnode[(subj, pred, obj)]: BNode = a_s
self.g.add((a_s, RDF.type, OWL.Axiom))
self.g.add((a_s, OWL.annotatedSource, self.process_subj_or_pred(subj)))
self.g.add((a_s, OWL.annotatedProperty,self.process_subj_or_pred(pred)))
self.g.add((a_s, OWL.annotatedTarget, self.process_obj(obj)))
else:
a_s: BNode = bnode
self.g.add((a_s, self.process_subj_or_pred(a_p), self.process_obj(a_o)))
return a_s | def add_annotation(
self,
subj: URIRef,
pred: URIRef,
obj: Union[Literal, URIRef],
a_p: URIRef ,
a_o: Union[Literal, URIRef],
) -> BNode | Adds annotation to rdflib graph.
The annotation axiom will be filled in if this is a new annotation for the triple.
Args:
subj: Entity subject to be annotated
pred: Entity's predicate anchor to be annotated
obj: Entity's object anchor to be annotated
a_p: Annotation predicate
a_o: Annotation object
Returns:
A BNode which is an address to the location in the RDF graph that is storing the
annotation information. | 2.0481 | 2.088927 | 0.980456 |
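A hedged usage sketch of add_annotation; `gr` is a hypothetical instance of this graph wrapper and the annotation values are made up.
from rdflib import RDF, RDFS, URIRef, Literal

axiom_bnode = gr.add_annotation(
    subj=URIRef('http://uri.interlex.org/base/ilx_0101431'),
    pred=RDF.type,
    obj=URIRef('http://www.w3.org/2002/07/owl#Class'),
    a_p=RDFS.comment,                               # annotation predicate
    a_o=Literal('axiom annotated for provenance'),  # annotation object
)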
if obj in [None, "", " "]: return # Empty objects are bad practice
_subj = self.process_subj_or_pred(subj)
_pred = self.process_subj_or_pred(pred)
_obj = self.process_obj(obj)
self.g.add( (_subj, _pred, _obj) ) | def add_triple(
self,
subj: Union[URIRef, str],
pred: Union[URIRef, str],
obj: Union[URIRef, Literal, str]
) -> None | Adds triple to rdflib Graph
Triple can be of any subject, predicate, and object of the entity without a need for order.
Args:
subj: Entity subject
pred: Entity predicate
obj: Entity object
Example:
In [1]: add_triple(
...: 'http://uri.interlex.org/base/ilx_0101431',
...: RDF.type,
...: 'http://www.w3.org/2002/07/owl#Class'
...: ) | 4.131722 | 4.917644 | 0.840183 |
if self.namespaces.get(prefix):
return self.namespaces[prefix]
iri: str = common_namespaces.get(prefix)
if iri:
return self.add_namespace(prefix, iri) | def process_prefix(self, prefix: str) -> Union[Namespace, None] | Add namespace to graph if it has a local match
This allows qnames to be used without adding their respective namespaces if they are in
the common_namespaces local dict. This is to save a butt-ton of time trying to see what
the ontology has as far as uris go.
Args: prefix: prefix of the uri in the rdflib namespace to be checked if it exists in
the local dict of common_namespaces.
Returns: Namespace of the uri if it was added or already exists; else None | 3.627277 | 4.354432 | 0.833008 |
if 'http' in component:
prefix = self.find_prefix(component) # Find uri in iri based on common_namespaces
if prefix: self.process_prefix(prefix) # if match, will add to Graph namespaces
return URIRef(component)
elif ':' in component:
presumed_prefix, info = component.split(':', 1)
namespace: Union[Namespace, None] = self.process_prefix(presumed_prefix)
if not namespace: exit(component + ': qname namespace doesn\'t exist yet.')
return namespace[info]
exit(component + ': is not a valid subject or predicate') | def process_subj_or_pred(self, component: Union[URIRef, str]) -> URIRef | Adds viable uri from iri or expands viable qname to iri to be triple ready
Need to have a viable qualified name (qname) in order to use a qname. You can make it
viable either by adding the namespace beforehand with add_namespace(s) or by having it
already preloaded in the local common_namespaces.
Args:
component: entity subject or predicate to be expanded or have its uri saved.
Returns:
rdflib URIRef ready subject or predicate to be put into a triple.
Raises:
SystemExit: When expecting a qname to be expanded, but is not valid or if
component is not a qualified name or an iri. | 9.033532 | 7.358745 | 1.227591 |
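A short sketch of the two accepted input forms for process_subj_or_pred, assuming a hypothetical instance `gr` and that 'rdfs' is among the preloaded common_namespaces:
# Full iri: returned as a URIRef; a prefix is auto-added when the uri is known.
gr.process_subj_or_pred('http://www.w3.org/2000/01/rdf-schema#label')
# Qname: expanded through the matching namespace; exits if the prefix cannot be resolved.
gr.process_subj_or_pred('rdfs:label')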
if isinstance(obj, dict) or isinstance(obj, list):
exit(str(obj) + ': should be str or intended to be a URIRef or Literal.')
if isinstance(obj, Literal) or isinstance(obj, URIRef):
prefix = self.find_prefix(obj)
if prefix: self.process_prefix(prefix)
return obj
if len(obj) > 8:
if 'http' == obj[:4] and '://' in obj and ' ' not in obj:
prefix = self.find_prefix(obj)
if prefix: self.process_prefix(prefix)
return URIRef(obj)
if ':' in str(obj):
presumed_prefix, info = obj.split(':', 1)
namespace: Union[Namespace, None] = self.process_prefix(presumed_prefix)
if namespace: return namespace[info]
return Literal(obj) | def process_obj(self, obj: Union[URIRef, Literal, str]) -> Union[URIRef, Literal] | Gives component the proper node type
Args:
obj: Entity object to be converted to its appropriate node type
Returns:
URIRef or Literal type of the object provided.
Raises:
SystemExit: If object is a dict or list it becomes str with broken data. Needs to
come in one object at a time. | 3.557326 | 3.57515 | 0.995015 |
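An illustrative sketch of how process_obj types its input, assuming a hypothetical instance `gr` with 'rdfs' resolvable locally:
gr.process_obj('http://www.w3.org/2002/07/owl#Class')  # looks like an iri -> URIRef
gr.process_obj('rdfs:label')                           # resolvable qname -> expanded URIRef
gr.process_obj('neuron')                               # anything else -> Literal('neuron')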
kwargs = {key: str(value) for key, value in kwargs.items()}
return self.g.serialize(**kwargs) | def serialize(self, **kwargs) -> str | rdflib.Graph().serialize wrapper
Original serialize cannot handle PosixPath from pathlib. You should ignore everything but
destination and format. format is a must, but if you don't include a destination, it will
just return the formatted graph as a str output.
Args:
destination: Output file path,
format: format for the triples to be put together as: 'xml', 'n3', 'turtle', 'nt',
'pretty-xml', 'trix', 'trig' and 'nquads' are built in. json-ld in rdflib_jsonld
base: none
encoding: None
**args: None | 6.170414 | 4.515609 | 1.366463 |
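A hedged sketch of the serialize wrapper; the output path is hypothetical and the exact return type depends on the installed rdflib version:
from pathlib import Path

gr.serialize(destination=Path('/tmp/out.ttl'), format='turtle')  # PosixPath is stringified before delegating
ttl = gr.serialize(format='turtle')                              # no destination -> serialization is returned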
self.g.remove( (subj, pred, obj) ) | def remove_triple(
self,
subj: URIRef,
pred: URIRef,
obj: Union[URIRef, Literal]
) -> None | Removes triple from rdflib Graph
You must input the triple in its URIRef or Literal form for each node exactly the way it
was originally entered or it will not delete the triple.
Args:
subj: Entity subject; removed entirely only if it is the only node with this subject, else this
just deletes a description, i.e. a predicate_object of this entity.
pred: Entity predicate to be removed
obj: Entity object to be removed | 6.06147 | 5.146836 | 1.177708 |
print(self.g.serialize(format=format).decode('utf-8')) | def print_graph(self, format: str = 'turtle') -> str | prints the serialized, formatted rdflib Graph | 5.737971 | 3.597922 | 1.594801 |
url_base = self.base_url + '/api/1/term/view/{id}' + '?key=' + self.api_key
urls = [url_base.format(id=str(_id)) for _id in ids]
return self.get(
urls=urls,
LIMIT=LIMIT,
action='Searching For Terms',
crawl=crawl,
_print=_print) | def identifierSearches(self,
ids=None,
LIMIT=25,
_print=True,
crawl=False) | parameters( data = "list of term_ids" ) | 4.656836 | 4.530256 | 1.027941 |
url_base = self.base_url + "/api/1/ilx/search/identifier/{identifier}?key={APIKEY}"
urls = [url_base.format(identifier=ilx_id.replace('ILX:', 'ilx_'), APIKEY=self.api_key) for ilx_id in ilx_ids]
return self.get(
urls=urls,
LIMIT=LIMIT,
action='Searching For Terms',
crawl=crawl,
_print=_print) | def ilxSearches(self,
ilx_ids=None,
LIMIT=25,
_print=True,
crawl=False) | parameters( data = "list of ilx_ids" ) | 4.265511 | 4.146924 | 1.028596 |
url_base = self.base_url + '/api/1/term/edit/{id}'
merged_data = []
# PHP on the server is LOADED with bugs. Best to just duplicate entity data and change
# what you need in it before re-upserting the data.
old_data = self.identifierSearches(
[d['id'] for d in data], # just need the ids
LIMIT = LIMIT,
_print = _print,
crawl = crawl,
)
for d in data: # d for dictionary
url = url_base.format(id=str(d['id']))
# Reason this exists is to avoid contradictions in case you are using a local reference
if d['ilx'] != old_data[int(d['id'])]['ilx']:
print(d['ilx'], old_data[int(d['id'])]['ilx'])
exit('You might be using beta instead of production!')
merged = scicrunch_client_helper.merge(new=d, old=old_data[int(d['id'])])
merged = scicrunch_client_helper.superclasses_bug_fix(merged)  # BUG: superclass output differs from the input format needed
merged_data.append((url, merged))
resp = self.post(
merged_data,
LIMIT = LIMIT,
action = 'Updating Terms', # forced input from each function
_print = _print,
crawl = crawl,
)
return resp | def updateTerms(self, data:list, LIMIT:int=20, _print:bool=True, crawl:bool=False,) -> list | Updates existing entities
Args:
data:
needs:
id <str>
ilx <str>
options:
definition <str>  # bug with quotations
superclasses [{'id':<int>}]
type term, cde, annotation, or relationship <str>
synonyms {'literal':<str>}
existing_ids {'iri':<str>,'curie':<str>','change':<bool>, 'delete':<bool>}
LIMIT:
limit of concurrent requests
_print:
prints label of data presented
crawl:
True: Uses linear requests.
False: Uses concurrent requests from the asyncio and aiohttp modules
Returns:
List of filled in data parallel with the input data. If any entity failed with an
ignorable reason, it will return empty for the item in the list returned. | 8.68363 | 8.276678 | 1.049169 |
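A minimal, hedged payload sketch for updateTerms; the id, ilx fragment, and values are made up, and `client` is assumed to be an instance of this API client:
updates = [{
    'id': '12345',                                 # hypothetical InterLex term id
    'ilx': 'ilx_0101431',
    'definition': 'an updated definition',
    'synonyms': [{'literal': 'an added synonym'}],
}]
responses = client.updateTerms(updates, LIMIT=20, crawl=True)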
needed = set([
'label',
'type',
])
url_base = self.base_url + '/api/1/ilx/add'
terms = []
for d in data:
if (set(list(d)) & needed) != needed:
exit('You need keys: '+ str(needed - set(list(d))))
if not d.get('label') or not d.get('type'):  # php won't catch empty type!
exit('=== Data is missing label or type! ===')
d['term'] = d.pop('label')  # ilx only accepts term; will need to be replaced back
#d['batch-elastic'] = 'True' # term/add and edit should be ready now
terms.append((url_base, d))
primer_responses = self.post(
terms,
action='Priming Terms',
LIMIT=LIMIT,
_print=_print,
crawl=crawl)
ilx = {}
for primer_response in primer_responses:
primer_response['term'] = primer_response['term'].replace('&#39;', "'")   # assumed: undo HTML-escaped apostrophes
primer_response['term'] = primer_response['term'].replace('&quot;', '"')  # assumed: undo HTML-escaped double quotes
primer_response['label'] = primer_response.pop('term')
ilx[primer_response['label'].lower()] = primer_response
url_base = self.base_url + '/api/1/term/add'
terms = []
for d in data:
d['label'] = d.pop('term')
d = scicrunch_client_helper.superclasses_bug_fix(d)
if not ilx.get(d['label'].lower()): # ilx can be incomplete if errored term
continue
try:
d.update({'ilx': ilx[d['label'].lower()]['ilx']})
except:
d.update({'ilx': ilx[d['label'].lower()]['fragment']})
terms.append((url_base, d))
return self.post(
terms,
action='Adding Terms',
LIMIT=LIMIT,
_print=_print,
crawl=crawl) | def addTerms(self, data, LIMIT=25, _print=True, crawl=False) | need:
label <str>
type term, cde, annotation, or relationship <str>
options:
definition <str>  # bug with quotations
superclasses [{'id':<int>}]
synonyms [{'literal':<str>}]
existing_ids [{'iri':<str>,'curie':<str>'}]
ontologies [{'id':<int>}]
[{'type':'term', 'label':'brain'}] | 4.65892 | 4.613099 | 1.009933 |
url_base = self.base_url + \
'/api/1/term/get-annotations/{tid}?key=' + self.api_key
urls = [url_base.format(tid=str(tid)) for tid in tids]
return self.get(urls,
LIMIT=LIMIT,
_print=_print,
crawl=crawl) | def getAnnotations_via_tid(self,
tids,
LIMIT=25,
_print=True,
crawl=False) | tids = list of term ids that possess the annotations | 3.337288 | 3.3211 | 1.004874 |
url_base = self.base_url + \
'/api/1/term/get-annotation/{id}?key=' + self.api_key
urls = [
url_base.format(id=str(annotation_id))
for annotation_id in annotation_ids
]
return self.get(urls, LIMIT=LIMIT, _print=_print, crawl=crawl) | def getAnnotations_via_id(self,
annotation_ids,
LIMIT=25,
_print=True,
crawl=False) | annotation_ids = list of strings or ints that are the ids of the annotations themselves | 3.403183 | 3.6586 | 0.930187 |
url_base = self.base_url + \
'/api/1/term/edit-annotation/{id}' # id of annotation not term id
annotations = self.getAnnotations_via_id([d['id'] for d in data],
LIMIT=LIMIT,
_print=_print,
crawl=crawl)
annotations_to_update = []
for d in data:
annotation = annotations[int(d['id'])]
annotation.update({**d})
url = url_base.format(id=annotation['id'])
annotations_to_update.append((url, annotation))
self.post(annotations_to_update,
LIMIT=LIMIT,
action='Updating Annotations',
_print=_print,
crawl=crawl) | def updateAnnotations(self,
data,
LIMIT=25,
_print=True,
crawl=False,) | data = [{'id', 'tid', 'annotation_tid', 'value', 'comment', 'upvote', 'downvote',
'curator_status', 'withdrawn', 'term_version', 'annotation_term_version', 'orig_uid',
'orig_time'}] | 3.920255 | 3.848227 | 1.018717 |
url_base = self.base_url + \
'/api/1/term/edit-annotation/{annotation_id}' # id of annotation not term id; thx past troy!
annotations = self.getAnnotations_via_id(annotation_ids,
LIMIT=LIMIT,
_print=_print,
crawl=crawl)
annotations_to_delete = []
for annotation_id in annotation_ids:
annotation = annotations[int(annotation_id)]
params = {
'value': ' ', # for delete
'annotation_tid': ' ', # for delete
'tid': ' ', # for delete
'term_version': '1',
'annotation_term_version': '1',
}
url = url_base.format(annotation_id=annotation_id)
annotation.update({**params})
annotations_to_delete.append((url, annotation))
return self.post(annotations_to_delete,
LIMIT=LIMIT,
_print=_print,
crawl=crawl) | def deleteAnnotations(self,
annotation_ids,
LIMIT=25,
_print=True,
crawl=False,) | data = list of ids | 4.295501 | 4.328836 | 0.992299 |
url_base = self.base_url + '/api/1/term/add-relationship'
relationships = []
for relationship in data:
relationship.update({
'term1_version': relationship['term1_version'],
'term2_version': relationship['term2_version'],
'relationship_term_version': relationship['relationship_term_version']
})
relationships.append((url_base, relationship))
return self.post(
relationships,
LIMIT = LIMIT,
action = 'Adding Relationships',
_print = _print,
crawl = crawl,
) | def addRelationships(
self,
data: list,
LIMIT: int = 20,
_print: bool = True,
crawl: bool = False,
) -> list | data = [{
"term1_id", "term2_id", "relationship_tid",
"term1_version", "term2_version",
"relationship_term_version",}] | 3.531451 | 2.692318 | 1.311677 |
term_id, term_version = [(d['id'], d['version'])
for d in self.ilxSearches([ilx_id], crawl=True, _print=False).values()][0]
annotations = [{
'tid': term_id,
'annotation_tid': '306375', # id for annotation "deprecated"
'value': 'True',
'term_version': term_version,
'annotation_term_version': '1', # term version for annotation "deprecated"
}]
if note:
editor_note = {
'tid': term_id,
'annotation_tid': '306378', # id for annotation "editorNote"
'value': note,
'term_version': term_version,
'annotation_term_version': '1', # term version for annotation "deprecated"
}
annotations.append(editor_note)
self.addAnnotations(annotations, crawl=True, _print=False)
print(annotations) | def deprecate_entity(
self,
ilx_id: str,
note = None,
) -> None | Tags a term in InterLex to warn that this term is no longer used
There isn't a proper way to delete a term, so we have to mark it so I can
extrapolate that in mysql/ttl loads.
Args:
ilx_id: ilx id of the term to be deprecated
note: optional editor note explaining the deprecation
Example: deprecate_entity('ilx_0101431') | 3.848257 | 3.628473 | 1.060572 |
needed = set([
'label',
'type',
])
url_ilx_add = self.base_url + '/api/1/ilx/add'
url_term_add = self.base_url + '/api/1/term/add'
url_term_update = self.base_url + '/api/1/term/edit/{id}'
if (set(list(entity)) & needed) != needed:
exit('You need keys: ' + str(needed - set(list(entity))))
# to ensure uniqueness
random_string = ''.join(random.choices(string.ascii_uppercase + string.digits, k=25))
real_label = entity['label']
entity['label'] = entity['label'] + '_' + random_string
entity['term'] = entity.pop('label')  # ilx only accepts term; will need to be replaced back
primer_response = self.post([(url_ilx_add, entity.copy())], _print=False, crawl=True)[0]
entity['label'] = entity.pop('term')
entity['ilx'] = primer_response['fragment'] if primer_response.get('fragment') else primer_response['ilx']
entity = scicrunch_client_helper.superclasses_bug_fix(entity)
response = self.post([(url_term_add, entity.copy())], _print=False, crawl=True)[0]
old_data = self.identifierSearches(
[response['id']], # just need the ids
_print = False,
crawl = True,
)[response['id']]
old_data['label'] = real_label
entity = old_data.copy()
url_term_update = url_term_update.format(id=entity['id'])
return self.post([(url_term_update, entity)], _print=False, crawl=True) | def force_add_term(self, entity: dict) | Need to add an entity that already has a label existing in InterLex?
Well this is the function for you!
entity:
need:
label <str>
type term, cde, pde, fde, annotation, or relationship <str>
options:
definition <str>
superclasses [{'id':<int>}]
synonyms [{'literal':<str>}]
existing_ids [{'iri':<str>,'curie':<str>'}]
ontologies [{'id':<int>}]
example:
entity = {
'type':'term',
'label':'brain',
'existing_ids': [{
'iri':'http://ncbi.org/123',
'curie':'NCBI:123'
}]
} | 4.331142 | 4.229472 | 1.024039 |
chunk_size = 100000
offset = 0
data = defaultdict(lambda : defaultdict(list))
with open(output, 'wb') as outfile:
query = query.replace(';', '')
query += ' LIMIT {chunk_size} OFFSET {offset}'  # assumed pagination clause; the original statement was incomplete
while True:
print(offset)
chunk_query = query.format(  # keep the unformatted template so each iteration gets a fresh offset
chunk_size=chunk_size,
offset=offset
)
df = pd.read_sql(chunk_query, self.engine)
pickle.dump(df, outfile)
offset += chunk_size
if len(df) < chunk_size:
break
outfile.close() | def create_df_file_with_query(self, query, output) | Dumps in df in chunks to avoid crashes. | 2.999335 | 2.813564 | 1.066027 |
''' --word-diff=porcelain clone'''
delta = difflib.Differ().compare(s1.split(), s2.split())
difflist = []
fullline = ''
for line in delta:
if line[0] == '?':
continue
elif line[0] == ' ':
fullline += line.strip() + ' '
else:
if fullline:
difflist.append(fullline[:-1])
fullline = ''
difflist.append(line)
if fullline:
difflist.append(fullline[:-1])
return [l[:] for l in '\n'.join(difflist).splitlines() if l] | def diff(s1, s2) | --word-diff=porcelain clone | 3.329634 | 2.647767 | 1.257525 |
''' --word-diff=color clone '''
string = ''
for line in diff(s1, s2):
if line[0] == '-':
string += ' ' + TermColors.red(line[2:])
elif line[0] == '+':
string += ' ' + TermColors.green(line[2:])
else:
string += ' ' + line
return string[1:] | def diffcolor(s1, s2) | --word-diff=color clone | 4.115287 | 2.857866 | 1.439986 |
''' creates basic html based on the diff of 2 strings '''
html = difflib.HtmlDiff().make_file(s1.split(), s2.split())
with open(output, 'w') as f:
f.write(html) | def create_html(s1, s2, output='test.html') | creates basic html based on the diff of 2 strings | 3.866427 | 2.564617 | 1.507604 |
''' will traverse nested list and dicts until key_target equals the current dict key '''
if isinstance(obj, str) and '.json' in str(obj):
obj = json.load(open(obj, 'r'))
if isinstance(obj, list):
queue = obj.copy()
elif isinstance(obj, dict):
queue = [obj.copy()]
else:
sys.exit('obj needs to be a list or dict')
count = 0
''' BFS '''
while queue and count != 1000:
count += 1
curr_obj = queue.pop()
if isinstance(curr_obj, dict):
for key, value in curr_obj.items():
if key == key_target:
return curr_obj
else:
queue.append(curr_obj[key])
elif isinstance(curr_obj, list):
for co in curr_obj:
queue.append(co)
if count == 1000:
sys.exit('traverse_data needs to be updated...')
return False | def traverse_data(obj, key_target) | will traverse nested list and dicts until key_target equals the current dict key | 3.153744 | 2.720917 | 1.159074 |
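A small sketch of traverse_data on made-up nested data (using the corrected loop condition above):
nested = {'results': [{'label': 'brain', 'definition': 'part of the CNS'}]}
traverse_data(nested, 'definition')   # -> {'label': 'brain', 'definition': 'part of the CNS'}
traverse_data(nested, 'missing_key')  # -> False once the queue is exhausted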
''' creates a (keyname + diff) key within the same layer of the json in which key_target resides.
Ex: json1={'definition':'data of key_target'}, json2={'definition':'data of key_target'}
key_target = 'definition'
Usage:
json_diff (
json_data1, json_data1 can be both [{..}] and {[..]} or json file path
json_data2, json_data2 can be both [{..}] and {[..]} or json file path
key_target, <str> of a key within a dict that holds the string data for comparison; EX: 'definition'
get_just_diff=True, default=True; will return just the color diff of the 2 strings
porcelain=False default=False; porcelain clone as output only as optional
)
'''
json1 = json_secretary(json1)
json2 = json_secretary(json2)
obj1 = traverse_data(json1, key_target)
obj2 = traverse_data(json2, key_target)
output = diffcolor(obj1[key_target], obj2[key_target])
if porcelain:
return diff(obj1[key_target], obj2[key_target])
if get_just_diff:
return output
obj1[key_target + '_diff'] = output
obj2[key_target + '_diff'] = output
return json1, json2, output | def json_diff(json1, json2, key_target, get_just_diff=True, porcelain=False) | creates a (keyname + diff) key within the same layer of the json in which key_target resides.
Ex: json1={'definition':'data of key_target'}, json2={'definition':'data of key_target'}
key_target = 'definition'
Usage:
json_diff (
json_data1, json_data1 can be both [{..}] and {[..]} or json file path
json_data2, json_data2 can be both [{..}] and {[..]} or json file path
key_target, <str> of a key within a dict that holds the string data for comparison; EX: 'definition'
get_just_diff=True, default=True; will return just the color diff of the 2 strings
porcelain=False default=False; porcelain clone as output only as optional
) | 5.308383 | 1.617974 | 3.280882 |
safety_factor = 1.2
vms_max = vms_max_kb
vms_gigs = vms_max / 1024 ** 2
buffer = safety_factor * vms_max
buffer_gigs = buffer / 1024 ** 2
vm = psutil.virtual_memory()
free_gigs = vm.available / 1024 ** 2
if vm.available < buffer:
raise MemoryError('Running this requires quite a bit of memory ~ '
f'{vms_gigs:.2f}, you have {free_gigs:.2f} of the '
f'{buffer_gigs:.2f} needed') | def memoryCheck(vms_max_kb) | Lookup vms_max using getCurrentVMSKb | 3.801278 | 3.820752 | 0.994903 |
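An illustrative call (psutil must be installed); per the parameter name the value is a peak virtual memory size in kilobytes:
memoryCheck(8 * 1024 ** 2)  # ~8 GiB expressed in KB; raises MemoryError when too little memory is available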
klass = self.__class__
query = klass.select().where(klass.sequence.is_null(False))
seq_scope_field_names =\
(self.__seq_scope_field_name__ or '').split(',')
for name in seq_scope_field_names:
seq_scope_field = getattr(klass, name, None)
if seq_scope_field:
seq_scope_field_value = getattr(self, name)
query = query.where(seq_scope_field == seq_scope_field_value)
return query | def _sequence_query(self) | query all sequence rows | 2.972918 | 2.850177 | 1.043064 |
SPREADSHEET_ID = devconfig.secrets(spreadsheet_name)
if spreadsheet_service is None:
service = get_oauth_service(readonly=False)
ss = service.spreadsheets()
else:
ss = spreadsheet_service
body = {'values': values}
response = ss.values().update(
spreadsheetId=SPREADSHEET_ID, range=sheet_name,
valueInputOption='USER_ENTERED', body=body).execute()
return response | def update_sheet_values(spreadsheet_name, sheet_name, values, spreadsheet_service=None) | requests = [
{'updateCells': {
'start': {'sheetId': TODO,
'rowIndex': 0,
'columnIndex': 0}
'rows': {'values'}
}
}]
response = ss.batchUpdate(
spreadsheetId=SPREADSHEET_ID, range=sheet_name,
body=body).execute() | 3.233745 | 2.836538 | 1.140032 |
if fetch_notes is None:
fetch_notes = self.fetch_notes
values, notes_index = get_sheet_values(self.name, self.sheet_name,
spreadsheet_service=self._spreadsheet_service,
get_notes=fetch_notes)
self.raw_values = values
self.values = [list(r) for r in zip(*itertools.zip_longest(*self.raw_values, fillvalue=''))]
self.byCol = byCol(self.values, to_index=self.index_columns)
self.notes_index = notes_index | def fetch(self, fetch_notes=None) | update local values from the remote sheet (called automatically at __init__) | 4.077996 | 3.862267 | 1.055856 |
#print(level)
for t in graph.g.triples((None, None, id_)):
if level == 0:
if t[1] != rdflib.term.URIRef('http://www.w3.org/2002/07/owl#someValuesFrom'):
continue
if type_check(t, (rdflib.term.URIRef, rdflib.term.URIRef, rdflib.term.BNode)):
#print(start, t[0])
collect[start].add(t[0])
return # we're done here, otherwise we hit instantiated subclasses
if level > 1:
if t[1] == rdflib.URIRef('http://www.w3.org/1999/02/22-rdf-syntax-ns#first') or \
t[1] == rdflib.URIRef('http://www.w3.org/1999/02/22-rdf-syntax-ns#rest'):
continue
recurse(t[0], start, level + 1)
for phenotype in phenotypes:
recurse(phenotype, phenotype)
return collect | def add_types(graph, phenotypes): # TODO missing expression phenotypes! also basket type somehow :(
collect = defaultdict(set)
def recurse(id_, start, level=0) | Add disjoint union classes so that it is possible to see the invariants
associated with individual phenotypes | 2.481981 | 2.509792 | 0.988919 |
# TODO more efficient to read once and put watch on the file
config = {}
if self.config_file.exists():
with open(self.config_file.as_posix(), 'rt') as f: # 3.5/pypy3 can't open Path directly
config = {k:self._override[k] if
k in self._override else
v for k, v in yaml.safe_load(f).items()}
return config | def config(self) | Allows changing the config on the fly | 7.961851 | 7.723357 | 1.03088 |
with open(self.service_account_file, 'r') as f:
info = json.load(f)
self.service_account_email = info.get('client_email')
if not self.service_account_email:
raise GCECloudException(
'Service account JSON file is invalid for GCE. '
'client_email key is expected. See getting started '
'docs for information on GCE configuration.'
)
self.service_account_project = info.get('project_id')
if not self.service_account_project:
raise GCECloudException(
'Service account JSON file is invalid for GCE. '
'project_id key is expected. See getting started '
'docs for information on GCE configuration.'
) | def _get_service_account_info(self) | Retrieve json dict from service account file. | 2.432348 | 2.305886 | 1.054843 |
ComputeEngine = get_driver(Provider.GCE)
return ComputeEngine(
self.service_account_email,
self.service_account_file,
project=self.service_account_project
) | def _get_driver(self) | Get authenticated GCE driver. | 5.916173 | 3.891684 | 1.520209 |
try:
instance = self.compute_driver.ex_get_node(
self.running_instance_id,
zone=self.region
)
except ResourceNotFoundError as e:
raise GCECloudException(
'Instance with id: {id} cannot be found: {error}'.format(
id=self.running_instance_id, error=e
)
)
return instance | def _get_instance(self) | Retrieve instance matching instance_id. | 4.145273 | 3.536962 | 1.171987 |
key = ipa_utils.generate_public_ssh_key(self.ssh_private_key_file)
return '{user}:{key} {user}'.format(
user=self.ssh_user,
key=key.decode()
) | def _get_ssh_public_key(self) | Generate SSH public key from private key. | 6.282293 | 5.312284 | 1.182597 |
metadata = {'key': 'ssh-keys', 'value': self.ssh_public_key}
self.running_instance_id = ipa_utils.generate_instance_name(
'gce-ipa-test'
)
self.logger.debug('ID of instance: %s' % self.running_instance_id)
kwargs = {
'location': self.region,
'ex_metadata': metadata,
'ex_service_accounts': [{
'email': self.service_account_email,
'scopes': ['storage-ro']
}]
}
if self.subnet_id:
kwargs['ex_subnetwork'] = self._get_subnet(self.subnet_id)
kwargs['ex_network'] = kwargs['ex_subnetwork'].network
try:
instance = self.compute_driver.create_node(
self.running_instance_id,
self.instance_type or GCE_DEFAULT_TYPE,
self.image_id,
**kwargs
)
except ResourceNotFoundError as error:
try:
message = error.value['message']
except TypeError:
message = error
raise GCECloudException(
'An error occurred launching instance: {message}.'.format(
message=message
)
)
self.compute_driver.wait_until_running(
[instance],
timeout=self.timeout
) | def _launch_instance(self) | Launch an instance of the given image. | 3.425122 | 3.346012 | 1.023643 |
if not self.region:
raise GCECloudException(
'Zone is required for GCE cloud framework: '
'Example: us-west1-a'
)
try:
zone = self.compute_driver.ex_get_zone(self.region)
except Exception:
zone = None
if not zone:
raise GCECloudException(
'{region} is not a valid GCE zone. '
'Example: us-west1-a'.format(
region=self.region
)
) | def _validate_region(self) | Validate region was passed in and is a valid GCE zone. | 3.587882 | 3.006177 | 1.193503 |
instance = self._get_instance()
if instance.public_ips:
self.instance_ip = instance.public_ips[0]
elif instance.private_ips:
self.instance_ip = instance.private_ips[0]
else:
raise GCECloudException(
'IP address for instance: %s cannot be found.'
% self.running_instance_id
) | def _set_instance_ip(self) | Retrieve and set the instance ip address. | 3.152226 | 2.909523 | 1.083417 |
instance = self._get_instance()
self.compute_driver.ex_start_node(instance)
self.compute_driver.wait_until_running(
[instance],
timeout=self.timeout
) | def _start_instance(self) | Start the instance. | 4.346817 | 4.003379 | 1.085787 |
instance = self._get_instance()
self.compute_driver.ex_stop_node(instance)
self._wait_on_instance('stopped', timeout=self.timeout) | def _stop_instance(self) | Stop the instance. | 5.128904 | 4.926621 | 1.041059 |
''' Crapshoot for the ontology version iri; works if it's properly in the header and correctly formatted '''
version = g.subject_objects( predicate = URIRef( OWL.versionIRI ) )
version = [o for s, o in version]
if len(version) != 1:
print('versioning isn\'t correct')
else:
version = str(version[0])
return version | def grab_rdflib_graph_version(g: Graph) -> str | Crapshoot for the ontology version iri; works if it's properly in the header and correctly formatted | 10.406963 | 4.031502 | 2.581411 |
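A hedged usage sketch of grab_rdflib_graph_version; the file name is hypothetical:
from rdflib import Graph

g = Graph()
g.parse('my_ontology.ttl', format='turtle')
version_iri = grab_rdflib_graph_version(g)  # str of the owl:versionIRI when exactly one is present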
''' Database only accepts the lower case and underscore version of the ID '''
ilx_id = ilx_id.replace('http://uri.interlex.org/base/', '')
if ilx_id[:4] not in ['TMP:', 'tmp_', 'ILX:', 'ilx_']:
raise ValueError(
'Need to provide ilx ID with format ilx_# or ILX:# for given ID ' + ilx_id)
return ilx_id.replace('ILX:', 'ilx_').replace('TMP:', 'tmp_') | def fix_ilx(self, ilx_id: str) -> str | Database only accepts the lower case and underscore version of the ID | 6.555178 | 4.393604 | 1.491982 |
''' Useful for IDs that have gibberish in front of the real ID '''
int_tail = ''
for element in string[::-1]:
try:
int(element)
int_tail = element + int_tail
except:
pass
return int_tail | def pull_int_tail(self, string: str) -> str | Useful for IDs that have gibberish in front of the real ID | 7.017945 | 2.861726 | 2.452347 |
''' Pulls only the code/ID from the iri
I only add the str() conversion for the iri because rdflib objects need to be converted.
'''
fragment = str(iri).rsplit('/')[-1].split(':', 1)[-1].split('#', 1)[-1].split('_', 1)[-1]
return fragment | def extract_fragment(self, iri: str) -> str | Pulls only the code/ID from the iri
I only add the str() conversion for the iri because rdflib objects need to be converted. | 11.344341 | 2.8616 | 3.964335 |
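Worked examples of what the successive splits in extract_fragment leave behind, assuming a hypothetical instance `tools`:
tools.extract_fragment('http://purl.obolibrary.org/obo/UBERON_0000955')  # -> '0000955'
tools.extract_fragment('UBERON:0000955')                                 # -> '0000955'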
''' Returns the row in InterLex associated with the curie
Note:
Presumed to not have duplicate curies in InterLex
Args:
curie: The "prefix:fragment_id" of the existing_id pertaining to the ontology
Returns:
None or dict
'''
ilx_row = self.curie2row.get(curie)
if not ilx_row:
return None
else:
return ilx_row | def curie_search(self, curie:str) -> dict | Returns the row in InterLex associated with the curie
Note:
Presumed to not have duplicate curies in InterLex
Args:
curie: The "prefix:fragment_id" of the existing_id pertaining to the ontology
Returns:
None or dict | 11.35444 | 2.07102 | 5.482535 |
''' Returns the rows in InterLex associated with the fragment
Note:
Presumed to have duplicate fragments in InterLex
Args:
fragment: The fragment_id of the curie pertaining to the ontology
Returns:
None or List[dict]
'''
fragement = self.extract_fragment(fragement)
ilx_rows = self.fragment2rows.get(fragement)
if not ilx_rows:
return None
else:
return ilx_rows | def fragment_search(self, fragement:str) -> List[dict] | Returns the rows in InterLex associated with the fragment
Note:
Presumed to have duplicate fragments in InterLex
Args:
fragment: The fragment_id of the curie pertaining to the ontology
Returns:
None or List[dict] | 9.709425 | 2.359333 | 4.115326 |
''' Returns the rows in InterLex associated with that label
Note:
Presumed to have duplicated labels in InterLex
Args:
label: label of the entity you want to find
Returns:
None or List[dict]
'''
ilx_rows = self.label2rows(self.local_degrade(label))
if not ilx_rows:
return None
else:
return ilx_rows | def label_search(self, label:str) -> List[dict] | Returns the rows in InterLex associated with that label
Note:
Presumed to have duplicated labels in InterLex
Args:
label: label of the entity you want to find
Returns:
None or List[dict] | 11.729119 | 3.048267 | 3.847799 |
''' Sets up the entity to be InterLex ready
Args:
label: name of entity
type: entity's type
Can be any of the following: term, cde, fde, pde, annotation, relationship
uid: usually fine and auto completes to api user ID, but if you provide one with a
clearance higher than 0 you can make your own custom. Good for mass imports by one
person to avoid label collisions.
definition: entity's definition
comment: a foot note regarding either the interpretation of the data or the data itself
superclass: entity is a sub-part of this entity
Example: Organ is a superclass to Brain
synonyms: entity synonyms
existing_ids: existing curie/iris that link data | couldn't format this easier
Returns:
dict
'''
entity = dict(
label = label,
type = type,
)
if uid:
entity['uid'] = uid
if definition:
entity['definition'] = definition
if comment:
entity['comment'] = comment
if superclass:
entity['superclass'] = {'ilx_id':self.fix_ilx(superclass)}
if synonyms:
entity['synonyms'] = [{'literal': syn} for syn in synonyms]
if existing_ids:
if existing_ids[0].get('curie') and existing_ids[0].get('iri'):
pass
else:
exit('Need curie and iri for existing_ids in List[dict] form')
entity['existing_ids'] = existing_ids
return entity | def readyup_entity(
self,
label: str,
type: str,
uid: Union[int, str] = None,
comment: str = None,
definition: str = None,
superclass: str = None,
synonyms: list = None,
existing_ids: List[dict] = None, ) -> dict | Sets up the entity to be InterLex ready
Args:
label: name of entity
type: entity's type
Can be any of the following: term, cde, fde, pde, annotation, relationship
uid: usually fine and auto completes to api user ID, but if you provide one with a
clearance higher than 0 you can make your own custom. Good for mass imports by one
person to avoid label collisions.
definition: entity's definition
comment: a foot note regarding either the interpretation of the data or the data itself
superclass: entity is a sub-part of this entity
Example: Organ is a superclass to Brain
synonyms: entity synonyms
existing_ids: existing curie/iris that link data | couldn't format this easier
Returns:
dict | 8.294979 | 1.724633 | 4.809707 |
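A hedged example call to readyup_entity; the ids and values are illustrative only and `tools` is assumed to be an instance of this class:
entity = tools.readyup_entity(
    label='Brain',
    type='term',
    definition='The part of the central nervous system inside the skull.',
    superclass='ILX:0108124',  # hypothetical superclass id; fix_ilx converts it to ilx_0108124
    synonyms=['Encephalon'],
    existing_ids=[{'iri': 'http://purl.obolibrary.org/obo/UBERON_0000955',
                   'curie': 'UBERON:0000955'}],
)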
''' Helper for exhaustive checks to see if there are any matches at all besides the anchor
OUTPUT:
[
{
'external_ontology_row' : {},
'interlex_row' : {},
'same': {},
},
...
],
'''
def compare_rows(external_row:dict, ilx_row:dict) -> List[dict]:
''' dictionary comparator '''
def compare_values(string1:Union[str, None], string2:Union[str, None]) -> bool:
''' string comparator '''
if string1 is None or string2 is None:
return False
elif not isinstance(string1, str) or not isinstance(string2, str):
return False
elif string1.lower().strip() != string2.lower().strip():
return False
else:
return True
accepted_ilx_keys = ['label', 'definition']
local_diff = set()
for external_key, external_value in external_row.items():
if not external_value:
continue
if isinstance(external_value, list):
external_values = external_value
for external_value in external_values:
for ilx_key, ilx_value in ilx_row.items():
if ilx_key not in accepted_ilx_keys:
continue
if compare_values(external_value, ilx_value):
local_diff.add(
#((external_key, external_value), (ilx_key, ilx_value))
ilx_key # best to just have what you need and infer the rest :)
)
else:
for ilx_key, ilx_value in ilx_row.items():
if ilx_key not in accepted_ilx_keys:
continue
if compare_values(external_value, ilx_value):
local_diff.add(
#((external_key, external_value), (ilx_key, ilx_value))
ilx_key # best to just have what you need and infer the rest :)
)
local_diff = list(local_diff)
diff = {
'external_ontology_row': external_row,
'ilx_row': ilx_row,
'same': local_diff,
}
return diff
diff = []
for check_dict in check_list:
external_ontology_row = check_dict['external_ontology_row']
diff.append(
[compare_rows(external_ontology_row, ilx_row) for ilx_row in check_dict['ilx_rows']]
)
return diff | def __exhaustive_diff(self, check_list:List[dict]) -> List[List[dict]] | Helper for exhaustive checks to see if there are any matches at all besides the anchor
OUTPUT:
[
{
'external_ontology_row' : {},
'interlex_row' : {},
'same': {},
},
...
], | 2.509808 | 1.966277 | 1.276427 |
''' All entities with conflicting labels get a full diff
Args:
ontology: pandas DataFrame created from an ontology where the colnames are predicates
and if classes exist it is also thrown into a the colnames.
label_predicate: usually in qname form and is the colname of the DataFrame for the label
diff: complete exhaustive diff between label matches... will take FOREVER if there are a lot -> n^2
Returns:
inside: entities that are inside of InterLex
outside: entities NOT in InterLex
diff (optional): List[List[dict]]... so complicated but useful diff between matches only '''
inside, outside = [], []
header = ['Index'] + list(ontology.columns)
for row in ontology.itertuples():
row = {header[i]:val for i, val in enumerate(row)}
label_obj = row[label_predicate]
if isinstance(label_obj, list):
if len(label_obj) != 1:
exit('Need to have only 1 label in the cell from the ontology.')
else:
label_obj = label_obj[0]
entity_label = self.local_degrade(label_obj)
ilx_rows = self.label2rows.get(entity_label)
if ilx_rows:
inside.append({
'external_ontology_row': row,
'ilx_rows': ilx_rows,
})
else:
outside.append(row)
if diff:
diff = self.__exhaustive_diff(inside)
return inside, outside, diff
return inside, outside | def exhaustive_label_check( self,
ontology:pd.DataFrame,
label_predicate='rdfs:label',
diff:bool=True, ) -> Tuple[list] | All entities with conflicting labels get a full diff
Args:
ontology: pandas DataFrame created from an ontology where the colnames are predicates
and if classes exist it is also thrown into a the colnames.
label_predicate: usually in qname form and is the colname of the DataFrame for the label
diff: complete exhaustive diff between label matches... will take FOREVER if there are a lot -> n^2
Returns:
inside: entities that are inside of InterLex
outside: entities NOT in InterLex
diff (optional): List[List[dict]]... so complicated but useful diff between matches only | 8.614008 | 2.532708 | 3.401105 |
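A hedged usage sketch of exhaustive_label_check; `tools` and `ontology_df` are assumed names (the DataFrame's columns are predicates such as 'rdfs:label'):
inside, outside, diff = tools.exhaustive_label_check(ontology_df, label_predicate='rdfs:label', diff=True)
print(len(inside), 'entities already in InterLex;', len(outside), 'not found')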
''' All entities with conflicting iris get a full diff to see if they belong
Args:
ontology: pandas DataFrame created from an ontology where the colnames are predicates
and if classes exist it is also thrown into a the colnames.
iri_predicate: usually in qname form and is the colname of the DataFrame for iri
Default is "iri" for graph2pandas module
diff: complete exhaustive diff between iri matches... will take FOREVER if there are a lot -> n^2
Returns:
inside: entities that are inside of InterLex
outside: entities NOT in InterLex
diff (optional): List[List[dict]]... so complicated but useful diff between matches only '''
inside, outside = [], []
header = ['Index'] + list(ontology.columns)
for row in ontology.itertuples():
row = {header[i]:val for i, val in enumerate(row)}
entity_iri = row[iri_predicate]
if isinstance(entity_iri, list):
if len(entity_iri) != 1:
exit('Need to have only 1 iri in the cell from the ontology.')
else:
entity_iri = entity_iri[0]
ilx_row = self.iri2row.get(entity_iri)
if ilx_row:
inside.append({
'external_ontology_row': row,
'ilx_rows': [ilx_row],
})
else:
outside.append(row)
if diff:
diff = self.__exhaustive_diff(inside)
return inside, outside, diff
return inside, outside | def exhaustive_iri_check( self,
ontology:pd.DataFrame,
iri_predicate:str,
diff:bool=True, ) -> Tuple[list] | All entities with conflicting iris get a full diff to see if they belong
Args:
ontology: pandas DataFrame created from an ontology where the colnames are predicates
and if classes exist it is also thrown into a the colnames.
iri_predicate: usually in qname form and is the colname of the DataFrame for iri
Default is "iri" for graph2pandas module
diff: complete exhaustive diff between iri matches... will take FOREVER if there are a lot -> n^2
Returns:
inside: entities that are inside of InterLex
outside: entities NOT in InterLex
diff (optional): List[List[dict]]... so complicated but useful diff between matches only | 9.595849 | 2.337519 | 4.105142 |
''' All entities with conflicting curies get a full diff to see if they belong
Args:
ontology: pandas DataFrame created from an ontology where the colnames are predicates
and if classes exist it is also thrown into a the colnames.
curie_predicate: usually in qname form and is the colname of the DataFrame
curie_prefix: Not all cells in the DataFrame will have complete curies so we extract
the fragement from the cell and use the prefix to complete it.
diff: complete exhaustive diff between curie matches... will take FOREVER if there are a lot -> n^2
Returns:
inside: entities that are inside of InterLex
outside: entities NOT in InterLex
diff (optional): List[List[dict]]... so complicated but useful diff between matches only '''
inside, outside = [], []
curie_prefix = curie_prefix.replace(':', '')  # just in case I forget a colon isn't in a prefix
header = ['Index'] + list(ontology.columns)
for row in ontology.itertuples():
row = {header[i]:val for i, val in enumerate(row)}
entity_curie = row[curie_predicate]
if isinstance(entity_curie, list):
if len(entity_curie) != 1:
exit('Need to have only 1 curie in the cell from the ontology.')
else:
entity_curie = entity_curie[0]
entity_curie = curie_prefix + ':' + self.extract_fragment(entity_curie)
ilx_row = self.curie2row.get(entity_curie)
if ilx_row:
inside.append({
'external_ontology_row': row,
'ilx_rows': [ilx_row],
})
else:
outside.append(row)
if diff:
diff = self.__exhaustive_diff(inside)
return inside, outside, diff
return inside, outside | def exhaustive_curie_check( self,
ontology:pd.DataFrame,
curie_predicate:str,
curie_prefix:str,
diff:bool=True, ) -> Tuple[list] | All entities with conflicting curies get a full diff to see if they belong
Args:
ontology: pandas DataFrame created from an ontology where the colnames are predicates
and if classes exist it is also thrown into a the colnames.
curie_predicate: usually in qname form and is the colname of the DataFrame
curie_prefix: Not all cells in the DataFrame will have complete curies so we extract
the fragement from the cell and use the prefix to complete it.
diff: complete exhaustive diff between curie matches... will take FOREVER if there are a lot -> n^2
Returns:
inside: entities that are inside of InterLex
outside: entities NOT in InterLex
diff (optional): List[List[dict]]... so complicated but useful diff between matches only | 8.119649 | 2.382181 | 3.408493 |
''' All entities with conflicting fragments get a full diff to see if they belong
Args:
ontology: pandas DataFrame created from an ontology where the colnames are predicates
and if classes exist it is also thrown into a the colnames.
iri_curie_fragment_predicate: usually in qname form and is the colname of the DataFrame for iri
Default is "iri" for graph2pandas module
diff: complete exhaustive diff between fragment matches... will take FOREVER if there are a lot -> n^2
Returns:
inside: entities that are inside of InterLex
outside: entities NOT in InterLex
diff (optional): List[List[dict]]... so complicated but useful diff between matches only '''
inside, outside = [], []
header = ['Index'] + list(ontology.columns)
for row in ontology.itertuples():
row = {header[i]:val for i, val in enumerate(row)}
entity_suffix = row[iri_curie_fragment_predicate]
if isinstance(entity_suffix, list):
if len(entity_suffix) != 1:
exit('Need to have only 1 iri in the cell from the ontology.')
else:
entity_suffix = entity_suffix[0]
entity_fragment = self.extract_fragment(entity_suffix)
ilx_rows = self.fragment2rows.get(entity_fragment)
if cross_reference_fragments and ilx_rows:
ilx_rows = [row for row in ilx_rows if entity_fragment.lower() in row['iri'].lower()]
if cross_reference_iris and ilx_rows:
# true suffix of iris
ilx_rows = [row for row in ilx_rows if entity_suffix.rsplit('/', 1)[-1].lower() in row['iri'].lower()]
if ilx_rows:
inside.append({
'external_ontology_row': row,
'ilx_rows': ilx_rows,
})
else:
outside.append(row)
if diff:
diff = self.__exhaustive_diff(inside)
return inside, outside, diff
return inside, outside | def exhaustive_fragment_check( self,
ontology:pd.DataFrame,
iri_curie_fragment_predicate:str = 'iri',
cross_reference_iris:bool = False,
cross_reference_fragments:bool = False,
diff:bool = True, ) -> Tuple[list] | All entities with conflicting fragments get a full diff to see if they belong
Args:
ontology: pandas DataFrame created from an ontology where the colnames are predicates
and if classes exist it is also thrown into a the colnames.
iri_curie_fragment_predicate: usually in qname form and is the colname of the DataFrame for iri
Default is "iri" for graph2pandas module
diff: complete exhaustive diff between fragment matches... will take FOREVER if there are a lot -> n^2
Returns:
inside: entities that are inside of InterLex
outside: entities NOT in InterLex
diff (optional): List[List[dict]]... so complicated but useful diff between matches only | 6.692374 | 2.344815 | 2.854116 |
''' WARNING: RUNTIME IS AWFUL '''
results = []
header = ['Index'] + list(self.existing_ids.columns)
for row in self.existing_ids.itertuples():
row = {header[i]:val for i, val in enumerate(row)}
check_list = [
{
'external_ontology_row': ontology_row,
'ilx_rows': [row],
},
]
# First layer for each external row. Second is for each potential ilx row. It's simple here 1-1.
result = self.__exhaustive_diff(check_list)[0][0]
if result['same']:
results.append(result)
return results | def exhaustive_ontology_ilx_diff_row_only( self, ontology_row: dict ) -> dict | WARNING: RUNTIME IS AWFUL | 8.650434 | 7.077875 | 1.22218 |
''' Combo of label & definition exhaustive check out of convenience
Args:
ontology: pandas DataFrame created from an ontology where the colnames are predicates
and if classes exist it is also thrown into a the colnames.
label_predicate: usually in qname form and is the colname of the DataFrame for the label
diff: complete exhaustive diff between matches... will take FOREVER if there are a lot -> n^2
Returns:
inside: entities that are inside of InterLex
outside: entities NOT in InterLex
diff (optional): List[List[dict]]... so complicated but useful diff between matches only '''
inside, outside = [], []
header = ['Index'] + list(ontology.columns)
for row in ontology.itertuples():
row = {header[i]:val for i, val in enumerate(row)}
label_obj = row[label_predicate]
if isinstance(label_obj, list):
if len(label_obj) != 1:
exit('Need to have only 1 label in the cell from the ontology.')
else:
label_obj = label_obj[0]
entity_label = self.local_degrade(label_obj)
label_search_results = self.label2rows.get(entity_label)
label_ilx_rows = label_search_results if label_search_results else []
definition_ilx_rows = []
for definition_predicate in definition_predicates:
definition_objs = row[definition_predicate]
if not definition_objs:
continue
definition_objs = [definition_objs] if not isinstance(definition_objs, list) else definition_objs
for definition_obj in definition_objs:
definition_obj = self.local_degrade(definition_obj)
definition_search_results = self.definition2rows.get(definition_obj)
if definition_search_results:
definition_ilx_rows.extend(definition_search_results)
ilx_rows = [dict(t) for t in {tuple(d.items()) for d in (label_ilx_rows + definition_ilx_rows)}]
if ilx_rows:
inside.append({
'external_ontology_row': row,
'ilx_rows': ilx_rows,
})
else:
outside.append(row)
if diff:
diff = self.__exhaustive_diff(inside)
return inside, outside, diff
return inside, outside | def combo_exhaustive_label_definition_check( self,
ontology: pd.DataFrame,
label_predicate:str,
definition_predicates:str,
diff = True) -> List[List[dict]] | Combo of label & definition exhaustive check out of convenience
Args:
ontology: pandas DataFrame created from an ontology where the colnames are predicates
and if classes exist it is also thrown into a the colnames.
label_predicate: usually in qname form and is the colname of the DataFrame for the label
diff: complete exhaustive diff between matches... will take FOREVER if there are a lot -> n^2
Returns:
inside: entities that are inside of InterLex
outside: entities NOT in InterLex
diff (optional): List[List[dict]]... so complicated but useful diff between matches only | 4.845417 | 2.183659 | 2.218944 |
if ip:
with ignored(Exception):
client = CLIENT_CACHE[ip]
del CLIENT_CACHE[ip]
client.close()
else:
for client in CLIENT_CACHE.values():
with ignored(Exception):
client.close()
CLIENT_CACHE.clear() | def clear_cache(ip=None) | Clear the client cache or remove key matching the given ip. | 2.84111 | 2.62656 | 1.081685 |
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
while attempts:
try:
client.connect(
ip,
port=port,
username=ssh_user,
key_filename=ssh_private_key_file,
timeout=timeout
)
except: # noqa: E722
attempts -= 1
time.sleep(10)
else:
return client
raise IpaSSHException(
'Failed to establish SSH connection to instance.'
) | def establish_ssh_connection(ip,
ssh_private_key_file,
ssh_user,
port,
attempts=5,
timeout=None) | Establish ssh connection and return paramiko client.
Raises:
IpaSSHException: If connection cannot be established
in given number of attempts. | 2.16567 | 1.959489 | 1.105222 |
try:
stdin, stdout, stderr = client.exec_command(cmd)
err = stderr.read()
out = stdout.read()
if err:
raise IpaSSHException(out.decode() + err.decode())
except: # noqa: E722
raise
return out.decode() | def execute_ssh_command(client, cmd) | Execute given command using paramiko.
Returns:
String output of cmd execution.
Raises:
IpaSSHException: If stderr returns a non-empty string. | 3.212432 | 2.63247 | 1.220311 |
command = 'tar -xf {path}'.format(path=archive_path)
if extract_path:
command += ' -C {extract_path}'.format(extract_path=extract_path)
out = execute_ssh_command(client, command)
return out | def extract_archive(client, archive_path, extract_path=None) | Extract the archive in current path using the provided client.
If extract_path is provided extract the archive there. | 2.606377 | 3.003994 | 0.867637 |
try:
with open(ssh_private_key_file, "rb") as key_file:
key = key_file.read()
except FileNotFoundError:
raise IpaUtilsException(
'SSH private key file: %s cannot be found.' % ssh_private_key_file
)
try:
private_key = serialization.load_pem_private_key(
key,
password=None,
backend=default_backend()
)
except ValueError:
raise IpaUtilsException(
'SSH private key file: %s is not a valid key file.'
% ssh_private_key_file
)
return private_key.public_key().public_bytes(
serialization.Encoding.OpenSSH,
serialization.PublicFormat.OpenSSH
) | def generate_public_ssh_key(ssh_private_key_file) | Generate SSH public key from private key file. | 1.966213 | 1.910621 | 1.029096 |
values = {}
if not os.path.isfile(config_path):
raise IpaUtilsException(
'Config file not found: %s' % config_path
)
config = configparser.ConfigParser()
try:
config.read(config_path)
except Exception:
raise IpaUtilsException(
'Config file format invalid.'
)
try:
values.update(config.items(default))
except Exception:
pass
try:
values.update(config.items(section))
except Exception:
pass
return values | def get_config_values(config_path, section, default='default') | Parse ini config file and return a dict of values.
The provided section overrides any values in default section. | 2.2565 | 2.331986 | 0.96763 |
if ip in CLIENT_CACHE:
return CLIENT_CACHE[ip]
start = time.time()
end = start + timeout
client = None
while time.time() < end:
try:
client = establish_ssh_connection(
ip,
ssh_private_key_file,
ssh_user,
port,
timeout=wait_period
)
execute_ssh_command(client, 'ls')
except: # noqa: E722
if client:
client.close()
wait_period += wait_period
else:
CLIENT_CACHE[ip] = client
return client
raise IpaSSHException(
'Attempt to establish SSH connection failed.'
) | def get_ssh_client(ip,
ssh_private_key_file,
ssh_user='root',
port=22,
timeout=600,
wait_period=10) | Attempt to establish and test ssh connection. | 3.042675 | 2.961067 | 1.02756 |
config_path = os.path.expanduser(config_path)
if not os.path.isfile(config_path):
raise IpaUtilsException(
'Config file not found: %s' % config_path
)
with open(config_path, 'r') as f:
config = yaml.safe_load(f)
return config | def get_yaml_config(config_path) | Load yaml config file and return dictionary.
Todo:
* This will need refactoring similar to the test search. | 2.400403 | 2.648958 | 0.906169 |
test_files = []
section = set()
for name in names:
if name in SYNC_POINTS:
if section:
test_files.append(section)
test_files.append(name)
section = set()
else:
section.add(find_test_file(name, tests))
if section:
test_files.append(section)
return test_files | def parse_sync_points(names, tests) | Slice list of test names on sync points.
If test is test file find full path to file.
Returns:
A list of test file sets and sync point strings.
Examples:
['test_hard_reboot']
[set('test1', 'test2')]
[set('test1', 'test2'), 'test_soft_reboot']
[set('test1', 'test2'), 'test_soft_reboot', set('test3')] | 2.844705 | 3.064406 | 0.928305 |
try:
sftp_client = client.open_sftp()
sftp_client.put(source_file, destination_file)
except Exception as error:
raise IpaUtilsException(
'Error copying file to instance: {0}.'.format(error)
)
finally:
with ignored(Exception):
sftp_client.close() | def put_file(client, source_file, destination_file) | Copy file to instance using Paramiko client connection. | 3.512148 | 3.132316 | 1.121262 |
old = sys.stdout
sys.stdout = fileobj
try:
yield fileobj
finally:
sys.stdout = old | def redirect_output(fileobj) | Redirect standard out to file. | 2.376014 | 2.493166 | 0.953011 |
try:
ssh_file = NamedTemporaryFile(delete=False, mode='w+')
ssh_file.write('Host *\n')
ssh_file.write(' IdentityFile %s\n' % ssh_private_key_file)
ssh_file.write(' User %s' % ssh_user)
ssh_file.close()
yield ssh_file.name
finally:
with ignored(OSError):
os.remove(ssh_file.name) | def ssh_config(ssh_user, ssh_private_key_file) | Create temporary ssh config file. | 2.027084 | 1.941098 | 1.044298 |
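A short usage sketch of ssh_config (assuming the usual @contextmanager decoration, which is not shown here); the user and key path are hypothetical:
with ssh_config('root', '~/.ssh/id_rsa') as config_path:
    # config_path is a temporary file holding the Host * block; it is removed on exit.
    print(open(config_path).read())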
if not test_log and not clear:
raise IpaUtilsException(
'A test log or clear flag must be provided.'
)
if clear:
with ignored(OSError):
os.remove(history_log)
else:
history_dir = os.path.dirname(history_log)
if not os.path.isdir(history_dir):
try:
os.makedirs(history_dir)
except OSError as error:
raise IpaUtilsException(
'Unable to create directory: %s' % error
)
with open(history_log, 'a+') as f:
# Using append mode creates file if it does not exist
if description:
description = '"%s"' % description
out = '{} {}'.format(
test_log,
description or ''
)
f.write(out.strip() + '\n') | def update_history_log(history_log,
clear=False,
description=None,
test_log=None) | Update the history log file with item.
If the clear flag is provided, the history log file is deleted. | 3.051874 | 3.002425 | 1.01647 |
if not self._compiled_regex.match(value):
raise ValidationError(
'value {:s} not match r"{:s}"'.format(value, self._regex)) | def validate(self, value) | Validate string by regex
:param value: str
:return: | 6.492203 | 6.509235 | 0.997383 |
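A self-contained sketch of the validator class this method appears to belong to; the class name and the ValidationError type are assumptions.

```python
import re

class ValidationError(Exception):
    """Raised when a value fails validation."""

class RegexValidator:
    def __init__(self, regex):
        self._regex = regex
        self._compiled_regex = re.compile(regex)

    def validate(self, value):
        # Reject any value that does not match the pattern from the start.
        if not self._compiled_regex.match(value):
            raise ValidationError(
                'value {:s} not match r"{:s}"'.format(value, self._regex))

RegexValidator(r'[a-z]+').validate('abc')    # passes silently
# RegexValidator(r'[a-z]+').validate('123')  # would raise ValidationError
```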
'''Updates self.g or self.path because you can only choose one'''
if isinstance(self.path, str) or isinstance(self.path, p):
self.path = str(self.path)
filetype = p(self.path).suffix
if filetype == '.json':
self.g = None
try:
records = open_json(self.path)
return pd.DataFrame(records)
except:
exit('Json file is not in records format.')
if filetype == '.pickle':
self.g = None
return pickle.load(open(self.path, 'rb'))
elif filetype == '.ttl' or filetype == '.rdf':
self.g = rdflib.Graph()
self.g.parse(self.path, format='turtle')
return self.get_sparql_dataframe()
elif filetype == '.nt':
self.g = rdflib.Graph()
self.g.parse(self.path, format='nt')
return self.get_sparql_dataframe()
elif filetype == '.owl' or filetype == '.xrdf':
self.g = rdflib.Graph()
try:
self.g.parse(self.path, format='xml')
except:
# some owl formats are more rdf than owl
self.g.parse(self.path, format='turtle')
return self.get_sparql_dataframe()
else:
exit('Format options: owl, ttl, df_pickle, rdflib.Graph()')
try:
self.path = None
return self.get_sparql_dataframe()
except:
exit('Format options: owl, ttl, df_pickle, rdflib.Graph()')
elif isinstance(self.g, rdflib.graph.Graph):
self.path = None
return self.get_sparql_dataframe()
else:
exit('Obj given is not str, pathlib obj, or an rdflib.Graph()') | def ontology2df(self) | Updates self.g or self.path because you can only choose one | 3.062718 | 2.669144 | 1.147453 |
''' Takes the predicate lists linked to each common name and maps every accepted
predicate and its respective suffix back to the common name to decrease sensitivity.
'''
self.pred2common = {}
for common_name, ext_preds in self.common2preds.items():
for pred in ext_preds:
pred = pred.lower().strip()
self.pred2common[pred] = common_name | def create_pred2common(self) | Takes the predicate lists linked to each common name and maps every accepted
predicate and its respective suffix back to the common name to decrease sensitivity. | 9.360501 | 2.599725 | 3.600574 |
''' Takes the predicate and returns the suffix, lower case, stripped version
'''
original_pred = pred
pred = pred.lower().strip()
if 'http' in pred:
pred = pred.split('/')[-1]
elif ':' in pred:
if pred[-1] != ':': # some matches are "prefix:" only
pred = pred.split(':')[-1]
else:
if not ignore_warning:
exit('Not a valid predicate: ' + original_pred + '. Needs to be an iri "/" or curie ":".')
return pred | def clean_pred(self, pred, ignore_warning=False) | Takes the predicate and returns the suffix, lower case, stripped version | 7.757376 | 5.656579 | 1.37139 |
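A standalone sketch of the normalisation above, with two illustrative inputs; the warning/exit path is omitted.

```python
def clean_pred(pred):
    """Return the lower-cased, stripped suffix of an IRI or CURIE predicate."""
    pred = pred.lower().strip()
    if 'http' in pred:
        return pred.split('/')[-1]
    if ':' in pred and pred[-1] != ':':
        return pred.split(':')[-1]
    return pred

print(clean_pred('http://www.w3.org/2000/01/rdf-schema#label'))  # rdf-schema#label
print(clean_pred('RDFS:label'))                                   # label
```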
''' Gets version of predicate and sees if we have a translation to a common relation.
INPUT:
pred = predicate from the triple
OUTPUT:
Common relationship or None
'''
pred = self.clean_pred(pred)
common_pred = self.pred2common.get(pred)
return common_pred | def get_common_pred(self, pred) | Gets version of predicate and sees if we have a translation to a common relation.
INPUT:
pred = predicate from the triple
OUTPUT:
Common relationship or None | 9.443181 | 2.718756 | 3.473346 |
nic_config = {
'location': region,
'ip_configurations': [{
'name': ip_config_name,
'private_ip_allocation_method': 'Dynamic',
'subnet': {
'id': subnet.id
},
'public_ip_address': {
'id': public_ip.id
},
}]
}
if accelerated_networking:
nic_config['enable_accelerated_networking'] = True
try:
nic_setup = self.network.network_interfaces.create_or_update(
resource_group_name, nic_name, nic_config
)
except Exception as error:
raise AzureCloudException(
'Unable to create network interface: {0}.'.format(
error
)
)
return nic_setup.result() | def _create_network_interface(
self, ip_config_name, nic_name, public_ip, region,
resource_group_name, subnet, accelerated_networking=False
) | Create a network interface in the resource group.
Attach NIC to the subnet and public IP provided. | 1.795648 | 1.795607 | 1.000023 |
public_ip_config = {
'location': region,
'public_ip_allocation_method': 'Dynamic'
}
try:
public_ip_setup = \
self.network.public_ip_addresses.create_or_update(
resource_group_name, public_ip_name, public_ip_config
)
except Exception as error:
raise AzureCloudException(
'Unable to create public IP: {0}.'.format(error)
)
return public_ip_setup.result() | def _create_public_ip(self, public_ip_name, resource_group_name, region) | Create dynamic public IP address in the resource group. | 2.279226 | 2.204682 | 1.033811 |
resource_group_config = {'location': region}
try:
self.resource.resource_groups.create_or_update(
resource_group_name, resource_group_config
)
except Exception as error:
raise AzureCloudException(
'Unable to create resource group: {0}.'.format(error)
) | def _create_resource_group(self, region, resource_group_name) | Create resource group if it does not exist. | 2.850346 | 2.575825 | 1.106576 |
if self.image_publisher:
storage_profile = {
'image_reference': {
'publisher': self.image_publisher,
'offer': self.image_offer,
'sku': self.image_sku,
'version': self.image_version
},
}
else:
for image in self.compute.images.list():
if image.name == self.image_id:
image_id = image.id
break
else:
raise AzureCloudException(
'Image with name {0} not found.'.format(self.image_id)
)
storage_profile = {
'image_reference': {
'id': image_id
}
}
return storage_profile | def _create_storage_profile(self) | Create the storage profile for the instance.
Image reference can be a custom image name or a published urn. | 2.046321 | 1.900373 | 1.0768 |
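For reference, the two storage-profile shapes this method can return, written out as literal dictionaries; the urn and resource id values are examples only.

```python
# Published marketplace image, identified by a publisher:offer:sku:version urn.
urn_profile = {
    'image_reference': {
        'publisher': 'SUSE',
        'offer': 'SLES',
        'sku': '12-SP3',
        'version': '2018.01.04'
    }
}

# Custom image, identified by its full Azure resource id (placeholder path).
custom_profile = {
    'image_reference': {
        'id': '/subscriptions/<sub>/resourceGroups/<rg>/providers/'
              'Microsoft.Compute/images/<image-name>'
    }
}
```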
subnet_config = {'address_prefix': '10.0.0.0/29'}
try:
subnet_setup = self.network.subnets.create_or_update(
resource_group_name, vnet_name, subnet_id, subnet_config
)
except Exception as error:
raise AzureCloudException(
'Unable to create subnet: {0}.'.format(error)
)
return subnet_setup.result() | def _create_subnet(self, resource_group_name, subnet_id, vnet_name) | Create a subnet in the provided vnet and resource group. | 2.549084 | 2.415805 | 1.055169 |
vnet_config = {
'location': region,
'address_space': {
'address_prefixes': ['10.0.0.0/27']
}
}
try:
vnet_setup = self.network.virtual_networks.create_or_update(
resource_group_name, vnet_name, vnet_config
)
except Exception as error:
raise AzureCloudException(
'Unable to create vnet: {0}.'.format(error)
)
vnet_setup.wait() | def _create_virtual_network(self, region, resource_group_name, vnet_name) | Create a vnet in the given resource group with default address space. | 2.279017 | 2.146237 | 1.061866 |
try:
vm_setup = self.compute.virtual_machines.create_or_update(
self.running_instance_id, self.running_instance_id,
vm_config
)
except Exception as error:
raise AzureCloudException(
'An exception occurred creating virtual machine: {0}'.format(
error
)
)
vm_setup.wait() | def _create_vm(self, vm_config) | Attempt to create or update VM instance based on vm_parameters config. | 4.009966 | 3.763278 | 1.065552 |
# Split image ID into its components.
self._process_image_id()
hardware_profile = {
'vm_size': self.instance_type or AZURE_DEFAULT_TYPE
}
network_profile = {
'network_interfaces': [{
'id': interface.id,
'primary': True
}]
}
storage_profile = self._create_storage_profile()
os_profile = {
'computer_name': self.running_instance_id,
'admin_username': self.ssh_user,
'linux_configuration': {
'disable_password_authentication': True,
'ssh': {
'public_keys': [{
'path': '/home/{0}/.ssh/authorized_keys'.format(
self.ssh_user
),
'key_data': self.ssh_public_key
}]
}
}
}
vm_config = {
'location': self.region,
'os_profile': os_profile,
'hardware_profile': hardware_profile,
'storage_profile': storage_profile,
'network_profile': network_profile
}
return vm_config | def _create_vm_config(self, interface) | Create the VM config dictionary.
Requires an existing network interface object. | 2.16124 | 2.087766 | 1.035193 |
try:
instance = self.compute.virtual_machines.get(
self.running_instance_id, self.running_instance_id,
expand='instanceView'
)
except Exception as error:
raise AzureCloudException(
'Unable to retrieve instance: {0}'.format(error)
)
return instance | def _get_instance(self) | Return the instance matching the running_instance_id. | 4.136845 | 3.094366 | 1.336896 |
instance = self._get_instance()
statuses = instance.instance_view.statuses
for status in statuses:
if status.code.startswith('PowerState'):
return status.display_status | def _get_instance_state(self) | Retrieve state of instance. | 4.602909 | 3.867649 | 1.190105 |
try:
client = get_client_from_auth_file(
client_class, auth_path=self.service_account_file
)
except ValueError as error:
raise AzureCloudException(
'Service account file format is invalid: {0}.'.format(error)
)
except KeyError as error:
raise AzureCloudException(
'Service account file missing key: {0}.'.format(error)
)
except Exception as error:
raise AzureCloudException(
'Unable to create resource management client: '
'{0}.'.format(error)
)
return client | def _get_management_client(self, client_class) | Return instance of resource management client. | 2.827691 | 2.581501 | 1.095367 |
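A hedged sketch of the underlying call, assuming the legacy azure-common and azure-mgmt-resource packages are installed; the auth file path is a placeholder.

```python
from azure.common.client_factory import get_client_from_auth_file
from azure.mgmt.resource import ResourceManagementClient

# Builds a client from an SDK auth file; raises if the file is missing or malformed.
resource_client = get_client_from_auth_file(
    ResourceManagementClient, auth_path='/path/to/azure_auth.json'
)
```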
self.running_instance_id = ipa_utils.generate_instance_name(
'azure-ipa-test'
)
self.logger.debug('ID of instance: %s' % self.running_instance_id)
self._set_default_resource_names()
try:
# Try block acts as a transaction. If an exception is raised
# attempt to cleanup the resource group and all created resources.
# Create resource group.
self._create_resource_group(self.region, self.running_instance_id)
if self.subnet_id:
# Use existing vnet/subnet.
subnet = self.network.subnets.get(
self.vnet_resource_group, self.vnet_name, self.subnet_id
)
else:
self.subnet_id = ''.join([self.running_instance_id, '-subnet'])
self.vnet_name = ''.join([self.running_instance_id, '-vnet'])
# Create new vnet
self._create_virtual_network(
self.region, self.running_instance_id, self.vnet_name
)
# Create new subnet in new vnet
subnet = self._create_subnet(
self.running_instance_id, self.subnet_id, self.vnet_name
)
# Setup interface and public ip in resource group.
public_ip = self._create_public_ip(
self.public_ip_name, self.running_instance_id, self.region
)
interface = self._create_network_interface(
self.ip_config_name, self.nic_name, public_ip, self.region,
self.running_instance_id, subnet, self.accelerated_networking
)
# Get dictionary of VM parameters and create instance.
vm_config = self._create_vm_config(interface)
self._create_vm(vm_config)
except Exception:
try:
self._terminate_instance()
except Exception:
pass
raise
else:
# Ensure VM is in the running state.
self._wait_on_instance('VM running', timeout=self.timeout) | def _launch_instance(self) | Create new test instance in a resource group with the same name. | 3.217001 | 3.196338 | 1.006464 |
try:
image_info = self.image_id.strip().split(':')
self.image_publisher = image_info[0]
self.image_offer = image_info[1]
self.image_sku = image_info[2]
self.image_version = image_info[3]
except Exception:
self.image_publisher = None | def _process_image_id(self) | Split image id into component values.
Example: SUSE:SLES:12-SP3:2018.01.04
Publisher:Offer:Sku:Version
If image_id is not in urn format, image_publisher is set to None and the
image id is treated as a custom image name. | 2.555519 | 1.974639 | 1.29417 |
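A tiny illustration of the urn split performed above, using the example urn from the docstring:

```python
image_id = 'SUSE:SLES:12-SP3:2018.01.04'
publisher, offer, sku, version = image_id.strip().split(':')
print(publisher, offer, sku, version)  # SUSE SLES 12-SP3 2018.01.04
```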
self.ip_config_name = ''.join([
self.running_instance_id, '-ip-config'
])
self.nic_name = ''.join([self.running_instance_id, '-nic'])
self.public_ip_name = ''.join([self.running_instance_id, '-public-ip']) | def _set_default_resource_names(self) | Generate names for resources based on the running_instance_id. | 3.350722 | 2.415655 | 1.387086 |
instance = self._get_instance()
image_info = instance.storage_profile.image_reference
if image_info.publisher:
self.image_id = ':'.join([
image_info.publisher, image_info.offer,
image_info.sku, image_info.version
])
else:
self.image_id = image_info.id.rsplit('/', maxsplit=1)[1] | def _set_image_id(self) | If an existing instance is used get image id from deployment. | 2.879179 | 2.587528 | 1.112714 |
try:
ip_address = self.network.public_ip_addresses.get(
self.running_instance_id, self.public_ip_name
).ip_address
except Exception:
try:
ip_address = self.network.network_interfaces.get(
self.running_instance_id, self.nic_name
).ip_configurations[0].private_ip_address
except Exception as error:
raise AzureCloudException(
'Unable to retrieve instance IP address: {0}.'.format(
error
)
)
self.instance_ip = ip_address | def _set_instance_ip(self) | Get the IP address based on instance ID.
If the public IP address is not found, attempt to get the private IP. | 2.735231 | 2.627294 | 1.041083 |
try:
vm_start = self.compute.virtual_machines.start(
self.running_instance_id, self.running_instance_id
)
except Exception as error:
raise AzureCloudException(
'Unable to start instance: {0}.'.format(error)
)
vm_start.wait() | def _start_instance(self) | Start the instance. | 4.441866 | 4.08606 | 1.087078 |
try:
vm_stop = self.compute.virtual_machines.power_off(
self.running_instance_id, self.running_instance_id
)
except Exception as error:
raise AzureCloudException(
'Unable to stop instance: {0}.'.format(error)
)
vm_stop.wait() | def _stop_instance(self) | Stop the instance. | 4.397306 | 4.182819 | 1.051278 |
try:
self.resource.resource_groups.delete(self.running_instance_id)
except Exception as error:
raise AzureCloudException(
'Unable to terminate resource group: {0}.'.format(error)
) | def _terminate_instance(self) | Terminate the resource group and instance. | 6.833002 | 5.174794 | 1.320439 |
''' Determines preferred existing id based on curie prefix in the ranking list '''
ranking = [
'CHEBI',
'NCBITaxon',
'COGPO',
'CAO',
'DICOM',
'UBERON',
'NLX',
'NLXANAT',
'NLXCELL',
'NLXFUNC',
'NLXINV',
'NLXORG',
'NLXRES',
'NLXSUB',
'BIRNLEX',
'SAO',
'NDA.CDE',
'PR',
'IAO',
'NIFEXT',
'OEN',
'ILX',
]
mock_rank = ranking[::-1]
score = []
old_pref_index = None
for i, d in enumerate(data['existing_ids']):
if not d.get('preferred'): # db allows None or '' which will cause a problem
d['preferred'] = 0
if int(d['preferred']) == 1:
old_pref_index = i
if d.get('curie'):
pref = d['curie'].split(':')[0]
if pref in mock_rank:
score.append(mock_rank.index(pref))
else:
score.append(-1)
else:
score.append(-1)
new_pref_index = score.index(max(score))
new_pref_iri = data['existing_ids'][new_pref_index]['iri']
if new_pref_iri.rsplit('/', 1)[0] == 'http://uri.interlex.org/base':
if old_pref_index is not None:
if old_pref_index != new_pref_index:
return data
for e in data['existing_ids']:
e['preferred'] = 0
data['existing_ids'][new_pref_index]['preferred'] = 1
return data | def preferred_change(data) | Determines preferred existing id based on curie prefix in the ranking list | 3.873472 | 3.420182 | 1.132534 |
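A reduced sketch of the prefix-ranking idea above: each existing id is scored by its curie prefix's position in the reversed ranking, so prefixes nearer the top of the ranking score higher. The ranking and records below are abbreviated examples.

```python
ranking = ['CHEBI', 'NCBITaxon', 'UBERON', 'BIRNLEX', 'ILX']  # abbreviated example
mock_rank = ranking[::-1]  # earlier entries in `ranking` now have higher indices

existing_ids = [
    {'curie': 'ILX:0101431'},
    {'curie': 'UBERON:0000955'},
]

scores = []
for record in existing_ids:
    prefix = record['curie'].split(':')[0]
    scores.append(mock_rank.index(prefix) if prefix in mock_rank else -1)

preferred = existing_ids[scores.index(max(scores))]
print(preferred['curie'])  # UBERON:0000955 -- UBERON sits higher in the ranking than ILX
```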
if context.obj is None:
context.obj = {}
context.obj['no_color'] = no_color | def main(context, no_color) | Ipa provides a Python API and command line utility for testing images.
It can be used to test images in the Public Cloud (AWS, Azure, GCE, etc.). | 2.961859 | 3.978512 | 0.744464 |
if context.obj is None:
context.obj = {}
context.obj['history_log'] = history_log
if context.invoked_subcommand is None:
context.invoke(show, item=1) | def results(context, history_log) | Process provided history log and results files. | 3.671419 | 3.950237 | 0.929417 |
history_log = context.obj['history_log']
no_color = context.obj['no_color']
with open(history_log, 'r') as f:
# Get history items
history_items = f.readlines()
if items:
# Split comma separated list and cast indices to integer.
items = [int(item) for item in items.split(',')]
lines = []
for index in items:
lines.append(history_items[len(history_items) - index])
history_items = lines
with tempfile.TemporaryDirectory() as temp_dir:
for item in history_items:
# Copy log and results file,
# update results file with relative path.
archive_history_item(item, temp_dir, no_color)
file_name = ''.join([name, '.tar.gz'])
archive_path = os.path.join(path, file_name)
with tarfile.open(archive_path, "w:gz") as tar:
# Create tar archive
tar.add(temp_dir, arcname='results')
if clear_log:
if items:
# Remove duplicates to prevent unwanted deletion.
items = list(set(items))
# Must delete items from bottom to top of history file
# to preserve indices. (Index 0 is last item in file)
items.sort()
for index in items:
context.invoke(delete, item=index)
else:
context.invoke(clear)
click.echo(
'Exported results history to archive: {0}'.format(archive_path)
) | def archive(context, clear_log, items, path, name) | Archive the history log and all results/log files.
After archive is created optionally clear the history log. | 3.805888 | 3.684416 | 1.032969 |
history_log = context.obj['history_log']
no_color = context.obj['no_color']
try:
with open(history_log, 'r+') as f:
lines = f.readlines()
history = lines.pop(len(lines) - item)
f.seek(0)
f.write(''.join(lines))
f.flush()
f.truncate()
except IndexError:
echo_style(
'History result at index %s does not exist.' % item,
no_color,
fg='red'
)
sys.exit(1)
except Exception as error:
echo_style(
'Unable to delete result item {0}. {1}'.format(item, error),
no_color,
fg='red'
)
sys.exit(1)
log_file = get_log_file_from_item(history)
try:
os.remove(log_file)
except Exception:
echo_style(
'Unable to delete results file for item {0}.'.format(item),
no_color,
fg='red'
)
try:
os.remove(log_file.rsplit('.', 1)[0] + '.results')
except Exception:
echo_style(
'Unable to delete log file for item {0}.'.format(item),
no_color,
fg='red'
) | def delete(context, item) | Delete the specified history item from the history log. | 2.385983 | 2.297978 | 1.038297 |