Column schema of the records below (types and observed value ranges):

id: int32, values 0 to 252k
repo: string, length 7 to 55
path: string, length 4 to 127
func_name: string, length 1 to 88
original_string: string, length 75 to 19.8k
language: string, 1 distinct value
code: string, length 75 to 19.8k
code_tokens: list of string tokens
docstring: string, length 3 to 17.3k
docstring_tokens: list of string tokens
sha: string, length 40
url: string, length 87 to 242
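The records that follow (one field per line, in the column order above) can be modeled as plain Python objects. The sketch below is illustrative only; the class and helper names are hypothetical and not part of any dataset loader.

from dataclasses import dataclass
from typing import List

@dataclass
class CodeRecord:
    # Field names mirror the columns listed above.
    id: int
    repo: str
    path: str
    func_name: str
    original_string: str
    language: str
    code: str
    code_tokens: List[str]
    docstring: str
    docstring_tokens: List[str]
    sha: str
    url: str

def group_by_repo(records):
    # Hypothetical helper: index function names by repository.
    grouped = {}
    for r in records:
        grouped.setdefault(r.repo, []).append(r.func_name)
    return grouped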
17,300
lambdamusic/Ontospy
ontospy/core/entities.py
RDF_Entity._build_qname
def _build_qname(self, uri=None, namespaces=None): """ extracts a qualified name for a uri """ if not uri: uri = self.uri if not namespaces: namespaces = self.namespaces return uri2niceString(uri, namespaces)
python
def _build_qname(self, uri=None, namespaces=None): """ extracts a qualified name for a uri """ if not uri: uri = self.uri if not namespaces: namespaces = self.namespaces return uri2niceString(uri, namespaces)
[ "def", "_build_qname", "(", "self", ",", "uri", "=", "None", ",", "namespaces", "=", "None", ")", ":", "if", "not", "uri", ":", "uri", "=", "self", ".", "uri", "if", "not", "namespaces", ":", "namespaces", "=", "self", ".", "namespaces", "return", "uri2niceString", "(", "uri", ",", "namespaces", ")" ]
extracts a qualified name for a uri
[ "extracts", "a", "qualified", "name", "for", "a", "uri" ]
eb46cb13792b2b87f21babdf976996318eec7571
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/core/entities.py#L93-L99
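The record above relies on Ontospy's uri2niceString helper. As a rough standalone approximation (an assumption about its behavior, not the actual implementation), a qname can be derived by matching the URI against known namespace prefixes:

def to_qname(uri, namespaces):
    # namespaces: iterable of (prefix, base_uri) pairs, e.g. from rdflib's graph.namespaces()
    for prefix, base in namespaces:
        base = str(base)
        if uri.startswith(base):
            return "%s:%s" % (prefix, uri[len(base):])
    return uri  # no matching prefix: fall back to the full URI

print(to_qname("http://www.w3.org/2000/01/rdf-schema#label",
               [("rdfs", "http://www.w3.org/2000/01/rdf-schema#")]))
# -> rdfs:label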
17,301
lambdamusic/Ontospy
ontospy/core/entities.py
RDF_Entity.ancestors
def ancestors(self, cl=None, noduplicates=True): """ returns all ancestors in the taxonomy """ if not cl: cl = self if cl.parents(): bag = [] for x in cl.parents(): if x.uri != cl.uri: # avoid circular relationships bag += [x] + self.ancestors(x, noduplicates) else: bag += [x] # finally: if noduplicates: return remove_duplicates(bag) else: return bag else: return []
python
def ancestors(self, cl=None, noduplicates=True): """ returns all ancestors in the taxonomy """ if not cl: cl = self if cl.parents(): bag = [] for x in cl.parents(): if x.uri != cl.uri: # avoid circular relationships bag += [x] + self.ancestors(x, noduplicates) else: bag += [x] # finally: if noduplicates: return remove_duplicates(bag) else: return bag else: return []
[ "def", "ancestors", "(", "self", ",", "cl", "=", "None", ",", "noduplicates", "=", "True", ")", ":", "if", "not", "cl", ":", "cl", "=", "self", "if", "cl", ".", "parents", "(", ")", ":", "bag", "=", "[", "]", "for", "x", "in", "cl", ".", "parents", "(", ")", ":", "if", "x", ".", "uri", "!=", "cl", ".", "uri", ":", "# avoid circular relationships", "bag", "+=", "[", "x", "]", "+", "self", ".", "ancestors", "(", "x", ",", "noduplicates", ")", "else", ":", "bag", "+=", "[", "x", "]", "# finally:", "if", "noduplicates", ":", "return", "remove_duplicates", "(", "bag", ")", "else", ":", "return", "bag", "else", ":", "return", "[", "]" ]
returns all ancestors in the taxonomy
[ "returns", "all", "ancestors", "in", "the", "taxonomy" ]
eb46cb13792b2b87f21babdf976996318eec7571
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/core/entities.py#L114-L131
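The recursive walk in the record above (and its descendants() counterpart in the next record) follows a general pattern: expand parent links recursively while guarding against cycles. A minimal generic sketch, using a visited set instead of the original's remove_duplicates post-processing:

def collect_ancestors(node, get_parents, seen=None):
    # Depth-first walk over parent links; `seen` prevents revisiting
    # nodes in cyclic or diamond-shaped taxonomies.
    if seen is None:
        seen = set()
    result = []
    for parent in get_parents(node):
        if parent not in seen:
            seen.add(parent)
            result.append(parent)
            result.extend(collect_ancestors(parent, get_parents, seen))
    return result

# Toy taxonomy (hypothetical): child -> list of parents
tree = {"dog": ["mammal"], "mammal": ["animal"], "animal": []}
print(collect_ancestors("dog", lambda n: tree.get(n, [])))
# -> ['mammal', 'animal']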
17,302
lambdamusic/Ontospy
ontospy/core/entities.py
RDF_Entity.descendants
def descendants(self, cl=None, noduplicates=True): """ returns all descendants in the taxonomy """ if not cl: cl = self if cl.children(): bag = [] for x in cl.children(): if x.uri != cl.uri: # avoid circular relationships bag += [x] + self.descendants(x, noduplicates) else: bag += [x] # finally: if noduplicates: return remove_duplicates(bag) else: return bag else: return []
python
def descendants(self, cl=None, noduplicates=True): """ returns all descendants in the taxonomy """ if not cl: cl = self if cl.children(): bag = [] for x in cl.children(): if x.uri != cl.uri: # avoid circular relationships bag += [x] + self.descendants(x, noduplicates) else: bag += [x] # finally: if noduplicates: return remove_duplicates(bag) else: return bag else: return []
[ "def", "descendants", "(", "self", ",", "cl", "=", "None", ",", "noduplicates", "=", "True", ")", ":", "if", "not", "cl", ":", "cl", "=", "self", "if", "cl", ".", "children", "(", ")", ":", "bag", "=", "[", "]", "for", "x", "in", "cl", ".", "children", "(", ")", ":", "if", "x", ".", "uri", "!=", "cl", ".", "uri", ":", "# avoid circular relationships", "bag", "+=", "[", "x", "]", "+", "self", ".", "descendants", "(", "x", ",", "noduplicates", ")", "else", ":", "bag", "+=", "[", "x", "]", "# finally:", "if", "noduplicates", ":", "return", "remove_duplicates", "(", "bag", ")", "else", ":", "return", "bag", "else", ":", "return", "[", "]" ]
returns all descendants in the taxonomy
[ "returns", "all", "descendants", "in", "the", "taxonomy" ]
eb46cb13792b2b87f21babdf976996318eec7571
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/core/entities.py#L133-L150
17,303
lambdamusic/Ontospy
ontospy/core/entities.py
Ontology.annotations
def annotations(self, qname=True): """ wrapper that returns all triples for an onto. By default resources URIs are transformed into qnames """ if qname: return sorted([(uri2niceString(x, self.namespaces) ), (uri2niceString(y, self.namespaces)), z] for x, y, z in self.triples) else: return sorted(self.triples)
python
def annotations(self, qname=True): """ wrapper that returns all triples for an onto. By default resources URIs are transformed into qnames """ if qname: return sorted([(uri2niceString(x, self.namespaces) ), (uri2niceString(y, self.namespaces)), z] for x, y, z in self.triples) else: return sorted(self.triples)
[ "def", "annotations", "(", "self", ",", "qname", "=", "True", ")", ":", "if", "qname", ":", "return", "sorted", "(", "[", "(", "uri2niceString", "(", "x", ",", "self", ".", "namespaces", ")", ")", ",", "(", "uri2niceString", "(", "y", ",", "self", ".", "namespaces", ")", ")", ",", "z", "]", "for", "x", ",", "y", ",", "z", "in", "self", ".", "triples", ")", "else", ":", "return", "sorted", "(", "self", ".", "triples", ")" ]
wrapper that returns all triples for an onto. By default resources URIs are transformed into qnames
[ "wrapper", "that", "returns", "all", "triples", "for", "an", "onto", ".", "By", "default", "resources", "URIs", "are", "transformed", "into", "qnames" ]
eb46cb13792b2b87f21babdf976996318eec7571
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/core/entities.py#L243-L253
17,304
lambdamusic/Ontospy
ontospy/core/entities.py
OntoClass.printStats
def printStats(self): """ shortcut to pull out useful info for interactive use """ printDebug("----------------") printDebug("Parents......: %d" % len(self.parents())) printDebug("Children.....: %d" % len(self.children())) printDebug("Ancestors....: %d" % len(self.ancestors())) printDebug("Descendants..: %d" % len(self.descendants())) printDebug("Domain of....: %d" % len(self.domain_of)) printDebug("Range of.....: %d" % len(self.range_of)) printDebug("Instances....: %d" % self.count()) printDebug("----------------")
python
def printStats(self): """ shortcut to pull out useful info for interactive use """ printDebug("----------------") printDebug("Parents......: %d" % len(self.parents())) printDebug("Children.....: %d" % len(self.children())) printDebug("Ancestors....: %d" % len(self.ancestors())) printDebug("Descendants..: %d" % len(self.descendants())) printDebug("Domain of....: %d" % len(self.domain_of)) printDebug("Range of.....: %d" % len(self.range_of)) printDebug("Instances....: %d" % self.count()) printDebug("----------------")
[ "def", "printStats", "(", "self", ")", ":", "printDebug", "(", "\"----------------\"", ")", "printDebug", "(", "\"Parents......: %d\"", "%", "len", "(", "self", ".", "parents", "(", ")", ")", ")", "printDebug", "(", "\"Children.....: %d\"", "%", "len", "(", "self", ".", "children", "(", ")", ")", ")", "printDebug", "(", "\"Ancestors....: %d\"", "%", "len", "(", "self", ".", "ancestors", "(", ")", ")", ")", "printDebug", "(", "\"Descendants..: %d\"", "%", "len", "(", "self", ".", "descendants", "(", ")", ")", ")", "printDebug", "(", "\"Domain of....: %d\"", "%", "len", "(", "self", ".", "domain_of", ")", ")", "printDebug", "(", "\"Range of.....: %d\"", "%", "len", "(", "self", ".", "range_of", ")", ")", "printDebug", "(", "\"Instances....: %d\"", "%", "self", ".", "count", "(", ")", ")", "printDebug", "(", "\"----------------\"", ")" ]
shortcut to pull out useful info for interactive use
[ "shortcut", "to", "pull", "out", "useful", "info", "for", "interactive", "use" ]
eb46cb13792b2b87f21babdf976996318eec7571
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/core/entities.py#L325-L335
17,305
lambdamusic/Ontospy
ontospy/core/ontospy.py
Ontospy.load_sparql
def load_sparql(self, sparql_endpoint, verbose=False, hide_base_schemas=True, hide_implicit_types=True, hide_implicit_preds=True, credentials=None): """ Set up a SPARQLStore backend as a virtual ontospy graph Note: we're using a 'SPARQLUpdateStore' backend instead of 'SPARQLStore' cause otherwise authentication fails (https://github.com/RDFLib/rdflib/issues/755) @TODO this error seems to be fixed in upcoming rdflib versions https://github.com/RDFLib/rdflib/pull/744 """ try: # graph = rdflib.Graph('SPARQLStore') # graph = rdflib.ConjunctiveGraph('SPARQLStore') graph = rdflib.ConjunctiveGraph('SPARQLUpdateStore') if credentials and type(credentials) == tuple: # https://github.com/RDFLib/rdflib/issues/343 graph.store.setCredentials(credentials[0], credentials[1]) # graph.store.setHTTPAuth('BASIC') # graph.store.setHTTPAuth('DIGEST') graph.open(sparql_endpoint) self.rdflib_graph = graph self.sparql_endpoint = sparql_endpoint self.sources = [sparql_endpoint] self.sparqlHelper = SparqlHelper(self.rdflib_graph, self.sparql_endpoint) self.namespaces = sorted(self.rdflib_graph.namespaces()) except: printDebug("Error trying to connect to Endpoint.") raise
python
def load_sparql(self, sparql_endpoint, verbose=False, hide_base_schemas=True, hide_implicit_types=True, hide_implicit_preds=True, credentials=None): """ Set up a SPARQLStore backend as a virtual ontospy graph Note: we're using a 'SPARQLUpdateStore' backend instead of 'SPARQLStore' cause otherwise authentication fails (https://github.com/RDFLib/rdflib/issues/755) @TODO this error seems to be fixed in upcoming rdflib versions https://github.com/RDFLib/rdflib/pull/744 """ try: # graph = rdflib.Graph('SPARQLStore') # graph = rdflib.ConjunctiveGraph('SPARQLStore') graph = rdflib.ConjunctiveGraph('SPARQLUpdateStore') if credentials and type(credentials) == tuple: # https://github.com/RDFLib/rdflib/issues/343 graph.store.setCredentials(credentials[0], credentials[1]) # graph.store.setHTTPAuth('BASIC') # graph.store.setHTTPAuth('DIGEST') graph.open(sparql_endpoint) self.rdflib_graph = graph self.sparql_endpoint = sparql_endpoint self.sources = [sparql_endpoint] self.sparqlHelper = SparqlHelper(self.rdflib_graph, self.sparql_endpoint) self.namespaces = sorted(self.rdflib_graph.namespaces()) except: printDebug("Error trying to connect to Endpoint.") raise
[ "def", "load_sparql", "(", "self", ",", "sparql_endpoint", ",", "verbose", "=", "False", ",", "hide_base_schemas", "=", "True", ",", "hide_implicit_types", "=", "True", ",", "hide_implicit_preds", "=", "True", ",", "credentials", "=", "None", ")", ":", "try", ":", "# graph = rdflib.Graph('SPARQLStore')", "# graph = rdflib.ConjunctiveGraph('SPARQLStore')", "graph", "=", "rdflib", ".", "ConjunctiveGraph", "(", "'SPARQLUpdateStore'", ")", "if", "credentials", "and", "type", "(", "credentials", ")", "==", "tuple", ":", "# https://github.com/RDFLib/rdflib/issues/343", "graph", ".", "store", ".", "setCredentials", "(", "credentials", "[", "0", "]", ",", "credentials", "[", "1", "]", ")", "# graph.store.setHTTPAuth('BASIC') # graph.store.setHTTPAuth('DIGEST')", "graph", ".", "open", "(", "sparql_endpoint", ")", "self", ".", "rdflib_graph", "=", "graph", "self", ".", "sparql_endpoint", "=", "sparql_endpoint", "self", ".", "sources", "=", "[", "sparql_endpoint", "]", "self", ".", "sparqlHelper", "=", "SparqlHelper", "(", "self", ".", "rdflib_graph", ",", "self", ".", "sparql_endpoint", ")", "self", ".", "namespaces", "=", "sorted", "(", "self", ".", "rdflib_graph", ".", "namespaces", "(", ")", ")", "except", ":", "printDebug", "(", "\"Error trying to connect to Endpoint.\"", ")", "raise" ]
Set up a SPARQLStore backend as a virtual ontospy graph Note: we're using a 'SPARQLUpdateStore' backend instead of 'SPARQLStore' cause otherwise authentication fails (https://github.com/RDFLib/rdflib/issues/755) @TODO this error seems to be fixed in upcoming rdflib versions https://github.com/RDFLib/rdflib/pull/744
[ "Set", "up", "a", "SPARQLStore", "backend", "as", "a", "virtual", "ontospy", "graph" ]
eb46cb13792b2b87f21babdf976996318eec7571
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/core/ontospy.py#L144-L177
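Condensed from the record above, the connection logic boils down to opening an rdflib store against the endpoint. This mirrors the older rdflib API referenced in the docstring (setCredentials in particular); current rdflib releases handle authentication differently, so treat it as a sketch of that era's usage rather than a recipe.

import rdflib

def open_sparql_graph(sparql_endpoint, credentials=None):
    # 'SPARQLUpdateStore' is used instead of 'SPARQLStore' for the
    # authentication reasons noted in the record's docstring.
    graph = rdflib.ConjunctiveGraph('SPARQLUpdateStore')
    if credentials and isinstance(credentials, tuple):
        graph.store.setCredentials(credentials[0], credentials[1])
    graph.open(sparql_endpoint)
    return graph

# Hypothetical endpoint:
# g = open_sparql_graph("http://example.org/sparql", credentials=("user", "pass"))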
17,306
lambdamusic/Ontospy
ontospy/core/ontospy.py
Ontospy.build_all
def build_all(self, verbose=False, hide_base_schemas=True, hide_implicit_types=True, hide_implicit_preds=True): """ Extract all ontology entities from an RDF graph and construct Python representations of them. """ if verbose: printDebug("Scanning entities...", "green") printDebug("----------", "comment") self.build_ontologies() if verbose: printDebug("Ontologies.........: %d" % len(self.all_ontologies), "comment") self.build_classes(hide_base_schemas, hide_implicit_types) if verbose: printDebug("Classes............: %d" % len(self.all_classes), "comment") self.build_properties(hide_implicit_preds) if verbose: printDebug("Properties.........: %d" % len(self.all_properties), "comment") if verbose: printDebug("..annotation.......: %d" % len(self.all_properties_annotation), "comment") if verbose: printDebug("..datatype.........: %d" % len(self.all_properties_datatype), "comment") if verbose: printDebug("..object...........: %d" % len(self.all_properties_object), "comment") self.build_skos_concepts() if verbose: printDebug("Concepts (SKOS)....: %d" % len(self.all_skos_concepts), "comment") self.build_shapes() if verbose: printDebug("Shapes (SHACL).....: %d" % len(self.all_shapes), "comment") # self.__computeTopLayer() self.__computeInferredProperties() if verbose: printDebug("----------", "comment")
python
def build_all(self, verbose=False, hide_base_schemas=True, hide_implicit_types=True, hide_implicit_preds=True): """ Extract all ontology entities from an RDF graph and construct Python representations of them. """ if verbose: printDebug("Scanning entities...", "green") printDebug("----------", "comment") self.build_ontologies() if verbose: printDebug("Ontologies.........: %d" % len(self.all_ontologies), "comment") self.build_classes(hide_base_schemas, hide_implicit_types) if verbose: printDebug("Classes............: %d" % len(self.all_classes), "comment") self.build_properties(hide_implicit_preds) if verbose: printDebug("Properties.........: %d" % len(self.all_properties), "comment") if verbose: printDebug("..annotation.......: %d" % len(self.all_properties_annotation), "comment") if verbose: printDebug("..datatype.........: %d" % len(self.all_properties_datatype), "comment") if verbose: printDebug("..object...........: %d" % len(self.all_properties_object), "comment") self.build_skos_concepts() if verbose: printDebug("Concepts (SKOS)....: %d" % len(self.all_skos_concepts), "comment") self.build_shapes() if verbose: printDebug("Shapes (SHACL).....: %d" % len(self.all_shapes), "comment") # self.__computeTopLayer() self.__computeInferredProperties() if verbose: printDebug("----------", "comment")
[ "def", "build_all", "(", "self", ",", "verbose", "=", "False", ",", "hide_base_schemas", "=", "True", ",", "hide_implicit_types", "=", "True", ",", "hide_implicit_preds", "=", "True", ")", ":", "if", "verbose", ":", "printDebug", "(", "\"Scanning entities...\"", ",", "\"green\"", ")", "printDebug", "(", "\"----------\"", ",", "\"comment\"", ")", "self", ".", "build_ontologies", "(", ")", "if", "verbose", ":", "printDebug", "(", "\"Ontologies.........: %d\"", "%", "len", "(", "self", ".", "all_ontologies", ")", ",", "\"comment\"", ")", "self", ".", "build_classes", "(", "hide_base_schemas", ",", "hide_implicit_types", ")", "if", "verbose", ":", "printDebug", "(", "\"Classes............: %d\"", "%", "len", "(", "self", ".", "all_classes", ")", ",", "\"comment\"", ")", "self", ".", "build_properties", "(", "hide_implicit_preds", ")", "if", "verbose", ":", "printDebug", "(", "\"Properties.........: %d\"", "%", "len", "(", "self", ".", "all_properties", ")", ",", "\"comment\"", ")", "if", "verbose", ":", "printDebug", "(", "\"..annotation.......: %d\"", "%", "len", "(", "self", ".", "all_properties_annotation", ")", ",", "\"comment\"", ")", "if", "verbose", ":", "printDebug", "(", "\"..datatype.........: %d\"", "%", "len", "(", "self", ".", "all_properties_datatype", ")", ",", "\"comment\"", ")", "if", "verbose", ":", "printDebug", "(", "\"..object...........: %d\"", "%", "len", "(", "self", ".", "all_properties_object", ")", ",", "\"comment\"", ")", "self", ".", "build_skos_concepts", "(", ")", "if", "verbose", ":", "printDebug", "(", "\"Concepts (SKOS)....: %d\"", "%", "len", "(", "self", ".", "all_skos_concepts", ")", ",", "\"comment\"", ")", "self", ".", "build_shapes", "(", ")", "if", "verbose", ":", "printDebug", "(", "\"Shapes (SHACL).....: %d\"", "%", "len", "(", "self", ".", "all_shapes", ")", ",", "\"comment\"", ")", "# self.__computeTopLayer()", "self", ".", "__computeInferredProperties", "(", ")", "if", "verbose", ":", "printDebug", "(", "\"----------\"", ",", "\"comment\"", ")" ]
Extract all ontology entities from an RDF graph and construct Python representations of them.
[ "Extract", "all", "ontology", "entities", "from", "an", "RDF", "graph", "and", "construct", "Python", "representations", "of", "them", "." ]
eb46cb13792b2b87f21babdf976996318eec7571
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/core/ontospy.py#L185-L228
17,307
lambdamusic/Ontospy
ontospy/core/ontospy.py
Ontospy.build_ontologies
def build_ontologies(self, exclude_BNodes=False, return_string=False): """ Extract ontology instances info from the graph, then creates python objects for them. Note: often ontology info is nested in structures like this: [ a owl:Ontology ; vann:preferredNamespacePrefix "bsym" ; vann:preferredNamespaceUri "http://bsym.bloomberg.com/sym/" ] Hence there is some logic to deal with these edge cases. """ out = [] qres = self.sparqlHelper.getOntology() if qres: # NOTE: SPARQL returns a list of rdflib.query.ResultRow (~ tuples..) for candidate in qres: if isBlankNode(candidate[0]): if exclude_BNodes: continue else: checkDC_ID = [x for x in self.rdflib_graph.objects( candidate[0], rdflib.namespace.DC.identifier)] if checkDC_ID: out += [Ontology(checkDC_ID[0], namespaces=self.namespaces), ] else: vannprop = rdflib.URIRef( "http://purl.org/vocab/vann/preferredNamespaceUri") vannpref = rdflib.URIRef( "http://purl.org/vocab/vann/preferredNamespacePrefix") checkDC_ID = [x for x in self.rdflib_graph.objects( candidate[0], vannprop)] if checkDC_ID: checkDC_prefix = [ x for x in self.rdflib_graph.objects(candidate[0], vannpref)] if checkDC_prefix: out += [Ontology(checkDC_ID[0], namespaces=self.namespaces, prefPrefix=checkDC_prefix[0])] else: out += [Ontology(checkDC_ID[0], namespaces=self.namespaces)] else: out += [Ontology(candidate[0], namespaces=self.namespaces)] else: pass # printDebug("No owl:Ontologies found") # finally... add all annotations/triples self.all_ontologies = out for onto in self.all_ontologies: onto.triples = self.sparqlHelper.entityTriples(onto.uri) onto._buildGraph()
python
def build_ontologies(self, exclude_BNodes=False, return_string=False): """ Extract ontology instances info from the graph, then creates python objects for them. Note: often ontology info is nested in structures like this: [ a owl:Ontology ; vann:preferredNamespacePrefix "bsym" ; vann:preferredNamespaceUri "http://bsym.bloomberg.com/sym/" ] Hence there is some logic to deal with these edge cases. """ out = [] qres = self.sparqlHelper.getOntology() if qres: # NOTE: SPARQL returns a list of rdflib.query.ResultRow (~ tuples..) for candidate in qres: if isBlankNode(candidate[0]): if exclude_BNodes: continue else: checkDC_ID = [x for x in self.rdflib_graph.objects( candidate[0], rdflib.namespace.DC.identifier)] if checkDC_ID: out += [Ontology(checkDC_ID[0], namespaces=self.namespaces), ] else: vannprop = rdflib.URIRef( "http://purl.org/vocab/vann/preferredNamespaceUri") vannpref = rdflib.URIRef( "http://purl.org/vocab/vann/preferredNamespacePrefix") checkDC_ID = [x for x in self.rdflib_graph.objects( candidate[0], vannprop)] if checkDC_ID: checkDC_prefix = [ x for x in self.rdflib_graph.objects(candidate[0], vannpref)] if checkDC_prefix: out += [Ontology(checkDC_ID[0], namespaces=self.namespaces, prefPrefix=checkDC_prefix[0])] else: out += [Ontology(checkDC_ID[0], namespaces=self.namespaces)] else: out += [Ontology(candidate[0], namespaces=self.namespaces)] else: pass # printDebug("No owl:Ontologies found") # finally... add all annotations/triples self.all_ontologies = out for onto in self.all_ontologies: onto.triples = self.sparqlHelper.entityTriples(onto.uri) onto._buildGraph()
[ "def", "build_ontologies", "(", "self", ",", "exclude_BNodes", "=", "False", ",", "return_string", "=", "False", ")", ":", "out", "=", "[", "]", "qres", "=", "self", ".", "sparqlHelper", ".", "getOntology", "(", ")", "if", "qres", ":", "# NOTE: SPARQL returns a list of rdflib.query.ResultRow (~ tuples..)", "for", "candidate", "in", "qres", ":", "if", "isBlankNode", "(", "candidate", "[", "0", "]", ")", ":", "if", "exclude_BNodes", ":", "continue", "else", ":", "checkDC_ID", "=", "[", "x", "for", "x", "in", "self", ".", "rdflib_graph", ".", "objects", "(", "candidate", "[", "0", "]", ",", "rdflib", ".", "namespace", ".", "DC", ".", "identifier", ")", "]", "if", "checkDC_ID", ":", "out", "+=", "[", "Ontology", "(", "checkDC_ID", "[", "0", "]", ",", "namespaces", "=", "self", ".", "namespaces", ")", ",", "]", "else", ":", "vannprop", "=", "rdflib", ".", "URIRef", "(", "\"http://purl.org/vocab/vann/preferredNamespaceUri\"", ")", "vannpref", "=", "rdflib", ".", "URIRef", "(", "\"http://purl.org/vocab/vann/preferredNamespacePrefix\"", ")", "checkDC_ID", "=", "[", "x", "for", "x", "in", "self", ".", "rdflib_graph", ".", "objects", "(", "candidate", "[", "0", "]", ",", "vannprop", ")", "]", "if", "checkDC_ID", ":", "checkDC_prefix", "=", "[", "x", "for", "x", "in", "self", ".", "rdflib_graph", ".", "objects", "(", "candidate", "[", "0", "]", ",", "vannpref", ")", "]", "if", "checkDC_prefix", ":", "out", "+=", "[", "Ontology", "(", "checkDC_ID", "[", "0", "]", ",", "namespaces", "=", "self", ".", "namespaces", ",", "prefPrefix", "=", "checkDC_prefix", "[", "0", "]", ")", "]", "else", ":", "out", "+=", "[", "Ontology", "(", "checkDC_ID", "[", "0", "]", ",", "namespaces", "=", "self", ".", "namespaces", ")", "]", "else", ":", "out", "+=", "[", "Ontology", "(", "candidate", "[", "0", "]", ",", "namespaces", "=", "self", ".", "namespaces", ")", "]", "else", ":", "pass", "# printDebug(\"No owl:Ontologies found\")", "# finally... add all annotations/triples", "self", ".", "all_ontologies", "=", "out", "for", "onto", "in", "self", ".", "all_ontologies", ":", "onto", ".", "triples", "=", "self", ".", "sparqlHelper", ".", "entityTriples", "(", "onto", ".", "uri", ")", "onto", ".", "_buildGraph", "(", ")" ]
Extract ontology instances info from the graph, then creates python objects for them. Note: often ontology info is nested in structures like this: [ a owl:Ontology ; vann:preferredNamespacePrefix "bsym" ; vann:preferredNamespaceUri "http://bsym.bloomberg.com/sym/" ] Hence there is some logic to deal with these edge cases.
[ "Extract", "ontology", "instances", "info", "from", "the", "graph", "then", "creates", "python", "objects", "for", "them", "." ]
eb46cb13792b2b87f21babdf976996318eec7571
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/core/ontospy.py#L230-L286
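The blank-node handling in the record above can be condensed: when the owl:Ontology subject is a blank node, look for dc:identifier or vann:preferredNamespaceUri as a stand-in URI. A trimmed-down sketch, not the actual Ontospy code path:

import rdflib

VANN_URI = rdflib.URIRef("http://purl.org/vocab/vann/preferredNamespaceUri")

def resolve_ontology_uri(graph, node):
    # For blank-node ontology declarations, fall back to dc:identifier,
    # then vann:preferredNamespaceUri; otherwise keep the node itself.
    if isinstance(node, rdflib.BNode):
        for prop in (rdflib.namespace.DC.identifier, VANN_URI):
            values = list(graph.objects(node, prop))
            if values:
                return values[0]
    return node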
17,308
lambdamusic/Ontospy
ontospy/core/ontospy.py
Ontospy.build_entity_from_uri
def build_entity_from_uri(self, uri, ontospyClass=None): """ Extract RDF statements having a URI as subject, then instantiate the RDF_Entity Python object so that it can be queried further. Passing <ontospyClass> allows to instantiate a user-defined RDF_Entity subclass. NOTE: the entity is not attached to any index. In future version we may create an index for these (individuals?) keeping into account that any existing model entity could be (re)created this way. """ if not ontospyClass: ontospyClass = RDF_Entity elif not issubclass(ontospyClass, RDF_Entity): click.secho("Error: <%s> is not a subclass of ontospy.RDF_Entity" % str(ontospyClass)) return None else: pass qres = self.sparqlHelper.entityTriples(uri) if qres: entity = ontospyClass(rdflib.URIRef(uri), None, self.namespaces) entity.triples = qres entity._buildGraph() # force construction of mini graph # try to add class info test = entity.getValuesForProperty(rdflib.RDF.type) if test: entity.rdftype = test entity.rdftype_qname = [entity._build_qname(x) for x in test] return entity else: return None
python
def build_entity_from_uri(self, uri, ontospyClass=None): """ Extract RDF statements having a URI as subject, then instantiate the RDF_Entity Python object so that it can be queried further. Passing <ontospyClass> allows to instantiate a user-defined RDF_Entity subclass. NOTE: the entity is not attached to any index. In future version we may create an index for these (individuals?) keeping into account that any existing model entity could be (re)created this way. """ if not ontospyClass: ontospyClass = RDF_Entity elif not issubclass(ontospyClass, RDF_Entity): click.secho("Error: <%s> is not a subclass of ontospy.RDF_Entity" % str(ontospyClass)) return None else: pass qres = self.sparqlHelper.entityTriples(uri) if qres: entity = ontospyClass(rdflib.URIRef(uri), None, self.namespaces) entity.triples = qres entity._buildGraph() # force construction of mini graph # try to add class info test = entity.getValuesForProperty(rdflib.RDF.type) if test: entity.rdftype = test entity.rdftype_qname = [entity._build_qname(x) for x in test] return entity else: return None
[ "def", "build_entity_from_uri", "(", "self", ",", "uri", ",", "ontospyClass", "=", "None", ")", ":", "if", "not", "ontospyClass", ":", "ontospyClass", "=", "RDF_Entity", "elif", "not", "issubclass", "(", "ontospyClass", ",", "RDF_Entity", ")", ":", "click", ".", "secho", "(", "\"Error: <%s> is not a subclass of ontospy.RDF_Entity\"", "%", "str", "(", "ontospyClass", ")", ")", "return", "None", "else", ":", "pass", "qres", "=", "self", ".", "sparqlHelper", ".", "entityTriples", "(", "uri", ")", "if", "qres", ":", "entity", "=", "ontospyClass", "(", "rdflib", ".", "URIRef", "(", "uri", ")", ",", "None", ",", "self", ".", "namespaces", ")", "entity", ".", "triples", "=", "qres", "entity", ".", "_buildGraph", "(", ")", "# force construction of mini graph", "# try to add class info", "test", "=", "entity", ".", "getValuesForProperty", "(", "rdflib", ".", "RDF", ".", "type", ")", "if", "test", ":", "entity", ".", "rdftype", "=", "test", "entity", ".", "rdftype_qname", "=", "[", "entity", ".", "_build_qname", "(", "x", ")", "for", "x", "in", "test", "]", "return", "entity", "else", ":", "return", "None" ]
Extract RDF statements having a URI as subject, then instantiate the RDF_Entity Python object so that it can be queried further. Passing <ontospyClass> allows to instantiate a user-defined RDF_Entity subclass. NOTE: the entity is not attached to any index. In future version we may create an index for these (individuals?) keeping into account that any existing model entity could be (re)created this way.
[ "Extract", "RDF", "statements", "having", "a", "URI", "as", "subject", "then", "instantiate", "the", "RDF_Entity", "Python", "object", "so", "that", "it", "can", "be", "queried", "further", "." ]
eb46cb13792b2b87f21babdf976996318eec7571
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/core/ontospy.py#L561-L588
17,309
lambdamusic/Ontospy
ontospy/core/ontospy.py
Ontospy.printClassTree
def printClassTree(self, element=None, showids=False, labels=False, showtype=False): """ Print nicely into stdout the class tree of an ontology Note: indentation is made so that ids up to 3 digits fit in, plus a space. [123]1-- [1]123-- [12]12-- """ TYPE_MARGIN = 11 # length for owl:class etc.. if not element: # first time for x in self.toplayer_classes: printGenericTree(x, 0, showids, labels, showtype, TYPE_MARGIN) else: printGenericTree(element, 0, showids, labels, showtype, TYPE_MARGIN)
python
def printClassTree(self, element=None, showids=False, labels=False, showtype=False): """ Print nicely into stdout the class tree of an ontology Note: indentation is made so that ids up to 3 digits fit in, plus a space. [123]1-- [1]123-- [12]12-- """ TYPE_MARGIN = 11 # length for owl:class etc.. if not element: # first time for x in self.toplayer_classes: printGenericTree(x, 0, showids, labels, showtype, TYPE_MARGIN) else: printGenericTree(element, 0, showids, labels, showtype, TYPE_MARGIN)
[ "def", "printClassTree", "(", "self", ",", "element", "=", "None", ",", "showids", "=", "False", ",", "labels", "=", "False", ",", "showtype", "=", "False", ")", ":", "TYPE_MARGIN", "=", "11", "# length for owl:class etc..", "if", "not", "element", ":", "# first time", "for", "x", "in", "self", ".", "toplayer_classes", ":", "printGenericTree", "(", "x", ",", "0", ",", "showids", ",", "labels", ",", "showtype", ",", "TYPE_MARGIN", ")", "else", ":", "printGenericTree", "(", "element", ",", "0", ",", "showids", ",", "labels", ",", "showtype", ",", "TYPE_MARGIN", ")" ]
Print nicely into stdout the class tree of an ontology Note: indentation is made so that ids up to 3 digits fit in, plus a space. [123]1-- [1]123-- [12]12--
[ "Print", "nicely", "into", "stdout", "the", "class", "tree", "of", "an", "ontology" ]
eb46cb13792b2b87f21babdf976996318eec7571
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/core/ontospy.py#L1073-L1089
17,310
lambdamusic/Ontospy
ontospy/core/ontospy.py
Ontospy.printPropertyTree
def printPropertyTree(self, element=None, showids=False, labels=False, showtype=False): """ Print nicely into stdout the property tree of an ontology Note: indentation is made so that ids up to 3 digits fit in, plus a space. [123]1-- [1]123-- [12]12-- """ TYPE_MARGIN = 18 # length for owl:AnnotationProperty etc.. if not element: # first time for x in self.toplayer_properties: printGenericTree(x, 0, showids, labels, showtype, TYPE_MARGIN) else: printGenericTree(element, 0, showids, labels, showtype, TYPE_MARGIN)
python
def printPropertyTree(self, element=None, showids=False, labels=False, showtype=False): """ Print nicely into stdout the property tree of an ontology Note: indentation is made so that ids up to 3 digits fit in, plus a space. [123]1-- [1]123-- [12]12-- """ TYPE_MARGIN = 18 # length for owl:AnnotationProperty etc.. if not element: # first time for x in self.toplayer_properties: printGenericTree(x, 0, showids, labels, showtype, TYPE_MARGIN) else: printGenericTree(element, 0, showids, labels, showtype, TYPE_MARGIN)
[ "def", "printPropertyTree", "(", "self", ",", "element", "=", "None", ",", "showids", "=", "False", ",", "labels", "=", "False", ",", "showtype", "=", "False", ")", ":", "TYPE_MARGIN", "=", "18", "# length for owl:AnnotationProperty etc..", "if", "not", "element", ":", "# first time", "for", "x", "in", "self", ".", "toplayer_properties", ":", "printGenericTree", "(", "x", ",", "0", ",", "showids", ",", "labels", ",", "showtype", ",", "TYPE_MARGIN", ")", "else", ":", "printGenericTree", "(", "element", ",", "0", ",", "showids", ",", "labels", ",", "showtype", ",", "TYPE_MARGIN", ")" ]
Print nicely into stdout the property tree of an ontology Note: indentation is made so that ids up to 3 digits fit in, plus a space. [123]1-- [1]123-- [12]12--
[ "Print", "nicely", "into", "stdout", "the", "property", "tree", "of", "an", "ontology" ]
eb46cb13792b2b87f21babdf976996318eec7571
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/core/ontospy.py#L1091-L1107
17,311
lambdamusic/Ontospy
ontospy/extras/hacks/sketch.py
Sketch.add
def add(self, text="", default_continuousAdd=True): """add some turtle text""" if not text and default_continuousAdd: self.continuousAdd() else: pprefix = "" for x,y in self.rdflib_graph.namespaces(): pprefix += "@prefix %s: <%s> . \n" % (x, y) # add final . if missing if text and (not text.strip().endswith(".")): text += " ." # smart replacements text = text.replace(" sub ", " rdfs:subClassOf ") text = text.replace(" class ", " owl:Class ") # finally self.rdflib_graph.parse(data=pprefix+text, format="turtle")
python
def add(self, text="", default_continuousAdd=True): """add some turtle text""" if not text and default_continuousAdd: self.continuousAdd() else: pprefix = "" for x,y in self.rdflib_graph.namespaces(): pprefix += "@prefix %s: <%s> . \n" % (x, y) # add final . if missing if text and (not text.strip().endswith(".")): text += " ." # smart replacements text = text.replace(" sub ", " rdfs:subClassOf ") text = text.replace(" class ", " owl:Class ") # finally self.rdflib_graph.parse(data=pprefix+text, format="turtle")
[ "def", "add", "(", "self", ",", "text", "=", "\"\"", ",", "default_continuousAdd", "=", "True", ")", ":", "if", "not", "text", "and", "default_continuousAdd", ":", "self", ".", "continuousAdd", "(", ")", "else", ":", "pprefix", "=", "\"\"", "for", "x", ",", "y", "in", "self", ".", "rdflib_graph", ".", "namespaces", "(", ")", ":", "pprefix", "+=", "\"@prefix %s: <%s> . \\n\"", "%", "(", "x", ",", "y", ")", "# add final . if missing", "if", "text", "and", "(", "not", "text", ".", "strip", "(", ")", ".", "endswith", "(", "\".\"", ")", ")", ":", "text", "+=", "\" .\"", "# smart replacements", "text", "=", "text", ".", "replace", "(", "\" sub \"", ",", "\" rdfs:subClassOf \"", ")", "text", "=", "text", ".", "replace", "(", "\" class \"", ",", "\" owl:Class \"", ")", "# finally", "self", ".", "rdflib_graph", ".", "parse", "(", "data", "=", "pprefix", "+", "text", ",", "format", "=", "\"turtle\"", ")" ]
add some turtle text
[ "add", "some", "turtle", "text" ]
eb46cb13792b2b87f21babdf976996318eec7571
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/extras/hacks/sketch.py#L79-L94
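The prefix-prepending trick in the record above generalizes to any rdflib graph: emit @prefix lines for the graph's known namespaces so that abbreviated Turtle snippets parse. A small sketch; the example prefix and triple are made up:

import rdflib

def parse_with_prefixes(graph, text):
    # Prepend @prefix declarations for every namespace the graph knows,
    # and make sure the snippet ends with a final '.'.
    prefixes = "".join("@prefix %s: <%s> .\n" % (p, ns) for p, ns in graph.namespaces())
    if text and not text.strip().endswith("."):
        text += " ."
    graph.parse(data=prefixes + text, format="turtle")

g = rdflib.Graph()
g.bind("ex", "http://example.org/")
parse_with_prefixes(g, "ex:Dog a ex:Animal")
print(len(g))  # -> 1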
17,312
lambdamusic/Ontospy
ontospy/extras/hacks/sketch.py
Sketch.rdf_source
def rdf_source(self, aformat="turtle"): """ Serialize graph using the format required """ if aformat and aformat not in self.SUPPORTED_FORMATS: return "Sorry. Allowed formats are %s" % str(self.SUPPORTED_FORMATS) if aformat == "dot": return self.__serializedDot() else: # use stardard rdf serializations return self.rdflib_graph.serialize(format=aformat)
python
def rdf_source(self, aformat="turtle"): """ Serialize graph using the format required """ if aformat and aformat not in self.SUPPORTED_FORMATS: return "Sorry. Allowed formats are %s" % str(self.SUPPORTED_FORMATS) if aformat == "dot": return self.__serializedDot() else: # use stardard rdf serializations return self.rdflib_graph.serialize(format=aformat)
[ "def", "rdf_source", "(", "self", ",", "aformat", "=", "\"turtle\"", ")", ":", "if", "aformat", "and", "aformat", "not", "in", "self", ".", "SUPPORTED_FORMATS", ":", "return", "\"Sorry. Allowed formats are %s\"", "%", "str", "(", "self", ".", "SUPPORTED_FORMATS", ")", "if", "aformat", "==", "\"dot\"", ":", "return", "self", ".", "__serializedDot", "(", ")", "else", ":", "# use stardard rdf serializations", "return", "self", ".", "rdflib_graph", ".", "serialize", "(", "format", "=", "aformat", ")" ]
Serialize graph using the format required
[ "Serialize", "graph", "using", "the", "format", "required" ]
eb46cb13792b2b87f21babdf976996318eec7571
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/extras/hacks/sketch.py#L122-L132
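The serialization branch in the record above defers to rdflib for the standard formats. A minimal round-trip example (the return type of serialize() is bytes or str depending on the rdflib version):

import rdflib

g = rdflib.Graph()
g.parse(data="@prefix ex: <http://example.org/> . ex:a ex:knows ex:b .",
        format="turtle")
print(g.serialize(format="turtle"))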
17,313
lambdamusic/Ontospy
ontospy/extras/hacks/sketch.py
Sketch.omnigraffle
def omnigraffle(self): """ tries to open an export directly in omnigraffle """ temp = self.rdf_source("dot") try: # try to put in the user/tmp folder from os.path import expanduser home = expanduser("~") filename = home + "/tmp/turtle_sketch.dot" f = open(filename, "w") except: filename = "turtle_sketch.dot" f = open(filename, "w") f.write(temp) f.close() try: os.system("open " + filename) except: os.system("start " + filename)
python
def omnigraffle(self): """ tries to open an export directly in omnigraffle """ temp = self.rdf_source("dot") try: # try to put in the user/tmp folder from os.path import expanduser home = expanduser("~") filename = home + "/tmp/turtle_sketch.dot" f = open(filename, "w") except: filename = "turtle_sketch.dot" f = open(filename, "w") f.write(temp) f.close() try: os.system("open " + filename) except: os.system("start " + filename)
[ "def", "omnigraffle", "(", "self", ")", ":", "temp", "=", "self", ".", "rdf_source", "(", "\"dot\"", ")", "try", ":", "# try to put in the user/tmp folder", "from", "os", ".", "path", "import", "expanduser", "home", "=", "expanduser", "(", "\"~\"", ")", "filename", "=", "home", "+", "\"/tmp/turtle_sketch.dot\"", "f", "=", "open", "(", "filename", ",", "\"w\"", ")", "except", ":", "filename", "=", "\"turtle_sketch.dot\"", "f", "=", "open", "(", "filename", ",", "\"w\"", ")", "f", ".", "write", "(", "temp", ")", "f", ".", "close", "(", ")", "try", ":", "os", ".", "system", "(", "\"open \"", "+", "filename", ")", "except", ":", "os", ".", "system", "(", "\"start \"", "+", "filename", ")" ]
tries to open an export directly in omnigraffle
[ "tries", "to", "open", "an", "export", "directly", "in", "omnigraffle" ]
eb46cb13792b2b87f21babdf976996318eec7571
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/extras/hacks/sketch.py#L149-L166
17,314
lambdamusic/Ontospy
ontospy/extras/shell_lib.py
main
def main(): """ standalone line script """ print("Ontospy " + VERSION) Shell()._clear_screen() print(Style.BRIGHT + "** Ontospy Interactive Ontology Browser " + VERSION + " **" + Style.RESET_ALL) # manager.get_or_create_home_repo() Shell().cmdloop() raise SystemExit(1)
python
def main(): """ standalone line script """ print("Ontospy " + VERSION) Shell()._clear_screen() print(Style.BRIGHT + "** Ontospy Interactive Ontology Browser " + VERSION + " **" + Style.RESET_ALL) # manager.get_or_create_home_repo() Shell().cmdloop() raise SystemExit(1)
[ "def", "main", "(", ")", ":", "print", "(", "\"Ontospy \"", "+", "VERSION", ")", "Shell", "(", ")", ".", "_clear_screen", "(", ")", "print", "(", "Style", ".", "BRIGHT", "+", "\"** Ontospy Interactive Ontology Browser \"", "+", "VERSION", "+", "\" **\"", "+", "Style", ".", "RESET_ALL", ")", "# manager.get_or_create_home_repo()", "Shell", "(", ")", ".", "cmdloop", "(", ")", "raise", "SystemExit", "(", "1", ")" ]
standalone line script
[ "standalone", "line", "script" ]
eb46cb13792b2b87f21babdf976996318eec7571
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/extras/shell_lib.py#L1319-L1328
17,315
lambdamusic/Ontospy
ontospy/extras/shell_lib.py
Shell._print
def _print(self, ms, style="TIP"): """ abstraction for managing color printing """ styles1 = {'IMPORTANT': Style.BRIGHT, 'TIP': Style.DIM, 'URI': Style.BRIGHT, 'TEXT': Fore.GREEN, 'MAGENTA': Fore.MAGENTA, 'BLUE': Fore.BLUE, 'GREEN': Fore.GREEN, 'RED': Fore.RED, 'DEFAULT': Style.DIM, } try: print(styles1[style] + ms + Style.RESET_ALL) except: print(styles1['DEFAULT'] + ms + Style.RESET_ALL)
python
def _print(self, ms, style="TIP"): """ abstraction for managing color printing """ styles1 = {'IMPORTANT': Style.BRIGHT, 'TIP': Style.DIM, 'URI': Style.BRIGHT, 'TEXT': Fore.GREEN, 'MAGENTA': Fore.MAGENTA, 'BLUE': Fore.BLUE, 'GREEN': Fore.GREEN, 'RED': Fore.RED, 'DEFAULT': Style.DIM, } try: print(styles1[style] + ms + Style.RESET_ALL) except: print(styles1['DEFAULT'] + ms + Style.RESET_ALL)
[ "def", "_print", "(", "self", ",", "ms", ",", "style", "=", "\"TIP\"", ")", ":", "styles1", "=", "{", "'IMPORTANT'", ":", "Style", ".", "BRIGHT", ",", "'TIP'", ":", "Style", ".", "DIM", ",", "'URI'", ":", "Style", ".", "BRIGHT", ",", "'TEXT'", ":", "Fore", ".", "GREEN", ",", "'MAGENTA'", ":", "Fore", ".", "MAGENTA", ",", "'BLUE'", ":", "Fore", ".", "BLUE", ",", "'GREEN'", ":", "Fore", ".", "GREEN", ",", "'RED'", ":", "Fore", ".", "RED", ",", "'DEFAULT'", ":", "Style", ".", "DIM", ",", "}", "try", ":", "print", "(", "styles1", "[", "style", "]", "+", "ms", "+", "Style", ".", "RESET_ALL", ")", "except", ":", "print", "(", "styles1", "[", "'DEFAULT'", "]", "+", "ms", "+", "Style", ".", "RESET_ALL", ")" ]
abstraction for managing color printing
[ "abstraction", "for", "managing", "color", "printing" ]
eb46cb13792b2b87f21babdf976996318eec7571
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/extras/shell_lib.py#L157-L172
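The style lookup in the record above depends on the colorama package (the Fore and Style constants). A compact variant that falls back to a default style via dict.get instead of catching the exception; the style names shown are a subset of the record's:

from colorama import Fore, Style

STYLES = {
    "IMPORTANT": Style.BRIGHT,
    "TIP": Style.DIM,
    "GREEN": Fore.GREEN,
    "DEFAULT": Style.DIM,
}

def cprint(message, style="TIP"):
    # Unknown style names fall back to DEFAULT rather than raising KeyError.
    print(STYLES.get(style, STYLES["DEFAULT"]) + message + Style.RESET_ALL)

cprint("loaded 12 classes", "GREEN")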
17,316
lambdamusic/Ontospy
ontospy/extras/shell_lib.py
Shell._printM
def _printM(self, messages): """print a list of strings - for the mom used only by stats printout""" if len(messages) == 2: print(Style.BRIGHT + messages[0] + Style.RESET_ALL + Fore.BLUE + messages[1] + Style.RESET_ALL) else: print("Not implemented")
python
def _printM(self, messages): """print a list of strings - for the mom used only by stats printout""" if len(messages) == 2: print(Style.BRIGHT + messages[0] + Style.RESET_ALL + Fore.BLUE + messages[1] + Style.RESET_ALL) else: print("Not implemented")
[ "def", "_printM", "(", "self", ",", "messages", ")", ":", "if", "len", "(", "messages", ")", "==", "2", ":", "print", "(", "Style", ".", "BRIGHT", "+", "messages", "[", "0", "]", "+", "Style", ".", "RESET_ALL", "+", "Fore", ".", "BLUE", "+", "messages", "[", "1", "]", "+", "Style", ".", "RESET_ALL", ")", "else", ":", "print", "(", "\"Not implemented\"", ")" ]
print a list of strings - for the mom used only by stats printout
[ "print", "a", "list", "of", "strings", "-", "for", "the", "mom", "used", "only", "by", "stats", "printout" ]
eb46cb13792b2b87f21babdf976996318eec7571
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/extras/shell_lib.py#L174-L180
17,317
lambdamusic/Ontospy
ontospy/extras/shell_lib.py
Shell._printDescription
def _printDescription(self, hrlinetop=True): """generic method to print out a description""" if hrlinetop: self._print("----------------") NOTFOUND = "[not found]" if self.currentEntity: obj = self.currentEntity['object'] label = obj.bestLabel() or NOTFOUND description = obj.bestDescription() or NOTFOUND print(Style.BRIGHT + "OBJECT TYPE: " + Style.RESET_ALL + Fore.BLACK + uri2niceString(obj.rdftype) + Style.RESET_ALL) print(Style.BRIGHT + "URI : " + Style.RESET_ALL + Fore.GREEN + "<" + unicode(obj.uri) + ">" + Style.RESET_ALL) print(Style.BRIGHT + "TITLE : " + Style.RESET_ALL + Fore.BLACK + label + Style.RESET_ALL) print(Style.BRIGHT + "DESCRIPTION: " + Style.RESET_ALL + Fore.BLACK + description + Style.RESET_ALL) else: self._clear_screen() self._print("Graph: <" + self.current['fullpath'] + ">", 'TIP') self._print("----------------", "TIP") self._printStats(self.current['graph']) for obj in self.current['graph'].all_ontologies: print(Style.BRIGHT + "Ontology URI: " + Style.RESET_ALL + Fore.RED + "<%s>" % str(obj.uri) + Style.RESET_ALL) # self._print("==> Ontology URI: <%s>" % str(obj.uri), "IMPORTANT") # self._print("----------------", "TIP") label = obj.bestLabel() or NOTFOUND description = obj.bestDescription() or NOTFOUND print(Style.BRIGHT + "Title : " + Style.RESET_ALL + Fore.BLACK + label + Style.RESET_ALL) print(Style.BRIGHT + "Description : " + Style.RESET_ALL + Fore.BLACK + description + Style.RESET_ALL) self._print("----------------", "TIP")
python
def _printDescription(self, hrlinetop=True): """generic method to print out a description""" if hrlinetop: self._print("----------------") NOTFOUND = "[not found]" if self.currentEntity: obj = self.currentEntity['object'] label = obj.bestLabel() or NOTFOUND description = obj.bestDescription() or NOTFOUND print(Style.BRIGHT + "OBJECT TYPE: " + Style.RESET_ALL + Fore.BLACK + uri2niceString(obj.rdftype) + Style.RESET_ALL) print(Style.BRIGHT + "URI : " + Style.RESET_ALL + Fore.GREEN + "<" + unicode(obj.uri) + ">" + Style.RESET_ALL) print(Style.BRIGHT + "TITLE : " + Style.RESET_ALL + Fore.BLACK + label + Style.RESET_ALL) print(Style.BRIGHT + "DESCRIPTION: " + Style.RESET_ALL + Fore.BLACK + description + Style.RESET_ALL) else: self._clear_screen() self._print("Graph: <" + self.current['fullpath'] + ">", 'TIP') self._print("----------------", "TIP") self._printStats(self.current['graph']) for obj in self.current['graph'].all_ontologies: print(Style.BRIGHT + "Ontology URI: " + Style.RESET_ALL + Fore.RED + "<%s>" % str(obj.uri) + Style.RESET_ALL) # self._print("==> Ontology URI: <%s>" % str(obj.uri), "IMPORTANT") # self._print("----------------", "TIP") label = obj.bestLabel() or NOTFOUND description = obj.bestDescription() or NOTFOUND print(Style.BRIGHT + "Title : " + Style.RESET_ALL + Fore.BLACK + label + Style.RESET_ALL) print(Style.BRIGHT + "Description : " + Style.RESET_ALL + Fore.BLACK + description + Style.RESET_ALL) self._print("----------------", "TIP")
[ "def", "_printDescription", "(", "self", ",", "hrlinetop", "=", "True", ")", ":", "if", "hrlinetop", ":", "self", ".", "_print", "(", "\"----------------\"", ")", "NOTFOUND", "=", "\"[not found]\"", "if", "self", ".", "currentEntity", ":", "obj", "=", "self", ".", "currentEntity", "[", "'object'", "]", "label", "=", "obj", ".", "bestLabel", "(", ")", "or", "NOTFOUND", "description", "=", "obj", ".", "bestDescription", "(", ")", "or", "NOTFOUND", "print", "(", "Style", ".", "BRIGHT", "+", "\"OBJECT TYPE: \"", "+", "Style", ".", "RESET_ALL", "+", "Fore", ".", "BLACK", "+", "uri2niceString", "(", "obj", ".", "rdftype", ")", "+", "Style", ".", "RESET_ALL", ")", "print", "(", "Style", ".", "BRIGHT", "+", "\"URI : \"", "+", "Style", ".", "RESET_ALL", "+", "Fore", ".", "GREEN", "+", "\"<\"", "+", "unicode", "(", "obj", ".", "uri", ")", "+", "\">\"", "+", "Style", ".", "RESET_ALL", ")", "print", "(", "Style", ".", "BRIGHT", "+", "\"TITLE : \"", "+", "Style", ".", "RESET_ALL", "+", "Fore", ".", "BLACK", "+", "label", "+", "Style", ".", "RESET_ALL", ")", "print", "(", "Style", ".", "BRIGHT", "+", "\"DESCRIPTION: \"", "+", "Style", ".", "RESET_ALL", "+", "Fore", ".", "BLACK", "+", "description", "+", "Style", ".", "RESET_ALL", ")", "else", ":", "self", ".", "_clear_screen", "(", ")", "self", ".", "_print", "(", "\"Graph: <\"", "+", "self", ".", "current", "[", "'fullpath'", "]", "+", "\">\"", ",", "'TIP'", ")", "self", ".", "_print", "(", "\"----------------\"", ",", "\"TIP\"", ")", "self", ".", "_printStats", "(", "self", ".", "current", "[", "'graph'", "]", ")", "for", "obj", "in", "self", ".", "current", "[", "'graph'", "]", ".", "all_ontologies", ":", "print", "(", "Style", ".", "BRIGHT", "+", "\"Ontology URI: \"", "+", "Style", ".", "RESET_ALL", "+", "Fore", ".", "RED", "+", "\"<%s>\"", "%", "str", "(", "obj", ".", "uri", ")", "+", "Style", ".", "RESET_ALL", ")", "# self._print(\"==> Ontology URI: <%s>\" % str(obj.uri), \"IMPORTANT\")", "# self._print(\"----------------\", \"TIP\")", "label", "=", "obj", ".", "bestLabel", "(", ")", "or", "NOTFOUND", "description", "=", "obj", ".", "bestDescription", "(", ")", "or", "NOTFOUND", "print", "(", "Style", ".", "BRIGHT", "+", "\"Title : \"", "+", "Style", ".", "RESET_ALL", "+", "Fore", ".", "BLACK", "+", "label", "+", "Style", ".", "RESET_ALL", ")", "print", "(", "Style", ".", "BRIGHT", "+", "\"Description : \"", "+", "Style", ".", "RESET_ALL", "+", "Fore", ".", "BLACK", "+", "description", "+", "Style", ".", "RESET_ALL", ")", "self", ".", "_print", "(", "\"----------------\"", ",", "\"TIP\"", ")" ]
generic method to print out a description
[ "generic", "method", "to", "print", "out", "a", "description" ]
eb46cb13792b2b87f21babdf976996318eec7571
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/extras/shell_lib.py#L240-L274
17,318
lambdamusic/Ontospy
ontospy/extras/shell_lib.py
Shell._next_ontology
def _next_ontology(self): """Dynamically retrieves the next ontology in the list""" currentfile = self.current['file'] try: idx = self.all_ontologies.index(currentfile) return self.all_ontologies[idx+1] except: return self.all_ontologies[0]
python
def _next_ontology(self): """Dynamically retrieves the next ontology in the list""" currentfile = self.current['file'] try: idx = self.all_ontologies.index(currentfile) return self.all_ontologies[idx+1] except: return self.all_ontologies[0]
[ "def", "_next_ontology", "(", "self", ")", ":", "currentfile", "=", "self", ".", "current", "[", "'file'", "]", "try", ":", "idx", "=", "self", ".", "all_ontologies", ".", "index", "(", "currentfile", ")", "return", "self", ".", "all_ontologies", "[", "idx", "+", "1", "]", "except", ":", "return", "self", ".", "all_ontologies", "[", "0", "]" ]
Dynamically retrieves the next ontology in the list
[ "Dynamically", "retrieves", "the", "next", "ontology", "in", "the", "list" ]
eb46cb13792b2b87f21babdf976996318eec7571
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/extras/shell_lib.py#L519-L526
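The wrap-around lookup in the record above is a small list-cycling idiom; a standalone version that names the exceptions it expects instead of using a bare except:

def next_item(items, current):
    # Return the element after `current`, wrapping to the first item
    # when `current` is last or not present at all.
    try:
        return items[items.index(current) + 1]
    except (ValueError, IndexError):
        return items[0]

print(next_item(["foaf.rdf", "skos.ttl", "dc.ttl"], "dc.ttl"))  # -> foaf.rdf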
17,319
lambdamusic/Ontospy
ontospy/extras/shell_lib.py
Shell._load_ontology
def _load_ontology(self, filename, preview_mode=False): """ Loads an ontology Unless preview_mode=True, it is always loaded from the local repository note: if the ontology does not have a cached version, it is created preview_mode: used to pass a URI/path to be inspected without saving it locally """ if not preview_mode: fullpath = self.LOCAL_MODELS + filename g = manager.get_pickled_ontology(filename) if not g: g = manager.do_pickle_ontology(filename) else: fullpath = filename filename = os.path.basename(os.path.normpath(fullpath)) g = Ontospy(fullpath, verbose=True) self.current = {'file': filename, 'fullpath': fullpath, 'graph': g} self.currentEntity = None self._print_entity_intro(g)
python
def _load_ontology(self, filename, preview_mode=False): """ Loads an ontology Unless preview_mode=True, it is always loaded from the local repository note: if the ontology does not have a cached version, it is created preview_mode: used to pass a URI/path to be inspected without saving it locally """ if not preview_mode: fullpath = self.LOCAL_MODELS + filename g = manager.get_pickled_ontology(filename) if not g: g = manager.do_pickle_ontology(filename) else: fullpath = filename filename = os.path.basename(os.path.normpath(fullpath)) g = Ontospy(fullpath, verbose=True) self.current = {'file': filename, 'fullpath': fullpath, 'graph': g} self.currentEntity = None self._print_entity_intro(g)
[ "def", "_load_ontology", "(", "self", ",", "filename", ",", "preview_mode", "=", "False", ")", ":", "if", "not", "preview_mode", ":", "fullpath", "=", "self", ".", "LOCAL_MODELS", "+", "filename", "g", "=", "manager", ".", "get_pickled_ontology", "(", "filename", ")", "if", "not", "g", ":", "g", "=", "manager", ".", "do_pickle_ontology", "(", "filename", ")", "else", ":", "fullpath", "=", "filename", "filename", "=", "os", ".", "path", ".", "basename", "(", "os", ".", "path", ".", "normpath", "(", "fullpath", ")", ")", "g", "=", "Ontospy", "(", "fullpath", ",", "verbose", "=", "True", ")", "self", ".", "current", "=", "{", "'file'", ":", "filename", ",", "'fullpath'", ":", "fullpath", ",", "'graph'", ":", "g", "}", "self", ".", "currentEntity", "=", "None", "self", ".", "_print_entity_intro", "(", "g", ")" ]
Loads an ontology Unless preview_mode=True, it is always loaded from the local repository note: if the ontology does not have a cached version, it is created preview_mode: used to pass a URI/path to be inspected without saving it locally
[ "Loads", "an", "ontology" ]
eb46cb13792b2b87f21babdf976996318eec7571
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/extras/shell_lib.py#L531-L551
17,320
lambdamusic/Ontospy
ontospy/extras/shell_lib.py
Shell._select_property
def _select_property(self, line): """try to match a property and load it""" g = self.current['graph'] if not line: out = g.all_properties using_pattern = False else: using_pattern = True if line.isdigit(): line = int(line) out = g.get_property(line) if out: if type(out) == type([]): choice = self._selectFromList(out, using_pattern, "property") if choice: self.currentEntity = {'name': choice.locale or choice.uri, 'object': choice, 'type': 'property'} else: self.currentEntity = {'name': out.locale or out.uri, 'object': out, 'type': 'property'} # ..finally: if self.currentEntity: self._print_entity_intro(entity=self.currentEntity) else: print("not found")
python
def _select_property(self, line): """try to match a property and load it""" g = self.current['graph'] if not line: out = g.all_properties using_pattern = False else: using_pattern = True if line.isdigit(): line = int(line) out = g.get_property(line) if out: if type(out) == type([]): choice = self._selectFromList(out, using_pattern, "property") if choice: self.currentEntity = {'name': choice.locale or choice.uri, 'object': choice, 'type': 'property'} else: self.currentEntity = {'name': out.locale or out.uri, 'object': out, 'type': 'property'} # ..finally: if self.currentEntity: self._print_entity_intro(entity=self.currentEntity) else: print("not found")
[ "def", "_select_property", "(", "self", ",", "line", ")", ":", "g", "=", "self", ".", "current", "[", "'graph'", "]", "if", "not", "line", ":", "out", "=", "g", ".", "all_properties", "using_pattern", "=", "False", "else", ":", "using_pattern", "=", "True", "if", "line", ".", "isdigit", "(", ")", ":", "line", "=", "int", "(", "line", ")", "out", "=", "g", ".", "get_property", "(", "line", ")", "if", "out", ":", "if", "type", "(", "out", ")", "==", "type", "(", "[", "]", ")", ":", "choice", "=", "self", ".", "_selectFromList", "(", "out", ",", "using_pattern", ",", "\"property\"", ")", "if", "choice", ":", "self", ".", "currentEntity", "=", "{", "'name'", ":", "choice", ".", "locale", "or", "choice", ".", "uri", ",", "'object'", ":", "choice", ",", "'type'", ":", "'property'", "}", "else", ":", "self", ".", "currentEntity", "=", "{", "'name'", ":", "out", ".", "locale", "or", "out", ".", "uri", ",", "'object'", ":", "out", ",", "'type'", ":", "'property'", "}", "# ..finally:", "if", "self", ".", "currentEntity", ":", "self", ".", "_print_entity_intro", "(", "entity", "=", "self", ".", "currentEntity", ")", "else", ":", "print", "(", "\"not found\"", ")" ]
try to match a property and load it
[ "try", "to", "match", "a", "property", "and", "load", "it" ]
eb46cb13792b2b87f21babdf976996318eec7571
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/extras/shell_lib.py#L597-L623
17,321
lambdamusic/Ontospy
ontospy/extras/shell_lib.py
Shell._select_concept
def _select_concept(self, line): """try to match a class and load it""" g = self.current['graph'] if not line: out = g.all_skos_concepts using_pattern = False else: using_pattern = True if line.isdigit(): line = int(line) out = g.get_skos(line) if out: if type(out) == type([]): choice = self._selectFromList(out, using_pattern, "concept") if choice: self.currentEntity = {'name': choice.locale or choice.uri, 'object': choice, 'type': 'concept'} else: self.currentEntity = {'name': out.locale or out.uri, 'object': out, 'type': 'concept'} # ..finally: if self.currentEntity: self._print_entity_intro(entity=self.currentEntity) else: print("not found")
python
def _select_concept(self, line): """try to match a class and load it""" g = self.current['graph'] if not line: out = g.all_skos_concepts using_pattern = False else: using_pattern = True if line.isdigit(): line = int(line) out = g.get_skos(line) if out: if type(out) == type([]): choice = self._selectFromList(out, using_pattern, "concept") if choice: self.currentEntity = {'name': choice.locale or choice.uri, 'object': choice, 'type': 'concept'} else: self.currentEntity = {'name': out.locale or out.uri, 'object': out, 'type': 'concept'} # ..finally: if self.currentEntity: self._print_entity_intro(entity=self.currentEntity) else: print("not found")
[ "def", "_select_concept", "(", "self", ",", "line", ")", ":", "g", "=", "self", ".", "current", "[", "'graph'", "]", "if", "not", "line", ":", "out", "=", "g", ".", "all_skos_concepts", "using_pattern", "=", "False", "else", ":", "using_pattern", "=", "True", "if", "line", ".", "isdigit", "(", ")", ":", "line", "=", "int", "(", "line", ")", "out", "=", "g", ".", "get_skos", "(", "line", ")", "if", "out", ":", "if", "type", "(", "out", ")", "==", "type", "(", "[", "]", ")", ":", "choice", "=", "self", ".", "_selectFromList", "(", "out", ",", "using_pattern", ",", "\"concept\"", ")", "if", "choice", ":", "self", ".", "currentEntity", "=", "{", "'name'", ":", "choice", ".", "locale", "or", "choice", ".", "uri", ",", "'object'", ":", "choice", ",", "'type'", ":", "'concept'", "}", "else", ":", "self", ".", "currentEntity", "=", "{", "'name'", ":", "out", ".", "locale", "or", "out", ".", "uri", ",", "'object'", ":", "out", ",", "'type'", ":", "'concept'", "}", "# ..finally:", "if", "self", ".", "currentEntity", ":", "self", ".", "_print_entity_intro", "(", "entity", "=", "self", ".", "currentEntity", ")", "else", ":", "print", "(", "\"not found\"", ")" ]
try to match a class and load it
[ "try", "to", "match", "a", "class", "and", "load", "it" ]
eb46cb13792b2b87f21babdf976996318eec7571
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/extras/shell_lib.py#L625-L650
17,322
lambdamusic/Ontospy
ontospy/extras/shell_lib.py
Shell.do_visualize
def do_visualize(self, line): """Visualize an ontology - ie wrapper for export command""" if not self.current: self._help_noontology() return line = line.split() try: # from ..viz.builder import action_visualize from ..ontodocs.builder import action_visualize except: self._print("This command requires the ontodocs package: `pip install ontodocs`") return import webbrowser url = action_visualize(args=self.current['file'], fromshell=True) if url: webbrowser.open(url) return
python
def do_visualize(self, line): """Visualize an ontology - ie wrapper for export command""" if not self.current: self._help_noontology() return line = line.split() try: # from ..viz.builder import action_visualize from ..ontodocs.builder import action_visualize except: self._print("This command requires the ontodocs package: `pip install ontodocs`") return import webbrowser url = action_visualize(args=self.current['file'], fromshell=True) if url: webbrowser.open(url) return
[ "def", "do_visualize", "(", "self", ",", "line", ")", ":", "if", "not", "self", ".", "current", ":", "self", ".", "_help_noontology", "(", ")", "return", "line", "=", "line", ".", "split", "(", ")", "try", ":", "# from ..viz.builder import action_visualize", "from", ".", ".", "ontodocs", ".", "builder", "import", "action_visualize", "except", ":", "self", ".", "_print", "(", "\"This command requires the ontodocs package: `pip install ontodocs`\"", ")", "return", "import", "webbrowser", "url", "=", "action_visualize", "(", "args", "=", "self", ".", "current", "[", "'file'", "]", ",", "fromshell", "=", "True", ")", "if", "url", ":", "webbrowser", ".", "open", "(", "url", ")", "return" ]
Visualize an ontology - i.e. a wrapper for the export command
[ "Visualize", "an", "ontology", "-", "ie", "wrapper", "for", "export", "command" ]
eb46cb13792b2b87f21babdf976996318eec7571
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/extras/shell_lib.py#L964-L984
17,323
lambdamusic/Ontospy
ontospy/extras/shell_lib.py
Shell.do_import
def do_import(self, line): """Import an ontology""" line = line.split() if line and line[0] == "starter-pack": actions.action_bootstrap() elif line and line[0] == "uri": self._print( "------------------\nEnter a valid graph URI: (e.g. http://www.w3.org/2009/08/skos-reference/skos.rdf)") var = input() if var: if var.startswith("http"): try: actions.action_import(var) except: self._print( "OPS... An Unknown Error Occurred - Aborting installation of <%s>" % var) else: self._print("Not valid. TIP: URIs should start with 'http://'") elif line and line[0] == "file": self._print( "------------------\nEnter a full file path: (e.g. '/Users/mike/Desktop/journals.ttl')") var = input() if var: try: actions.action_import(var) except: self._print( "OPS... An Unknown Error Occurred - Aborting installation of <%s>" % var) elif line and line[0] == "repo": actions.action_webimport() else: self.help_import() self.all_ontologies = manager.get_localontologies() return
python
def do_import(self, line): """Import an ontology""" line = line.split() if line and line[0] == "starter-pack": actions.action_bootstrap() elif line and line[0] == "uri": self._print( "------------------\nEnter a valid graph URI: (e.g. http://www.w3.org/2009/08/skos-reference/skos.rdf)") var = input() if var: if var.startswith("http"): try: actions.action_import(var) except: self._print( "OPS... An Unknown Error Occurred - Aborting installation of <%s>" % var) else: self._print("Not valid. TIP: URIs should start with 'http://'") elif line and line[0] == "file": self._print( "------------------\nEnter a full file path: (e.g. '/Users/mike/Desktop/journals.ttl')") var = input() if var: try: actions.action_import(var) except: self._print( "OPS... An Unknown Error Occurred - Aborting installation of <%s>" % var) elif line and line[0] == "repo": actions.action_webimport() else: self.help_import() self.all_ontologies = manager.get_localontologies() return
[ "def", "do_import", "(", "self", ",", "line", ")", ":", "line", "=", "line", ".", "split", "(", ")", "if", "line", "and", "line", "[", "0", "]", "==", "\"starter-pack\"", ":", "actions", ".", "action_bootstrap", "(", ")", "elif", "line", "and", "line", "[", "0", "]", "==", "\"uri\"", ":", "self", ".", "_print", "(", "\"------------------\\nEnter a valid graph URI: (e.g. http://www.w3.org/2009/08/skos-reference/skos.rdf)\"", ")", "var", "=", "input", "(", ")", "if", "var", ":", "if", "var", ".", "startswith", "(", "\"http\"", ")", ":", "try", ":", "actions", ".", "action_import", "(", "var", ")", "except", ":", "self", ".", "_print", "(", "\"OPS... An Unknown Error Occurred - Aborting installation of <%s>\"", "%", "var", ")", "else", ":", "self", ".", "_print", "(", "\"Not valid. TIP: URIs should start with 'http://'\"", ")", "elif", "line", "and", "line", "[", "0", "]", "==", "\"file\"", ":", "self", ".", "_print", "(", "\"------------------\\nEnter a full file path: (e.g. '/Users/mike/Desktop/journals.ttl')\"", ")", "var", "=", "input", "(", ")", "if", "var", ":", "try", ":", "actions", ".", "action_import", "(", "var", ")", "except", ":", "self", ".", "_print", "(", "\"OPS... An Unknown Error Occurred - Aborting installation of <%s>\"", "%", "var", ")", "elif", "line", "and", "line", "[", "0", "]", "==", "\"repo\"", ":", "actions", ".", "action_webimport", "(", ")", "else", ":", "self", ".", "help_import", "(", ")", "self", ".", "all_ontologies", "=", "manager", ".", "get_localontologies", "(", ")", "return" ]
Import an ontology
[ "Import", "an", "ontology" ]
eb46cb13792b2b87f21babdf976996318eec7571
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/extras/shell_lib.py#L986-L1026
17,324
lambdamusic/Ontospy
ontospy/extras/shell_lib.py
Shell.do_file
def do_file(self, line): """PErform some file operation""" opts = self.FILE_OPTS if not self.all_ontologies: self._help_nofiles() return line = line.split() if not line or line[0] not in opts: self.help_file() return if line[0] == "rename": self._rename_file() elif line[0] == "delete": self._delete_file() else: return
python
def do_file(self, line): """PErform some file operation""" opts = self.FILE_OPTS if not self.all_ontologies: self._help_nofiles() return line = line.split() if not line or line[0] not in opts: self.help_file() return if line[0] == "rename": self._rename_file() elif line[0] == "delete": self._delete_file() else: return
[ "def", "do_file", "(", "self", ",", "line", ")", ":", "opts", "=", "self", ".", "FILE_OPTS", "if", "not", "self", ".", "all_ontologies", ":", "self", ".", "_help_nofiles", "(", ")", "return", "line", "=", "line", ".", "split", "(", ")", "if", "not", "line", "or", "line", "[", "0", "]", "not", "in", "opts", ":", "self", ".", "help_file", "(", ")", "return", "if", "line", "[", "0", "]", "==", "\"rename\"", ":", "self", ".", "_rename_file", "(", ")", "elif", "line", "[", "0", "]", "==", "\"delete\"", ":", "self", ".", "_delete_file", "(", ")", "else", ":", "return" ]
Perform some file operation
[ "PErform", "some", "file", "operation" ]
eb46cb13792b2b87f21babdf976996318eec7571
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/extras/shell_lib.py#L1028-L1047
17,325
lambdamusic/Ontospy
ontospy/extras/shell_lib.py
Shell.do_serialize
def do_serialize(self, line): """Serialize an entity into an RDF flavour""" opts = self.SERIALIZE_OPTS if not self.current: self._help_noontology() return line = line.split() g = self.current['graph'] if not line: line = ['turtle'] if line[0] not in opts: self.help_serialize() return elif self.currentEntity: self.currentEntity['object'].printSerialize(line[0]) else: self._print(g.rdf_source(format=line[0]))
python
def do_serialize(self, line): """Serialize an entity into an RDF flavour""" opts = self.SERIALIZE_OPTS if not self.current: self._help_noontology() return line = line.split() g = self.current['graph'] if not line: line = ['turtle'] if line[0] not in opts: self.help_serialize() return elif self.currentEntity: self.currentEntity['object'].printSerialize(line[0]) else: self._print(g.rdf_source(format=line[0]))
[ "def", "do_serialize", "(", "self", ",", "line", ")", ":", "opts", "=", "self", ".", "SERIALIZE_OPTS", "if", "not", "self", ".", "current", ":", "self", ".", "_help_noontology", "(", ")", "return", "line", "=", "line", ".", "split", "(", ")", "g", "=", "self", ".", "current", "[", "'graph'", "]", "if", "not", "line", ":", "line", "=", "[", "'turtle'", "]", "if", "line", "[", "0", "]", "not", "in", "opts", ":", "self", ".", "help_serialize", "(", ")", "return", "elif", "self", ".", "currentEntity", ":", "self", ".", "currentEntity", "[", "'object'", "]", ".", "printSerialize", "(", "line", "[", "0", "]", ")", "else", ":", "self", ".", "_print", "(", "g", ".", "rdf_source", "(", "format", "=", "line", "[", "0", "]", ")", ")" ]
Serialize an entity into an RDF flavour
[ "Serialize", "an", "entity", "into", "an", "RDF", "flavour" ]
eb46cb13792b2b87f21babdf976996318eec7571
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/extras/shell_lib.py#L1049-L1071
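The serialize command in the record above hands the chosen format string on to rdflib. As a point of reference, here is a minimal standalone sketch of the same idea using rdflib directly; the sample triple and variable names are illustrative assumptions, not taken from Ontospy.

# Minimal sketch: serializing a small graph into the RDF flavours the shell accepts.
# The sample triple is made up; rdflib is the only dependency.
from rdflib import Graph, Literal, URIRef
from rdflib.namespace import RDFS

g = Graph()
g.add((URIRef("http://example.org/Person"), RDFS.label, Literal("Person")))

for fmt in ("turtle", "xml", "nt", "n3"):
    print("### %s" % fmt)
    print(g.serialize(format=fmt))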
17,326
lambdamusic/Ontospy
ontospy/extras/shell_lib.py
Shell.do_back
def do_back(self, line): "Go back one step. From entity => ontology; from ontology => ontospy top level." if self.currentEntity: self.currentEntity = None self.prompt = _get_prompt(self.current['file']) else: self.current = None self.prompt = _get_prompt()
python
def do_back(self, line): "Go back one step. From entity => ontology; from ontology => ontospy top level." if self.currentEntity: self.currentEntity = None self.prompt = _get_prompt(self.current['file']) else: self.current = None self.prompt = _get_prompt()
[ "def", "do_back", "(", "self", ",", "line", ")", ":", "if", "self", ".", "currentEntity", ":", "self", ".", "currentEntity", "=", "None", "self", ".", "prompt", "=", "_get_prompt", "(", "self", ".", "current", "[", "'file'", "]", ")", "else", ":", "self", ".", "current", "=", "None", "self", ".", "prompt", "=", "_get_prompt", "(", ")" ]
Go back one step. From entity => ontology; from ontology => ontospy top level.
[ "Go", "back", "one", "step", ".", "From", "entity", "=", ">", "ontology", ";", "from", "ontology", "=", ">", "ontospy", "top", "level", "." ]
eb46cb13792b2b87f21babdf976996318eec7571
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/extras/shell_lib.py#L1100-L1107
17,327
lambdamusic/Ontospy
ontospy/extras/shell_lib.py
Shell.do_zen
def do_zen(self, line): """Inspiring quotes for the working ontologist""" _quote = random.choice(QUOTES) # print(_quote['source']) print(Style.DIM + unicode(_quote['text'])) print(Style.BRIGHT + unicode(_quote['source']) + Style.RESET_ALL)
python
def do_zen(self, line): """Inspiring quotes for the working ontologist""" _quote = random.choice(QUOTES) # print(_quote['source']) print(Style.DIM + unicode(_quote['text'])) print(Style.BRIGHT + unicode(_quote['source']) + Style.RESET_ALL)
[ "def", "do_zen", "(", "self", ",", "line", ")", ":", "_quote", "=", "random", ".", "choice", "(", "QUOTES", ")", "# print(_quote['source'])", "print", "(", "Style", ".", "DIM", "+", "unicode", "(", "_quote", "[", "'text'", "]", ")", ")", "print", "(", "Style", ".", "BRIGHT", "+", "unicode", "(", "_quote", "[", "'source'", "]", ")", "+", "Style", ".", "RESET_ALL", ")" ]
Inspiring quotes for the working ontologist
[ "Inspiring", "quotes", "for", "the", "working", "ontologist" ]
eb46cb13792b2b87f21babdf976996318eec7571
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/extras/shell_lib.py#L1114-L1119
17,328
lambdamusic/Ontospy
ontospy/extras/shell_lib.py
Shell.complete_get
def complete_get(self, text, line, begidx, endidx): """completion for find command""" options = self.GET_OPTS if not text: completions = options else: completions = [f for f in options if f.startswith(text) ] return completions
python
def complete_get(self, text, line, begidx, endidx): """completion for find command""" options = self.GET_OPTS if not text: completions = options else: completions = [f for f in options if f.startswith(text) ] return completions
[ "def", "complete_get", "(", "self", ",", "text", ",", "line", ",", "begidx", ",", "endidx", ")", ":", "options", "=", "self", ".", "GET_OPTS", "if", "not", "text", ":", "completions", "=", "options", "else", ":", "completions", "=", "[", "f", "for", "f", "in", "options", "if", "f", ".", "startswith", "(", "text", ")", "]", "return", "completions" ]
completion for the 'get' command
[ "completion", "for", "find", "command" ]
eb46cb13792b2b87f21babdf976996318eec7571
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/extras/shell_lib.py#L1234-L1246
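The complete_* methods are plain prefix filters over a fixed option list. A tiny generic sketch of the same pattern follows; the option names are made up, not the shell's real GET_OPTS.

# Prefix-based completion, as used by complete_get and complete_info.
def complete(text, options):
    return options if not text else [o for o in options if o.startswith(text)]

print(complete("", ["ontologies", "classes", "properties", "concepts"]))
print(complete("c", ["ontologies", "classes", "properties", "concepts"]))  # ['classes', 'concepts']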
17,329
lambdamusic/Ontospy
ontospy/extras/shell_lib.py
Shell.complete_info
def complete_info(self, text, line, begidx, endidx): """completion for info command""" opts = self.INFO_OPTS if not text: completions = opts else: completions = [f for f in opts if f.startswith(text) ] return completions
python
def complete_info(self, text, line, begidx, endidx): """completion for info command""" opts = self.INFO_OPTS if not text: completions = opts else: completions = [f for f in opts if f.startswith(text) ] return completions
[ "def", "complete_info", "(", "self", ",", "text", ",", "line", ",", "begidx", ",", "endidx", ")", ":", "opts", "=", "self", ".", "INFO_OPTS", "if", "not", "text", ":", "completions", "=", "opts", "else", ":", "completions", "=", "[", "f", "for", "f", "in", "opts", "if", "f", ".", "startswith", "(", "text", ")", "]", "return", "completions" ]
completion for info command
[ "completion", "for", "info", "command" ]
eb46cb13792b2b87f21babdf976996318eec7571
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/extras/shell_lib.py#L1248-L1260
17,330
lambdamusic/Ontospy
ontospy/ontodocs/utils.py
build_D3treeStandard
def build_D3treeStandard(old, MAX_DEPTH, level=1, toplayer=None): """ For d3s examples all we need is a json with name, children and size .. eg { "name": "flare", "children": [ { "name": "analytics", "children": [ { "name": "cluster", "children": [ {"name": "AgglomerativeCluster", "size": 3938}, {"name": "CommunityStructure", "size": 3812}, {"name": "HierarchicalCluster", "size": 6714}, {"name": "MergeEdge", "size": 743} ] }, etc... """ out = [] if not old: old = toplayer for x in old: d = {} # print "*" * level, x.label d['qname'] = x.qname d['name'] = x.bestLabel(quotes=False).replace("_", " ") d['objid'] = x.id if x.children() and level < MAX_DEPTH: d['size'] = len(x.children()) + 5 # fake size d['realsize'] = len(x.children()) # real size d['children'] = build_D3treeStandard(x.children(), MAX_DEPTH, level + 1) else: d['size'] = 1 # default size d['realsize'] = 0 # default size out += [d] return out
python
def build_D3treeStandard(old, MAX_DEPTH, level=1, toplayer=None): """ For d3s examples all we need is a json with name, children and size .. eg { "name": "flare", "children": [ { "name": "analytics", "children": [ { "name": "cluster", "children": [ {"name": "AgglomerativeCluster", "size": 3938}, {"name": "CommunityStructure", "size": 3812}, {"name": "HierarchicalCluster", "size": 6714}, {"name": "MergeEdge", "size": 743} ] }, etc... """ out = [] if not old: old = toplayer for x in old: d = {} # print "*" * level, x.label d['qname'] = x.qname d['name'] = x.bestLabel(quotes=False).replace("_", " ") d['objid'] = x.id if x.children() and level < MAX_DEPTH: d['size'] = len(x.children()) + 5 # fake size d['realsize'] = len(x.children()) # real size d['children'] = build_D3treeStandard(x.children(), MAX_DEPTH, level + 1) else: d['size'] = 1 # default size d['realsize'] = 0 # default size out += [d] return out
[ "def", "build_D3treeStandard", "(", "old", ",", "MAX_DEPTH", ",", "level", "=", "1", ",", "toplayer", "=", "None", ")", ":", "out", "=", "[", "]", "if", "not", "old", ":", "old", "=", "toplayer", "for", "x", "in", "old", ":", "d", "=", "{", "}", "# print \"*\" * level, x.label", "d", "[", "'qname'", "]", "=", "x", ".", "qname", "d", "[", "'name'", "]", "=", "x", ".", "bestLabel", "(", "quotes", "=", "False", ")", ".", "replace", "(", "\"_\"", ",", "\" \"", ")", "d", "[", "'objid'", "]", "=", "x", ".", "id", "if", "x", ".", "children", "(", ")", "and", "level", "<", "MAX_DEPTH", ":", "d", "[", "'size'", "]", "=", "len", "(", "x", ".", "children", "(", ")", ")", "+", "5", "# fake size", "d", "[", "'realsize'", "]", "=", "len", "(", "x", ".", "children", "(", ")", ")", "# real size", "d", "[", "'children'", "]", "=", "build_D3treeStandard", "(", "x", ".", "children", "(", ")", ",", "MAX_DEPTH", ",", "level", "+", "1", ")", "else", ":", "d", "[", "'size'", "]", "=", "1", "# default size", "d", "[", "'realsize'", "]", "=", "0", "# default size", "out", "+=", "[", "d", "]", "return", "out" ]
For d3's examples all we need is a JSON object with name, children and size, e.g. { "name": "flare", "children": [ { "name": "analytics", "children": [ { "name": "cluster", "children": [ {"name": "AgglomerativeCluster", "size": 3938}, {"name": "CommunityStructure", "size": 3812}, {"name": "HierarchicalCluster", "size": 6714}, {"name": "MergeEdge", "size": 743} ] }, etc...
[ "For", "d3s", "examples", "all", "we", "need", "is", "a", "json", "with", "name", "children", "and", "size", "..", "eg" ]
eb46cb13792b2b87f21babdf976996318eec7571
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/ontodocs/utils.py#L11-L51
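To make the output shape of build_D3treeStandard concrete, here is a hedged sketch that drives it with a minimal stand-in node; the Node class below is a test double exposing only the members the function reads (qname, id, bestLabel, children), not a real Ontospy entity, and it assumes build_D3treeStandard has been imported from ontospy.ontodocs.utils.

# Stand-in with only the attributes build_D3treeStandard touches.
class Node:
    def __init__(self, qname, label, objid, children=None):
        self.qname, self.id = qname, objid
        self._label, self._children = label, children or []
    def bestLabel(self, quotes=False):
        return self._label
    def children(self):
        return self._children

leaf = Node("ex:Dog", "Dog", 2)
root = Node("ex:Animal", "Animal", 1, [leaf])
print(build_D3treeStandard([root], MAX_DEPTH=3))
# [{'qname': 'ex:Animal', 'name': 'Animal', 'objid': 1, 'size': 6, 'realsize': 1,
#   'children': [{'qname': 'ex:Dog', 'name': 'Dog', 'objid': 2, 'size': 1, 'realsize': 0}]}]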
17,331
lambdamusic/Ontospy
ontospy/ontodocs/utils.py
build_D3bubbleChart
def build_D3bubbleChart(old, MAX_DEPTH, level=1, toplayer=None): """ Similar to standar d3, but nodes with children need to be duplicated otherwise they are not depicted explicitly but just color coded "name": "all", "children": [ {"name": "Biological Science", "size": 9000}, {"name": "Biological Science", "children": [ {"name": "Biological techniques", "size": 6939}, {"name": "Cell biology", "size": 4166}, {"name": "Drug discovery X", "size": 3620, "children": [ {"name": "Biochemistry X", "size": 4585}, {"name": "Biochemistry X", "size": 4585 }, ]}, {"name": "Drug discovery Y", "size": 3620, "children": [ {"name": "Biochemistry Y", "size": 4585}, {"name": "Biochemistry Y", "size": 4585 }, ]}, {"name": "Drug discovery A", "size": 3620, "children": [ {"name": "Biochemistry A", "size": 4585}, ]}, {"name": "Drug discovery B", "size": 3620, }, ]}, etc... """ out = [] if not old: old = toplayer for x in old: d = {} # print "*" * level, x.label d['qname'] = x.qname d['name'] = x.bestLabel(quotes=False).replace("_", " ") d['objid'] = x.id if x.children() and level < MAX_DEPTH: duplicate_row = {} duplicate_row['qname'] = x.qname duplicate_row['name'] = x.bestLabel(quotes=False).replace("_", " ") duplicate_row['objid'] = x.id duplicate_row['size'] = len(x.children()) + 5 # fake size duplicate_row['realsize'] = len(x.children()) # real size out += [duplicate_row] d['children'] = build_D3bubbleChart(x.children(), MAX_DEPTH, level + 1) else: d['size'] = 1 # default size d['realsize'] = 0 # default size out += [d] return out
python
def build_D3bubbleChart(old, MAX_DEPTH, level=1, toplayer=None): """ Similar to standar d3, but nodes with children need to be duplicated otherwise they are not depicted explicitly but just color coded "name": "all", "children": [ {"name": "Biological Science", "size": 9000}, {"name": "Biological Science", "children": [ {"name": "Biological techniques", "size": 6939}, {"name": "Cell biology", "size": 4166}, {"name": "Drug discovery X", "size": 3620, "children": [ {"name": "Biochemistry X", "size": 4585}, {"name": "Biochemistry X", "size": 4585 }, ]}, {"name": "Drug discovery Y", "size": 3620, "children": [ {"name": "Biochemistry Y", "size": 4585}, {"name": "Biochemistry Y", "size": 4585 }, ]}, {"name": "Drug discovery A", "size": 3620, "children": [ {"name": "Biochemistry A", "size": 4585}, ]}, {"name": "Drug discovery B", "size": 3620, }, ]}, etc... """ out = [] if not old: old = toplayer for x in old: d = {} # print "*" * level, x.label d['qname'] = x.qname d['name'] = x.bestLabel(quotes=False).replace("_", " ") d['objid'] = x.id if x.children() and level < MAX_DEPTH: duplicate_row = {} duplicate_row['qname'] = x.qname duplicate_row['name'] = x.bestLabel(quotes=False).replace("_", " ") duplicate_row['objid'] = x.id duplicate_row['size'] = len(x.children()) + 5 # fake size duplicate_row['realsize'] = len(x.children()) # real size out += [duplicate_row] d['children'] = build_D3bubbleChart(x.children(), MAX_DEPTH, level + 1) else: d['size'] = 1 # default size d['realsize'] = 0 # default size out += [d] return out
[ "def", "build_D3bubbleChart", "(", "old", ",", "MAX_DEPTH", ",", "level", "=", "1", ",", "toplayer", "=", "None", ")", ":", "out", "=", "[", "]", "if", "not", "old", ":", "old", "=", "toplayer", "for", "x", "in", "old", ":", "d", "=", "{", "}", "# print \"*\" * level, x.label", "d", "[", "'qname'", "]", "=", "x", ".", "qname", "d", "[", "'name'", "]", "=", "x", ".", "bestLabel", "(", "quotes", "=", "False", ")", ".", "replace", "(", "\"_\"", ",", "\" \"", ")", "d", "[", "'objid'", "]", "=", "x", ".", "id", "if", "x", ".", "children", "(", ")", "and", "level", "<", "MAX_DEPTH", ":", "duplicate_row", "=", "{", "}", "duplicate_row", "[", "'qname'", "]", "=", "x", ".", "qname", "duplicate_row", "[", "'name'", "]", "=", "x", ".", "bestLabel", "(", "quotes", "=", "False", ")", ".", "replace", "(", "\"_\"", ",", "\" \"", ")", "duplicate_row", "[", "'objid'", "]", "=", "x", ".", "id", "duplicate_row", "[", "'size'", "]", "=", "len", "(", "x", ".", "children", "(", ")", ")", "+", "5", "# fake size", "duplicate_row", "[", "'realsize'", "]", "=", "len", "(", "x", ".", "children", "(", ")", ")", "# real size", "out", "+=", "[", "duplicate_row", "]", "d", "[", "'children'", "]", "=", "build_D3bubbleChart", "(", "x", ".", "children", "(", ")", ",", "MAX_DEPTH", ",", "level", "+", "1", ")", "else", ":", "d", "[", "'size'", "]", "=", "1", "# default size", "d", "[", "'realsize'", "]", "=", "0", "# default size", "out", "+=", "[", "d", "]", "return", "out" ]
Similar to standard d3, but nodes with children need to be duplicated, otherwise they are not depicted explicitly but just color coded: "name": "all", "children": [ {"name": "Biological Science", "size": 9000}, {"name": "Biological Science", "children": [ {"name": "Biological techniques", "size": 6939}, {"name": "Cell biology", "size": 4166}, {"name": "Drug discovery X", "size": 3620, "children": [ {"name": "Biochemistry X", "size": 4585}, {"name": "Biochemistry X", "size": 4585 }, ]}, {"name": "Drug discovery Y", "size": 3620, "children": [ {"name": "Biochemistry Y", "size": 4585}, {"name": "Biochemistry Y", "size": 4585 }, ]}, {"name": "Drug discovery A", "size": 3620, "children": [ {"name": "Biochemistry A", "size": 4585}, ]}, {"name": "Drug discovery B", "size": 3620, }, ]}, etc...
[ "Similar", "to", "standar", "d3", "but", "nodes", "with", "children", "need", "to", "be", "duplicated", "otherwise", "they", "are", "not", "depicted", "explicitly", "but", "just", "color", "coded" ]
eb46cb13792b2b87f21babdf976996318eec7571
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/ontodocs/utils.py#L63-L113
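Compared with the standard tree builder above, build_D3bubbleChart emits every parent twice: once as a plain sized row (the duplicate_row) and once as a container holding its children. For one parent with two children the output has roughly this shape; the values below are illustrative, not produced from real data.

# Shape of build_D3bubbleChart output for one parent with two children.
# Note the parent appears twice: sized row first, container row second.
bubble_rows = [
    {"qname": "ex:Animal", "name": "Animal", "objid": 1, "size": 7, "realsize": 2},
    {"qname": "ex:Animal", "name": "Animal", "objid": 1, "children": [
        {"qname": "ex:Dog", "name": "Dog", "objid": 2, "size": 1, "realsize": 0},
        {"qname": "ex:Cat", "name": "Cat", "objid": 3, "size": 1, "realsize": 0},
    ]},
]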
17,332
lambdamusic/Ontospy
ontospy/ontodocs/viz_factory.py
VizFactory.infer_best_title
def infer_best_title(self): """Selects something usable as a title for an ontospy graph""" if self.ontospy_graph.all_ontologies: return self.ontospy_graph.all_ontologies[0].uri elif self.ontospy_graph.sources: return self.ontospy_graph.sources[0] else: return "Untitled"
python
def infer_best_title(self): """Selects something usable as a title for an ontospy graph""" if self.ontospy_graph.all_ontologies: return self.ontospy_graph.all_ontologies[0].uri elif self.ontospy_graph.sources: return self.ontospy_graph.sources[0] else: return "Untitled"
[ "def", "infer_best_title", "(", "self", ")", ":", "if", "self", ".", "ontospy_graph", ".", "all_ontologies", ":", "return", "self", ".", "ontospy_graph", ".", "all_ontologies", "[", "0", "]", ".", "uri", "elif", "self", ".", "ontospy_graph", ".", "sources", ":", "return", "self", ".", "ontospy_graph", ".", "sources", "[", "0", "]", "else", ":", "return", "\"Untitled\"" ]
Selects something usable as a title for an ontospy graph
[ "Selects", "something", "usable", "as", "a", "title", "for", "an", "ontospy", "graph" ]
eb46cb13792b2b87f21babdf976996318eec7571
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/ontodocs/viz_factory.py#L73-L80
17,333
lambdamusic/Ontospy
ontospy/ontodocs/viz_factory.py
VizFactory.build
def build(self, output_path=""): """method that should be inherited by all vis classes""" self.output_path = self.checkOutputPath(output_path) self._buildStaticFiles() self.final_url = self._buildTemplates() printDebug("Done.", "comment") printDebug("=> %s" % (self.final_url), "comment") return self.final_url
python
def build(self, output_path=""): """method that should be inherited by all vis classes""" self.output_path = self.checkOutputPath(output_path) self._buildStaticFiles() self.final_url = self._buildTemplates() printDebug("Done.", "comment") printDebug("=> %s" % (self.final_url), "comment") return self.final_url
[ "def", "build", "(", "self", ",", "output_path", "=", "\"\"", ")", ":", "self", ".", "output_path", "=", "self", ".", "checkOutputPath", "(", "output_path", ")", "self", ".", "_buildStaticFiles", "(", ")", "self", ".", "final_url", "=", "self", ".", "_buildTemplates", "(", ")", "printDebug", "(", "\"Done.\"", ",", "\"comment\"", ")", "printDebug", "(", "\"=> %s\"", "%", "(", "self", ".", "final_url", ")", ",", "\"comment\"", ")", "return", "self", ".", "final_url" ]
method that should be inherited by all vis classes
[ "method", "that", "should", "be", "inherited", "by", "all", "vis", "classes" ]
eb46cb13792b2b87f21babdf976996318eec7571
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/ontodocs/viz_factory.py#L82-L90
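A hedged usage sketch for the build flow: the subclass name and constructor arguments below are assumptions for illustration only; in practice one of the ontodocs visualization classes derived from VizFactory is instantiated with an Ontospy graph, and only build() and its return value (the path of the main output file) are documented by the record above.

# Hypothetical subclass name and constructor signature.
import ontospy

g = ontospy.Ontospy("http://xmlns.com/foaf/0.1/")   # load a schema (network access assumed)
viz = SomeHtmlViz(g, title="FOAF docs")             # hypothetical VizFactory subclass
url = viz.build("/tmp/foaf-docs")                   # cleans the folder, copies statics, renders templates
print(url)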
17,334
lambdamusic/Ontospy
ontospy/ontodocs/viz_factory.py
VizFactory._buildTemplates
def _buildTemplates(self): """ do all the things necessary to build the viz should be adapted to work for single-file viz, or multi-files etc. :param output_path: :return: """ # in this case we only have one contents = self._renderTemplate(self.template_name, extraContext=None) # the main url used for opening viz f = self.main_file_name main_url = self._save2File(contents, f, self.output_path) return main_url
python
def _buildTemplates(self): """ do all the things necessary to build the viz should be adapted to work for single-file viz, or multi-files etc. :param output_path: :return: """ # in this case we only have one contents = self._renderTemplate(self.template_name, extraContext=None) # the main url used for opening viz f = self.main_file_name main_url = self._save2File(contents, f, self.output_path) return main_url
[ "def", "_buildTemplates", "(", "self", ")", ":", "# in this case we only have one", "contents", "=", "self", ".", "_renderTemplate", "(", "self", ".", "template_name", ",", "extraContext", "=", "None", ")", "# the main url used for opening viz", "f", "=", "self", ".", "main_file_name", "main_url", "=", "self", ".", "_save2File", "(", "contents", ",", "f", ",", "self", ".", "output_path", ")", "return", "main_url" ]
do all the things necessary to build the viz; should be adapted to work for single-file viz, or multi-files etc. :param output_path: :return:
[ "do", "all", "the", "things", "necessary", "to", "build", "the", "viz", "should", "be", "adapted", "to", "work", "for", "single", "-", "file", "viz", "or", "multi", "-", "files", "etc", "." ]
eb46cb13792b2b87f21babdf976996318eec7571
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/ontodocs/viz_factory.py#L92-L105
17,335
lambdamusic/Ontospy
ontospy/ontodocs/viz_factory.py
VizFactory._build_basic_context
def _build_basic_context(self): """ Return a standard dict used in django as a template context """ # printDebug(str(self.ontospy_graph.toplayer_classes)) topclasses = self.ontospy_graph.toplayer_classes[:] if len(topclasses) < 3: # massage the toplayer! for topclass in self.ontospy_graph.toplayer_classes: for child in topclass.children(): if child not in topclasses: topclasses.append(child) if not self.static_url: self.static_url = "static/" # default context_data = { "STATIC_URL": self.static_url, "ontodocs_version": VERSION, "ontospy_graph": self.ontospy_graph, "topclasses": topclasses, "docs_title": self.title, "namespaces": self.ontospy_graph.namespaces, "stats": self.ontospy_graph.stats(), "sources": self.ontospy_graph.sources, "ontologies": self.ontospy_graph.all_ontologies, "classes": self.ontospy_graph.all_classes, "properties": self.ontospy_graph.all_properties, "objproperties": self.ontospy_graph.all_properties_object, "dataproperties": self.ontospy_graph.all_properties_datatype, "annotationproperties": self.ontospy_graph.all_properties_annotation, "skosConcepts": self.ontospy_graph.all_skos_concepts, "instances": [] } return context_data
python
def _build_basic_context(self): """ Return a standard dict used in django as a template context """ # printDebug(str(self.ontospy_graph.toplayer_classes)) topclasses = self.ontospy_graph.toplayer_classes[:] if len(topclasses) < 3: # massage the toplayer! for topclass in self.ontospy_graph.toplayer_classes: for child in topclass.children(): if child not in topclasses: topclasses.append(child) if not self.static_url: self.static_url = "static/" # default context_data = { "STATIC_URL": self.static_url, "ontodocs_version": VERSION, "ontospy_graph": self.ontospy_graph, "topclasses": topclasses, "docs_title": self.title, "namespaces": self.ontospy_graph.namespaces, "stats": self.ontospy_graph.stats(), "sources": self.ontospy_graph.sources, "ontologies": self.ontospy_graph.all_ontologies, "classes": self.ontospy_graph.all_classes, "properties": self.ontospy_graph.all_properties, "objproperties": self.ontospy_graph.all_properties_object, "dataproperties": self.ontospy_graph.all_properties_datatype, "annotationproperties": self.ontospy_graph.all_properties_annotation, "skosConcepts": self.ontospy_graph.all_skos_concepts, "instances": [] } return context_data
[ "def", "_build_basic_context", "(", "self", ")", ":", "# printDebug(str(self.ontospy_graph.toplayer_classes))", "topclasses", "=", "self", ".", "ontospy_graph", ".", "toplayer_classes", "[", ":", "]", "if", "len", "(", "topclasses", ")", "<", "3", ":", "# massage the toplayer!", "for", "topclass", "in", "self", ".", "ontospy_graph", ".", "toplayer_classes", ":", "for", "child", "in", "topclass", ".", "children", "(", ")", ":", "if", "child", "not", "in", "topclasses", ":", "topclasses", ".", "append", "(", "child", ")", "if", "not", "self", ".", "static_url", ":", "self", ".", "static_url", "=", "\"static/\"", "# default", "context_data", "=", "{", "\"STATIC_URL\"", ":", "self", ".", "static_url", ",", "\"ontodocs_version\"", ":", "VERSION", ",", "\"ontospy_graph\"", ":", "self", ".", "ontospy_graph", ",", "\"topclasses\"", ":", "topclasses", ",", "\"docs_title\"", ":", "self", ".", "title", ",", "\"namespaces\"", ":", "self", ".", "ontospy_graph", ".", "namespaces", ",", "\"stats\"", ":", "self", ".", "ontospy_graph", ".", "stats", "(", ")", ",", "\"sources\"", ":", "self", ".", "ontospy_graph", ".", "sources", ",", "\"ontologies\"", ":", "self", ".", "ontospy_graph", ".", "all_ontologies", ",", "\"classes\"", ":", "self", ".", "ontospy_graph", ".", "all_classes", ",", "\"properties\"", ":", "self", ".", "ontospy_graph", ".", "all_properties", ",", "\"objproperties\"", ":", "self", ".", "ontospy_graph", ".", "all_properties_object", ",", "\"dataproperties\"", ":", "self", ".", "ontospy_graph", ".", "all_properties_datatype", ",", "\"annotationproperties\"", ":", "self", ".", "ontospy_graph", ".", "all_properties_annotation", ",", "\"skosConcepts\"", ":", "self", ".", "ontospy_graph", ".", "all_skos_concepts", ",", "\"instances\"", ":", "[", "]", "}", "return", "context_data" ]
Return a standard dict used in django as a template context
[ "Return", "a", "standard", "dict", "used", "in", "django", "as", "a", "template", "context" ]
eb46cb13792b2b87f21babdf976996318eec7571
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/ontodocs/viz_factory.py#L161-L195
17,336
lambdamusic/Ontospy
ontospy/ontodocs/viz_factory.py
VizFactory.checkOutputPath
def checkOutputPath(self, output_path): """ Create or clean up output path """ if not output_path: # output_path = self.output_path_DEFAULT output_path = os.path.join(self.output_path_DEFAULT, slugify(unicode(self.title))) if os.path.exists(output_path): shutil.rmtree(output_path) os.makedirs(output_path) return output_path
python
def checkOutputPath(self, output_path): """ Create or clean up output path """ if not output_path: # output_path = self.output_path_DEFAULT output_path = os.path.join(self.output_path_DEFAULT, slugify(unicode(self.title))) if os.path.exists(output_path): shutil.rmtree(output_path) os.makedirs(output_path) return output_path
[ "def", "checkOutputPath", "(", "self", ",", "output_path", ")", ":", "if", "not", "output_path", ":", "# output_path = self.output_path_DEFAULT", "output_path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "output_path_DEFAULT", ",", "slugify", "(", "unicode", "(", "self", ".", "title", ")", ")", ")", "if", "os", ".", "path", ".", "exists", "(", "output_path", ")", ":", "shutil", ".", "rmtree", "(", "output_path", ")", "os", ".", "makedirs", "(", "output_path", ")", "return", "output_path" ]
Create or clean up output path
[ "Create", "or", "clean", "up", "output", "path" ]
eb46cb13792b2b87f21babdf976996318eec7571
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/ontodocs/viz_factory.py#L205-L216
17,337
lambdamusic/Ontospy
ontospy/ontodocs/viz_factory.py
VizFactory.highlight_code
def highlight_code(self, ontospy_entity): """ produce an html version of Turtle code with syntax highlighted using Pygments CSS """ try: pygments_code = highlight(ontospy_entity.rdf_source(), TurtleLexer(), HtmlFormatter()) pygments_code_css = HtmlFormatter().get_style_defs('.highlight') return { "pygments_code": pygments_code, "pygments_code_css": pygments_code_css } except Exception as e: printDebug("Error: Pygmentize Failed", "red") return {}
python
def highlight_code(self, ontospy_entity): """ produce an html version of Turtle code with syntax highlighted using Pygments CSS """ try: pygments_code = highlight(ontospy_entity.rdf_source(), TurtleLexer(), HtmlFormatter()) pygments_code_css = HtmlFormatter().get_style_defs('.highlight') return { "pygments_code": pygments_code, "pygments_code_css": pygments_code_css } except Exception as e: printDebug("Error: Pygmentize Failed", "red") return {}
[ "def", "highlight_code", "(", "self", ",", "ontospy_entity", ")", ":", "try", ":", "pygments_code", "=", "highlight", "(", "ontospy_entity", ".", "rdf_source", "(", ")", ",", "TurtleLexer", "(", ")", ",", "HtmlFormatter", "(", ")", ")", "pygments_code_css", "=", "HtmlFormatter", "(", ")", ".", "get_style_defs", "(", "'.highlight'", ")", "return", "{", "\"pygments_code\"", ":", "pygments_code", ",", "\"pygments_code_css\"", ":", "pygments_code_css", "}", "except", "Exception", "as", "e", ":", "printDebug", "(", "\"Error: Pygmentize Failed\"", ",", "\"red\"", ")", "return", "{", "}" ]
produce an html version of Turtle code with syntax highlighted using Pygments CSS
[ "produce", "an", "html", "version", "of", "Turtle", "code", "with", "syntax", "highlighted", "using", "Pygments", "CSS" ]
eb46cb13792b2b87f21babdf976996318eec7571
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/ontodocs/viz_factory.py#L218-L233
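highlight_code relies on Pygments; the same result can be obtained standalone as below. The Turtle snippet is a made-up example and Pygments must be installed.

# Standalone equivalent of highlight_code: HTML markup plus the CSS rules
# needed to style the ".highlight" blocks.
from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers import TurtleLexer

ttl = "@prefix ex: <http://example.org/> .\nex:Dog a ex:Animal ."
pygments_code = highlight(ttl, TurtleLexer(), HtmlFormatter())
pygments_code_css = HtmlFormatter().get_style_defs('.highlight')
print(pygments_code)        # <div class="highlight"><pre>...</pre></div>
print(pygments_code_css[:80])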
17,338
lambdamusic/Ontospy
ontospy/extras/sparqlpy.py
SparqlEndpoint.query
def query(self, q, format="", convert=True): """ Generic SELECT query structure. 'q' is the main body of the query. The results passed out are not converted yet: see the 'format' method Results could be iterated using the idiom: for l in obj : do_something_with_line(l) If convert is False, we return the collection of rdflib instances """ lines = ["PREFIX %s: <%s>" % (k, r) for k, r in self.prefixes.iteritems()] lines.extend(q.split("\n")) query = "\n".join(lines) if self.verbose: print(query, "\n\n") return self.__doQuery(query, format, convert)
python
def query(self, q, format="", convert=True): """ Generic SELECT query structure. 'q' is the main body of the query. The results passed out are not converted yet: see the 'format' method Results could be iterated using the idiom: for l in obj : do_something_with_line(l) If convert is False, we return the collection of rdflib instances """ lines = ["PREFIX %s: <%s>" % (k, r) for k, r in self.prefixes.iteritems()] lines.extend(q.split("\n")) query = "\n".join(lines) if self.verbose: print(query, "\n\n") return self.__doQuery(query, format, convert)
[ "def", "query", "(", "self", ",", "q", ",", "format", "=", "\"\"", ",", "convert", "=", "True", ")", ":", "lines", "=", "[", "\"PREFIX %s: <%s>\"", "%", "(", "k", ",", "r", ")", "for", "k", ",", "r", "in", "self", ".", "prefixes", ".", "iteritems", "(", ")", "]", "lines", ".", "extend", "(", "q", ".", "split", "(", "\"\\n\"", ")", ")", "query", "=", "\"\\n\"", ".", "join", "(", "lines", ")", "if", "self", ".", "verbose", ":", "print", "(", "query", ",", "\"\\n\\n\"", ")", "return", "self", ".", "__doQuery", "(", "query", ",", "format", ",", "convert", ")" ]
Generic SELECT query structure. 'q' is the main body of the query. The results passed out are not converted yet: see the 'format' method. Results could be iterated using the idiom: for l in obj : do_something_with_line(l). If convert is False, we return the collection of rdflib instances.
[ "Generic", "SELECT", "query", "structure", ".", "q", "is", "the", "main", "body", "of", "the", "query", "." ]
eb46cb13792b2b87f21babdf976996318eec7571
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/extras/sparqlpy.py#L103-L121
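A hedged usage sketch for query(): the constructor call, the endpoint URL and the result shape below are assumptions (they depend on how SparqlEndpoint is configured and on the chosen return format); only the automatic PREFIX expansion and the q argument come from the record above.

# Hypothetical instantiation; assumes 'owl' is among the registered prefixes
# and that the default return format converts to a SPARQL-JSON dict.
endpoint = SparqlEndpoint("http://dbpedia.org/sparql")
q = "SELECT ?s WHERE { ?s a owl:Class } LIMIT 5"
results = endpoint.query(q)                    # PREFIX lines get prepended automatically
for row in results["results"]["bindings"]:     # shape depends on the return format
    print(row["s"]["value"])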
17,339
lambdamusic/Ontospy
ontospy/extras/sparqlpy.py
SparqlEndpoint.describe
def describe(self, uri, format="", convert=True): """ A simple DESCRIBE query with no 'where' arguments. 'uri' is the resource you want to describe. TODO: there are some errors with describe queries, due to the results being sent back For the moment we're not using them much.. needs to be tested more. """ lines = ["PREFIX %s: <%s>" % (k, r) for k, r in self.prefixes.iteritems()] if uri.startswith("http://"): lines.extend(["DESCRIBE <%s>" % uri]) else: # it's a shortened uri lines.extend(["DESCRIBE %s" % uri]) query = "\n".join(lines) if self.verbose: print(query, "\n\n") return self.__doQuery(query, format, convert)
python
def describe(self, uri, format="", convert=True): """ A simple DESCRIBE query with no 'where' arguments. 'uri' is the resource you want to describe. TODO: there are some errors with describe queries, due to the results being sent back For the moment we're not using them much.. needs to be tested more. """ lines = ["PREFIX %s: <%s>" % (k, r) for k, r in self.prefixes.iteritems()] if uri.startswith("http://"): lines.extend(["DESCRIBE <%s>" % uri]) else: # it's a shortened uri lines.extend(["DESCRIBE %s" % uri]) query = "\n".join(lines) if self.verbose: print(query, "\n\n") return self.__doQuery(query, format, convert)
[ "def", "describe", "(", "self", ",", "uri", ",", "format", "=", "\"\"", ",", "convert", "=", "True", ")", ":", "lines", "=", "[", "\"PREFIX %s: <%s>\"", "%", "(", "k", ",", "r", ")", "for", "k", ",", "r", "in", "self", ".", "prefixes", ".", "iteritems", "(", ")", "]", "if", "uri", ".", "startswith", "(", "\"http://\"", ")", ":", "lines", ".", "extend", "(", "[", "\"DESCRIBE <%s>\"", "%", "uri", "]", ")", "else", ":", "# it's a shortened uri", "lines", ".", "extend", "(", "[", "\"DESCRIBE %s\"", "%", "uri", "]", ")", "query", "=", "\"\\n\"", ".", "join", "(", "lines", ")", "if", "self", ".", "verbose", ":", "print", "(", "query", ",", "\"\\n\\n\"", ")", "return", "self", ".", "__doQuery", "(", "query", ",", "format", ",", "convert", ")" ]
A simple DESCRIBE query with no 'where' arguments. 'uri' is the resource you want to describe. TODO: there are some errors with describe queries, due to the results being sent back. For the moment we're not using them much; needs to be tested more.
[ "A", "simple", "DESCRIBE", "query", "with", "no", "where", "arguments", ".", "uri", "is", "the", "resource", "you", "want", "to", "describe", "." ]
eb46cb13792b2b87f21babdf976996318eec7571
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/extras/sparqlpy.py#L125-L144
17,340
lambdamusic/Ontospy
ontospy/extras/sparqlpy.py
SparqlEndpoint.__doQuery
def __doQuery(self, query, format, convert): """ Inner method that does the actual query """ self.__getFormat(format) self.sparql.setQuery(query) if convert: results = self.sparql.query().convert() else: results = self.sparql.query() return results
python
def __doQuery(self, query, format, convert): """ Inner method that does the actual query """ self.__getFormat(format) self.sparql.setQuery(query) if convert: results = self.sparql.query().convert() else: results = self.sparql.query() return results
[ "def", "__doQuery", "(", "self", ",", "query", ",", "format", ",", "convert", ")", ":", "self", ".", "__getFormat", "(", "format", ")", "self", ".", "sparql", ".", "setQuery", "(", "query", ")", "if", "convert", ":", "results", "=", "self", ".", "sparql", ".", "query", "(", ")", ".", "convert", "(", ")", "else", ":", "results", "=", "self", ".", "sparql", ".", "query", "(", ")", "return", "results" ]
Inner method that does the actual query
[ "Inner", "method", "that", "does", "the", "actual", "query" ]
eb46cb13792b2b87f21babdf976996318eec7571
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/extras/sparqlpy.py#L212-L223
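The setQuery / query().convert() call chain suggests the class wraps SPARQLWrapper. For context, the equivalent direct usage is sketched below; the endpoint URL and query are illustrative.

# Direct SPARQLWrapper usage mirroring __doQuery's call chain.
from SPARQLWrapper import SPARQLWrapper, JSON

sparql = SPARQLWrapper("http://dbpedia.org/sparql")
sparql.setReturnFormat(JSON)
sparql.setQuery("SELECT ?s WHERE { ?s ?p ?o } LIMIT 3")
data = sparql.query().convert()
for binding in data["results"]["bindings"]:
    print(binding["s"]["value"])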
17,341
lambdamusic/Ontospy
ontospy/extras/hacks/turtle-cli.py
get_default_preds
def get_default_preds(): """dynamically build autocomplete options based on an external file""" g = ontospy.Ontospy(rdfsschema, text=True, verbose=False, hide_base_schemas=False) classes = [(x.qname, x.bestDescription()) for x in g.all_classes] properties = [(x.qname, x.bestDescription()) for x in g.all_properties] commands = [('exit', 'exits the terminal'), ('show', 'show current buffer')] return rdfschema + owlschema + classes + properties + commands
python
def get_default_preds(): """dynamically build autocomplete options based on an external file""" g = ontospy.Ontospy(rdfsschema, text=True, verbose=False, hide_base_schemas=False) classes = [(x.qname, x.bestDescription()) for x in g.all_classes] properties = [(x.qname, x.bestDescription()) for x in g.all_properties] commands = [('exit', 'exits the terminal'), ('show', 'show current buffer')] return rdfschema + owlschema + classes + properties + commands
[ "def", "get_default_preds", "(", ")", ":", "g", "=", "ontospy", ".", "Ontospy", "(", "rdfsschema", ",", "text", "=", "True", ",", "verbose", "=", "False", ",", "hide_base_schemas", "=", "False", ")", "classes", "=", "[", "(", "x", ".", "qname", ",", "x", ".", "bestDescription", "(", ")", ")", "for", "x", "in", "g", ".", "all_classes", "]", "properties", "=", "[", "(", "x", ".", "qname", ",", "x", ".", "bestDescription", "(", ")", ")", "for", "x", "in", "g", ".", "all_properties", "]", "commands", "=", "[", "(", "'exit'", ",", "'exits the terminal'", ")", ",", "(", "'show'", ",", "'show current buffer'", ")", "]", "return", "rdfschema", "+", "owlschema", "+", "classes", "+", "properties", "+", "commands" ]
dynamically build autocomplete options based on an external file
[ "dynamically", "build", "autocomplete", "options", "based", "on", "an", "external", "file" ]
eb46cb13792b2b87f21babdf976996318eec7571
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/extras/hacks/turtle-cli.py#L42-L48
17,342
lambdamusic/Ontospy
ontospy/extras/hacks/matcher.py
matcher
def matcher(graph1, graph2, confidence=0.5, output_file="matching_results.csv", class_or_prop="classes", verbose=False): """ takes two graphs and matches its classes based on qname, label etc.. @todo extend to properties and skos etc.. """ printDebug("----------\nNow matching...") f = open(output_file, 'wt') counter = 0 try: writer = csv.writer(f, quoting=csv.QUOTE_NONNUMERIC) writer.writerow( ('name 1', 'name 2', 'uri 1', 'uri 2') ) # a) match classes if class_or_prop == "classes": for x in graph1.all_classes: l1 = unicode(x.bestLabel(qname_allowed=True)) for y in graph2.all_classes: l2 = unicode(y.bestLabel(qname_allowed=True)) if similar(l1, l2) > confidence: counter += 1 row = [l1, l2, x.uri, y.uri] writer.writerow([s.encode('utf8') if type(s) is unicode else s for s in row]) if verbose: print("%s ==~== %s" % (l1, l2)) # b) match properties elif class_or_prop == "properties": for x in graph1.all_properties: l1 = unicode(x.bestLabel(qname_allowed=True)) for y in graph2.all_properties: l2 = unicode(y.bestLabel(qname_allowed=True)) if similar(l1, l2) > confidence: counter += 1 row = [l1, l2, x.uri, y.uri] writer.writerow([s.encode('utf8') if type(s) is unicode else s for s in row]) if verbose: print("%s ==~== %s" % (l1, l2)) finally: f.close() printDebug("%d candidates found." % counter)
python
def matcher(graph1, graph2, confidence=0.5, output_file="matching_results.csv", class_or_prop="classes", verbose=False): """ takes two graphs and matches its classes based on qname, label etc.. @todo extend to properties and skos etc.. """ printDebug("----------\nNow matching...") f = open(output_file, 'wt') counter = 0 try: writer = csv.writer(f, quoting=csv.QUOTE_NONNUMERIC) writer.writerow( ('name 1', 'name 2', 'uri 1', 'uri 2') ) # a) match classes if class_or_prop == "classes": for x in graph1.all_classes: l1 = unicode(x.bestLabel(qname_allowed=True)) for y in graph2.all_classes: l2 = unicode(y.bestLabel(qname_allowed=True)) if similar(l1, l2) > confidence: counter += 1 row = [l1, l2, x.uri, y.uri] writer.writerow([s.encode('utf8') if type(s) is unicode else s for s in row]) if verbose: print("%s ==~== %s" % (l1, l2)) # b) match properties elif class_or_prop == "properties": for x in graph1.all_properties: l1 = unicode(x.bestLabel(qname_allowed=True)) for y in graph2.all_properties: l2 = unicode(y.bestLabel(qname_allowed=True)) if similar(l1, l2) > confidence: counter += 1 row = [l1, l2, x.uri, y.uri] writer.writerow([s.encode('utf8') if type(s) is unicode else s for s in row]) if verbose: print("%s ==~== %s" % (l1, l2)) finally: f.close() printDebug("%d candidates found." % counter)
[ "def", "matcher", "(", "graph1", ",", "graph2", ",", "confidence", "=", "0.5", ",", "output_file", "=", "\"matching_results.csv\"", ",", "class_or_prop", "=", "\"classes\"", ",", "verbose", "=", "False", ")", ":", "printDebug", "(", "\"----------\\nNow matching...\"", ")", "f", "=", "open", "(", "output_file", ",", "'wt'", ")", "counter", "=", "0", "try", ":", "writer", "=", "csv", ".", "writer", "(", "f", ",", "quoting", "=", "csv", ".", "QUOTE_NONNUMERIC", ")", "writer", ".", "writerow", "(", "(", "'name 1'", ",", "'name 2'", ",", "'uri 1'", ",", "'uri 2'", ")", ")", "# a) match classes", "if", "class_or_prop", "==", "\"classes\"", ":", "for", "x", "in", "graph1", ".", "all_classes", ":", "l1", "=", "unicode", "(", "x", ".", "bestLabel", "(", "qname_allowed", "=", "True", ")", ")", "for", "y", "in", "graph2", ".", "all_classes", ":", "l2", "=", "unicode", "(", "y", ".", "bestLabel", "(", "qname_allowed", "=", "True", ")", ")", "if", "similar", "(", "l1", ",", "l2", ")", ">", "confidence", ":", "counter", "+=", "1", "row", "=", "[", "l1", ",", "l2", ",", "x", ".", "uri", ",", "y", ".", "uri", "]", "writer", ".", "writerow", "(", "[", "s", ".", "encode", "(", "'utf8'", ")", "if", "type", "(", "s", ")", "is", "unicode", "else", "s", "for", "s", "in", "row", "]", ")", "if", "verbose", ":", "print", "(", "\"%s ==~== %s\"", "%", "(", "l1", ",", "l2", ")", ")", "# b) match properties", "elif", "class_or_prop", "==", "\"properties\"", ":", "for", "x", "in", "graph1", ".", "all_properties", ":", "l1", "=", "unicode", "(", "x", ".", "bestLabel", "(", "qname_allowed", "=", "True", ")", ")", "for", "y", "in", "graph2", ".", "all_properties", ":", "l2", "=", "unicode", "(", "y", ".", "bestLabel", "(", "qname_allowed", "=", "True", ")", ")", "if", "similar", "(", "l1", ",", "l2", ")", ">", "confidence", ":", "counter", "+=", "1", "row", "=", "[", "l1", ",", "l2", ",", "x", ".", "uri", ",", "y", ".", "uri", "]", "writer", ".", "writerow", "(", "[", "s", ".", "encode", "(", "'utf8'", ")", "if", "type", "(", "s", ")", "is", "unicode", "else", "s", "for", "s", "in", "row", "]", ")", "if", "verbose", ":", "print", "(", "\"%s ==~== %s\"", "%", "(", "l1", ",", "l2", ")", ")", "finally", ":", "f", ".", "close", "(", ")", "printDebug", "(", "\"%d candidates found.\"", "%", "counter", ")" ]
takes two graphs and matches their classes based on qname, label etc. @todo extend to properties and skos etc.
[ "takes", "two", "graphs", "and", "matches", "its", "classes", "based", "on", "qname", "label", "etc", ".." ]
eb46cb13792b2b87f21babdf976996318eec7571
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/extras/hacks/matcher.py#L69-L122
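matcher() depends on a similar() helper that is not part of this record. A common way to implement such a 0..1 string-similarity score is difflib's SequenceMatcher ratio; this is an assumption about the helper, not its actual source.

# Assumed implementation of the similar() helper used by matcher().
from difflib import SequenceMatcher

def similar(a, b):
    return SequenceMatcher(None, a.lower(), b.lower()).ratio()

print(similar("Person", "person"))               # 1.0
print(similar("Organization", "Organisation"))   # ~0.92, above a 0.5 confidence threshold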
17,343
lambdamusic/Ontospy
ontospy/core/utils.py
safe_str
def safe_str(u, errors="replace"): """Safely print the given string. If you want to see the code points for unprintable characters then you can use `errors="xmlcharrefreplace"`. http://code.activestate.com/recipes/576602-safe-print/ """ s = u.encode(sys.stdout.encoding or "utf-8", errors) return s
python
def safe_str(u, errors="replace"): """Safely print the given string. If you want to see the code points for unprintable characters then you can use `errors="xmlcharrefreplace"`. http://code.activestate.com/recipes/576602-safe-print/ """ s = u.encode(sys.stdout.encoding or "utf-8", errors) return s
[ "def", "safe_str", "(", "u", ",", "errors", "=", "\"replace\"", ")", ":", "s", "=", "u", ".", "encode", "(", "sys", ".", "stdout", ".", "encoding", "or", "\"utf-8\"", ",", "errors", ")", "return", "s" ]
Safely print the given string. If you want to see the code points for unprintable characters then you can use `errors="xmlcharrefreplace"`. http://code.activestate.com/recipes/576602-safe-print/
[ "Safely", "print", "the", "given", "string", "." ]
eb46cb13792b2b87f21babdf976996318eec7571
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/core/utils.py#L41-L49
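A quick illustration of safe_str's two error-handling modes; it assumes safe_str has been imported from ontospy.core.utils, and the exact byte output depends on the terminal encoding (the module targets Python 2-style byte strings).

# With errors="replace" unencodable characters are substituted; with
# "xmlcharrefreplace" they become numeric character references.
snowman = u"I \u2603 unicode"
print(safe_str(snowman))
print(safe_str(snowman, errors="xmlcharrefreplace"))   # e.g. b'I &#9731; unicode' on an ASCII stdout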
17,344
lambdamusic/Ontospy
ontospy/core/utils.py
OLD_printDebug
def OLD_printDebug(s, style=None): """ util for printing in colors to sys.stderr stream """ if style == "comment": s = Style.DIM + s + Style.RESET_ALL elif style == "important": s = Style.BRIGHT + s + Style.RESET_ALL elif style == "normal": s = Style.RESET_ALL + s + Style.RESET_ALL elif style == "red": s = Fore.RED + s + Style.RESET_ALL elif style == "green": s = Fore.GREEN + s + Style.RESET_ALL try: print(s, file=sys.stderr) except: pass
python
def OLD_printDebug(s, style=None): """ util for printing in colors to sys.stderr stream """ if style == "comment": s = Style.DIM + s + Style.RESET_ALL elif style == "important": s = Style.BRIGHT + s + Style.RESET_ALL elif style == "normal": s = Style.RESET_ALL + s + Style.RESET_ALL elif style == "red": s = Fore.RED + s + Style.RESET_ALL elif style == "green": s = Fore.GREEN + s + Style.RESET_ALL try: print(s, file=sys.stderr) except: pass
[ "def", "OLD_printDebug", "(", "s", ",", "style", "=", "None", ")", ":", "if", "style", "==", "\"comment\"", ":", "s", "=", "Style", ".", "DIM", "+", "s", "+", "Style", ".", "RESET_ALL", "elif", "style", "==", "\"important\"", ":", "s", "=", "Style", ".", "BRIGHT", "+", "s", "+", "Style", ".", "RESET_ALL", "elif", "style", "==", "\"normal\"", ":", "s", "=", "Style", ".", "RESET_ALL", "+", "s", "+", "Style", ".", "RESET_ALL", "elif", "style", "==", "\"red\"", ":", "s", "=", "Fore", ".", "RED", "+", "s", "+", "Style", ".", "RESET_ALL", "elif", "style", "==", "\"green\"", ":", "s", "=", "Fore", ".", "GREEN", "+", "s", "+", "Style", ".", "RESET_ALL", "try", ":", "print", "(", "s", ",", "file", "=", "sys", ".", "stderr", ")", "except", ":", "pass" ]
util for printing in colors to sys.stderr stream
[ "util", "for", "printing", "in", "colors", "to", "sys", ".", "stderr", "stream" ]
eb46cb13792b2b87f21babdf976996318eec7571
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/core/utils.py#L175-L192
17,345
lambdamusic/Ontospy
ontospy/core/utils.py
pprint2columns
def pprint2columns(llist, max_length=60): """ llist = a list of strings max_length = if a word is longer than that, for single col display > prints a list in two columns, taking care of alignment too """ if len(llist) == 0: return None col_width = max(len(word) for word in llist) + 2 # padding # llist length must be even, otherwise splitting fails if not len(llist) % 2 == 0: llist += [' '] # add a fake element if col_width > max_length: for el in llist: print(el) else: column1 = llist[:int(len(llist) / 2)] column2 = llist[int(len(llist) / 2):] for c1, c2 in zip(column1, column2): space = " " * (col_width - len(c1)) print("%s%s%s" % (c1, space, c2))
python
def pprint2columns(llist, max_length=60): """ llist = a list of strings max_length = if a word is longer than that, for single col display > prints a list in two columns, taking care of alignment too """ if len(llist) == 0: return None col_width = max(len(word) for word in llist) + 2 # padding # llist length must be even, otherwise splitting fails if not len(llist) % 2 == 0: llist += [' '] # add a fake element if col_width > max_length: for el in llist: print(el) else: column1 = llist[:int(len(llist) / 2)] column2 = llist[int(len(llist) / 2):] for c1, c2 in zip(column1, column2): space = " " * (col_width - len(c1)) print("%s%s%s" % (c1, space, c2))
[ "def", "pprint2columns", "(", "llist", ",", "max_length", "=", "60", ")", ":", "if", "len", "(", "llist", ")", "==", "0", ":", "return", "None", "col_width", "=", "max", "(", "len", "(", "word", ")", "for", "word", "in", "llist", ")", "+", "2", "# padding", "# llist length must be even, otherwise splitting fails", "if", "not", "len", "(", "llist", ")", "%", "2", "==", "0", ":", "llist", "+=", "[", "' '", "]", "# add a fake element", "if", "col_width", ">", "max_length", ":", "for", "el", "in", "llist", ":", "print", "(", "el", ")", "else", ":", "column1", "=", "llist", "[", ":", "int", "(", "len", "(", "llist", ")", "/", "2", ")", "]", "column2", "=", "llist", "[", "int", "(", "len", "(", "llist", ")", "/", "2", ")", ":", "]", "for", "c1", ",", "c2", "in", "zip", "(", "column1", ",", "column2", ")", ":", "space", "=", "\" \"", "*", "(", "col_width", "-", "len", "(", "c1", ")", ")", "print", "(", "\"%s%s%s\"", "%", "(", "c1", ",", "space", ",", "c2", ")", ")" ]
llist = a list of strings; max_length = if a word is longer than that, fall back to single-column display. Prints a list in two columns, taking care of alignment too.
[ "llist", "=", "a", "list", "of", "strings", "max_length", "=", "if", "a", "word", "is", "longer", "than", "that", "for", "single", "col", "display" ]
eb46cb13792b2b87f21babdf976996318eec7571
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/core/utils.py#L195-L219
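An example run of pprint2columns with six short strings (assuming the function has been imported from ontospy.core.utils): they are padded to a common width and printed in two aligned columns, with the expected output shown in the comments.

pprint2columns(["alpha", "beta", "gamma", "delta", "epsilon", "zeta"])
# alpha    delta
# beta     epsilon
# gamma    zeta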
17,346
lambdamusic/Ontospy
ontospy/core/utils.py
playSound
def playSound(folder, name=""): """ as easy as that """ try: if not name: onlyfiles = [ f for f in os.listdir(folder) if os.path.isfile(os.path.join(folder, f)) ] name = random.choice(onlyfiles) subprocess.call(["afplay", folder + name]) # subprocess.call(["say", "%d started, batch %d" % (adate, batch)]) except: pass
python
def playSound(folder, name=""): """ as easy as that """ try: if not name: onlyfiles = [ f for f in os.listdir(folder) if os.path.isfile(os.path.join(folder, f)) ] name = random.choice(onlyfiles) subprocess.call(["afplay", folder + name]) # subprocess.call(["say", "%d started, batch %d" % (adate, batch)]) except: pass
[ "def", "playSound", "(", "folder", ",", "name", "=", "\"\"", ")", ":", "try", ":", "if", "not", "name", ":", "onlyfiles", "=", "[", "f", "for", "f", "in", "os", ".", "listdir", "(", "folder", ")", "if", "os", ".", "path", ".", "isfile", "(", "os", ".", "path", ".", "join", "(", "folder", ",", "f", ")", ")", "]", "name", "=", "random", ".", "choice", "(", "onlyfiles", ")", "subprocess", ".", "call", "(", "[", "\"afplay\"", ",", "folder", "+", "name", "]", ")", "# subprocess.call([\"say\", \"%d started, batch %d\" % (adate, batch)])", "except", ":", "pass" ]
as easy as that
[ "as", "easy", "as", "that" ]
eb46cb13792b2b87f21babdf976996318eec7571
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/core/utils.py#L344-L356
17,347
lambdamusic/Ontospy
ontospy/core/utils.py
truncate
def truncate(data, l=20):
    "truncate a string"
    info = (data[:l] + '..') if len(data) > l else data
    return info
python
def truncate(data, l=20):
    "truncate a string"
    info = (data[:l] + '..') if len(data) > l else data
    return info
[ "def", "truncate", "(", "data", ",", "l", "=", "20", ")", ":", "info", "=", "(", "data", "[", ":", "l", "]", "+", "'..'", ")", "if", "len", "(", "data", ")", ">", "l", "else", "data", "return", "info" ]
truncate a string
[ "truncate", "a", "string" ]
eb46cb13792b2b87f21babdf976996318eec7571
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/core/utils.py#L359-L362
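A quick check of truncate's behaviour, with made-up strings and the import path taken from the record:

from ontospy.core.utils import truncate

print(truncate("a rather long ontology label", 10))  # -> 'a rather l..'
print(truncate("short", 10))                         # -> 'short' (unchanged)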
17,348
lambdamusic/Ontospy
ontospy/core/utils.py
printGenericTree
def printGenericTree(element, level=0, showids=True, labels=False, showtype=True, TYPE_MARGIN=18):
    """
    Print nicely into stdout the taxonomical tree of an ontology.

    Works irrespectively of whether it's a class or property.

    Note: indentation is made so that ids up to 3 digits fit in, plus a space.
    [123]1--
    [1]123--
    [12]12--

    <TYPE_MARGIN> is parametrized so that classes and properties can have
    different default spacing (eg owl:class vs owl:AnnotationProperty)
    """
    ID_MARGIN = 5

    SHORT_TYPES = {
        "rdf:Property": "rdf:Property",
        "owl:AnnotationProperty": "owl:Annot.Pr.",
        "owl:DatatypeProperty": "owl:DatatypePr.",
        "owl:ObjectProperty": "owl:ObjectPr.",
    }

    if showids:
        _id_ = Fore.BLUE + \
            "[%d]%s" % (element.id, " " * (ID_MARGIN - len(str(element.id)))) + \
            Fore.RESET
    elif showtype:
        _prop = uri2niceString(element.rdftype)
        try:
            prop = SHORT_TYPES[_prop]
        except:
            prop = _prop
        _id_ = Fore.BLUE + \
            "[%s]%s" % (prop, " " * (TYPE_MARGIN - len(prop))) + Fore.RESET
    else:
        _id_ = ""

    if labels:
        bestLabel = element.bestLabel(qname_allowed=False)
        if bestLabel:
            bestLabel = Fore.MAGENTA + " (\"%s\")" % bestLabel + Fore.RESET
    else:
        bestLabel = ""

    printDebug("%s%s%s%s" % (_id_, "-" * 4 * level, element.qname, bestLabel))

    # recursion
    for sub in element.children():
        printGenericTree(sub, (level + 1), showids, labels, showtype, TYPE_MARGIN)
python
def printGenericTree(element, level=0, showids=True, labels=False, showtype=True, TYPE_MARGIN=18):
    """
    Print nicely into stdout the taxonomical tree of an ontology.

    Works irrespectively of whether it's a class or property.

    Note: indentation is made so that ids up to 3 digits fit in, plus a space.
    [123]1--
    [1]123--
    [12]12--

    <TYPE_MARGIN> is parametrized so that classes and properties can have
    different default spacing (eg owl:class vs owl:AnnotationProperty)
    """
    ID_MARGIN = 5

    SHORT_TYPES = {
        "rdf:Property": "rdf:Property",
        "owl:AnnotationProperty": "owl:Annot.Pr.",
        "owl:DatatypeProperty": "owl:DatatypePr.",
        "owl:ObjectProperty": "owl:ObjectPr.",
    }

    if showids:
        _id_ = Fore.BLUE + \
            "[%d]%s" % (element.id, " " * (ID_MARGIN - len(str(element.id)))) + \
            Fore.RESET
    elif showtype:
        _prop = uri2niceString(element.rdftype)
        try:
            prop = SHORT_TYPES[_prop]
        except:
            prop = _prop
        _id_ = Fore.BLUE + \
            "[%s]%s" % (prop, " " * (TYPE_MARGIN - len(prop))) + Fore.RESET
    else:
        _id_ = ""

    if labels:
        bestLabel = element.bestLabel(qname_allowed=False)
        if bestLabel:
            bestLabel = Fore.MAGENTA + " (\"%s\")" % bestLabel + Fore.RESET
    else:
        bestLabel = ""

    printDebug("%s%s%s%s" % (_id_, "-" * 4 * level, element.qname, bestLabel))

    # recursion
    for sub in element.children():
        printGenericTree(sub, (level + 1), showids, labels, showtype, TYPE_MARGIN)
[ "def", "printGenericTree", "(", "element", ",", "level", "=", "0", ",", "showids", "=", "True", ",", "labels", "=", "False", ",", "showtype", "=", "True", ",", "TYPE_MARGIN", "=", "18", ")", ":", "ID_MARGIN", "=", "5", "SHORT_TYPES", "=", "{", "\"rdf:Property\"", ":", "\"rdf:Property\"", ",", "\"owl:AnnotationProperty\"", ":", "\"owl:Annot.Pr.\"", ",", "\"owl:DatatypeProperty\"", ":", "\"owl:DatatypePr.\"", ",", "\"owl:ObjectProperty\"", ":", "\"owl:ObjectPr.\"", ",", "}", "if", "showids", ":", "_id_", "=", "Fore", ".", "BLUE", "+", "\"[%d]%s\"", "%", "(", "element", ".", "id", ",", "\" \"", "*", "(", "ID_MARGIN", "-", "len", "(", "str", "(", "element", ".", "id", ")", ")", ")", ")", "+", "Fore", ".", "RESET", "elif", "showtype", ":", "_prop", "=", "uri2niceString", "(", "element", ".", "rdftype", ")", "try", ":", "prop", "=", "SHORT_TYPES", "[", "_prop", "]", "except", ":", "prop", "=", "_prop", "_id_", "=", "Fore", ".", "BLUE", "+", "\"[%s]%s\"", "%", "(", "prop", ",", "\" \"", "*", "(", "TYPE_MARGIN", "-", "len", "(", "prop", ")", ")", ")", "+", "Fore", ".", "RESET", "else", ":", "_id_", "=", "\"\"", "if", "labels", ":", "bestLabel", "=", "element", ".", "bestLabel", "(", "qname_allowed", "=", "False", ")", "if", "bestLabel", ":", "bestLabel", "=", "Fore", ".", "MAGENTA", "+", "\" (\\\"%s\\\")\"", "%", "bestLabel", "+", "Fore", ".", "RESET", "else", ":", "bestLabel", "=", "\"\"", "printDebug", "(", "\"%s%s%s%s\"", "%", "(", "_id_", ",", "\"-\"", "*", "4", "*", "level", ",", "element", ".", "qname", ",", "bestLabel", ")", ")", "# recursion", "for", "sub", "in", "element", ".", "children", "(", ")", ":", "printGenericTree", "(", "sub", ",", "(", "level", "+", "1", ")", ",", "showids", ",", "labels", ",", "showtype", ",", "TYPE_MARGIN", ")" ]
Print nicely into stdout the taxonomical tree of an ontology. Works irrespectively of whether it's a class or property. Note: indentation is made so that ids up to 3 digits fit in, plus a space. [123]1-- [1]123-- [12]12-- <TYPE_MARGIN> is parametrized so that classes and properties can have different default spacing (eg owl:class vs owl:AnnotationProperty)
[ "Print", "nicely", "into", "stdout", "the", "taxonomical", "tree", "of", "an", "ontology", "." ]
eb46cb13792b2b87f21babdf976996318eec7571
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/core/utils.py#L443-L500
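A hedged sketch of driving printGenericTree from an Ontospy model; the FOAF URL is only an example, and the top-level Ontospy import plus the all_classes/parents attribute names are assumptions taken from other records in this file:

from ontospy import Ontospy            # assumed top-level import
from ontospy.core.utils import printGenericTree

model = Ontospy("http://xmlns.com/foaf/0.1/")      # example vocabulary
roots = [c for c in model.all_classes if not c.parents()]
for c in roots:
    printGenericTree(c, labels=True)               # indented taxonomy with labels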
17,349
lambdamusic/Ontospy
ontospy/core/utils.py
firstStringInList
def firstStringInList(literalEntities, prefLanguage="en"):
    """
    from a list of literals, returns the one in prefLanguage
    if no language specification is available, return first element
    """
    match = ""
    if len(literalEntities) == 1:
        match = literalEntities[0]
    elif len(literalEntities) > 1:
        for x in literalEntities:
            if getattr(x, 'language') and getattr(x, 'language') == prefLanguage:
                match = x
        if not match:  # don't bother about language
            match = literalEntities[0]
    return match
python
def firstStringInList(literalEntities, prefLanguage="en"):
    """
    from a list of literals, returns the one in prefLanguage
    if no language specification is available, return first element
    """
    match = ""
    if len(literalEntities) == 1:
        match = literalEntities[0]
    elif len(literalEntities) > 1:
        for x in literalEntities:
            if getattr(x, 'language') and getattr(x, 'language') == prefLanguage:
                match = x
        if not match:  # don't bother about language
            match = literalEntities[0]
    return match
[ "def", "firstStringInList", "(", "literalEntities", ",", "prefLanguage", "=", "\"en\"", ")", ":", "match", "=", "\"\"", "if", "len", "(", "literalEntities", ")", "==", "1", ":", "match", "=", "literalEntities", "[", "0", "]", "elif", "len", "(", "literalEntities", ")", ">", "1", ":", "for", "x", "in", "literalEntities", ":", "if", "getattr", "(", "x", ",", "'language'", ")", "and", "getattr", "(", "x", ",", "'language'", ")", "==", "prefLanguage", ":", "match", "=", "x", "if", "not", "match", ":", "# don't bother about language", "match", "=", "literalEntities", "[", "0", "]", "return", "match" ]
from a list of literals, returns the one in prefLanguage if no language specification is available, return first element
[ "from", "a", "list", "of", "literals", "returns", "the", "one", "in", "prefLanguage", "if", "no", "language", "specification", "is", "available", "return", "first", "element" ]
eb46cb13792b2b87f21babdf976996318eec7571
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/core/utils.py#L503-L519
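A small self-contained example with rdflib literals (rdflib is already a dependency of these utilities); the sample labels are made up:

from rdflib import Literal
from ontospy.core.utils import firstStringInList

labels = [Literal("Haus", lang="de"), Literal("house", lang="en")]
print(firstStringInList(labels))                     # -> 'house' (default prefLanguage="en")
print(firstStringInList(labels, prefLanguage="de"))  # -> 'Haus'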
17,350
lambdamusic/Ontospy
ontospy/core/utils.py
joinStringsInList
def joinStringsInList(literalEntities, prefLanguage="en"):
    """
    from a list of literals, returns the ones in prefLanguage joined up.
    if the desired language specification is not available, join all up
    """
    match = []
    if len(literalEntities) == 1:
        return literalEntities[0]
    elif len(literalEntities) > 1:
        for x in literalEntities:
            if getattr(x, 'language') and getattr(x, 'language') == prefLanguage:
                match.append(x)
    if not match:  # don't bother about language
        for x in literalEntities:
            match.append(x)
    return " - ".join([x for x in match])
python
def joinStringsInList(literalEntities, prefLanguage="en"):
    """
    from a list of literals, returns the ones in prefLanguage joined up.
    if the desired language specification is not available, join all up
    """
    match = []
    if len(literalEntities) == 1:
        return literalEntities[0]
    elif len(literalEntities) > 1:
        for x in literalEntities:
            if getattr(x, 'language') and getattr(x, 'language') == prefLanguage:
                match.append(x)
    if not match:  # don't bother about language
        for x in literalEntities:
            match.append(x)
    return " - ".join([x for x in match])
[ "def", "joinStringsInList", "(", "literalEntities", ",", "prefLanguage", "=", "\"en\"", ")", ":", "match", "=", "[", "]", "if", "len", "(", "literalEntities", ")", "==", "1", ":", "return", "literalEntities", "[", "0", "]", "elif", "len", "(", "literalEntities", ")", ">", "1", ":", "for", "x", "in", "literalEntities", ":", "if", "getattr", "(", "x", ",", "'language'", ")", "and", "getattr", "(", "x", ",", "'language'", ")", "==", "prefLanguage", ":", "match", ".", "append", "(", "x", ")", "if", "not", "match", ":", "# don't bother about language", "for", "x", "in", "literalEntities", ":", "match", ".", "append", "(", "x", ")", "return", "\" - \"", ".", "join", "(", "[", "x", "for", "x", "in", "match", "]", ")" ]
from a list of literals, returns the ones in prefLanguage joined up. if the desired language specification is not available, join all up
[ "from", "a", "list", "of", "literals", "returns", "the", "ones", "in", "prefLanguage", "joined", "up", ".", "if", "the", "desired", "language", "specification", "is", "not", "available", "join", "all", "up" ]
eb46cb13792b2b87f21babdf976996318eec7571
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/core/utils.py#L526-L544
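And the joining variant, using the same kind of made-up literals:

from rdflib import Literal
from ontospy.core.utils import joinStringsInList

comments = [Literal("A person.", lang="en"), Literal("Any human being.", lang="en")]
print(joinStringsInList(comments))   # -> 'A person. - Any human being.'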
17,351
lambdamusic/Ontospy
ontospy/core/utils.py
sortByNamespacePrefix
def sortByNamespacePrefix(urisList, nsList):
    """
    Given an ordered list of namespaces prefixes, order a list of uris based on that.
    Eg

    In [7]: ll
    Out[7]:
    [rdflib.term.URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'),
     rdflib.term.URIRef(u'http://www.w3.org/2000/01/rdf-schema#comment'),
     rdflib.term.URIRef(u'http://www.w3.org/2000/01/rdf-schema#label'),
     rdflib.term.URIRef(u'http://www.w3.org/2002/07/owl#equivalentClass')]

    In [8]: sortByNamespacePrefix(ll, [OWL.OWLNS, RDFS])
    Out[8]:
    [rdflib.term.URIRef(u'http://www.w3.org/2002/07/owl#equivalentClass'),
     rdflib.term.URIRef(u'http://www.w3.org/2000/01/rdf-schema#comment'),
     rdflib.term.URIRef(u'http://www.w3.org/2000/01/rdf-schema#label'),
     rdflib.term.URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#type')]
    """
    exit = []
    urisList = sort_uri_list_by_name(urisList)
    for ns in nsList:
        innerexit = []
        for uri in urisList:
            if str(uri).startswith(str(ns)):
                innerexit += [uri]
        exit += innerexit
    # add remaining uris (if any)
    for uri in urisList:
        if uri not in exit:
            exit += [uri]
    return exit
python
def sortByNamespacePrefix(urisList, nsList):
    """
    Given an ordered list of namespaces prefixes, order a list of uris based on that.
    Eg

    In [7]: ll
    Out[7]:
    [rdflib.term.URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'),
     rdflib.term.URIRef(u'http://www.w3.org/2000/01/rdf-schema#comment'),
     rdflib.term.URIRef(u'http://www.w3.org/2000/01/rdf-schema#label'),
     rdflib.term.URIRef(u'http://www.w3.org/2002/07/owl#equivalentClass')]

    In [8]: sortByNamespacePrefix(ll, [OWL.OWLNS, RDFS])
    Out[8]:
    [rdflib.term.URIRef(u'http://www.w3.org/2002/07/owl#equivalentClass'),
     rdflib.term.URIRef(u'http://www.w3.org/2000/01/rdf-schema#comment'),
     rdflib.term.URIRef(u'http://www.w3.org/2000/01/rdf-schema#label'),
     rdflib.term.URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#type')]
    """
    exit = []
    urisList = sort_uri_list_by_name(urisList)
    for ns in nsList:
        innerexit = []
        for uri in urisList:
            if str(uri).startswith(str(ns)):
                innerexit += [uri]
        exit += innerexit
    # add remaining uris (if any)
    for uri in urisList:
        if uri not in exit:
            exit += [uri]
    return exit
[ "def", "sortByNamespacePrefix", "(", "urisList", ",", "nsList", ")", ":", "exit", "=", "[", "]", "urisList", "=", "sort_uri_list_by_name", "(", "urisList", ")", "for", "ns", "in", "nsList", ":", "innerexit", "=", "[", "]", "for", "uri", "in", "urisList", ":", "if", "str", "(", "uri", ")", ".", "startswith", "(", "str", "(", "ns", ")", ")", ":", "innerexit", "+=", "[", "uri", "]", "exit", "+=", "innerexit", "# add remaining uris (if any)", "for", "uri", "in", "urisList", ":", "if", "uri", "not", "in", "exit", ":", "exit", "+=", "[", "uri", "]", "return", "exit" ]
Given an ordered list of namespaces prefixes, order a list of uris based on that. Eg In [7]: ll Out[7]: [rdflib.term.URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'), rdflib.term.URIRef(u'http://www.w3.org/2000/01/rdf-schema#comment'), rdflib.term.URIRef(u'http://www.w3.org/2000/01/rdf-schema#label'), rdflib.term.URIRef(u'http://www.w3.org/2002/07/owl#equivalentClass')] In [8]: sortByNamespacePrefix(ll, [OWL.OWLNS, RDFS]) Out[8]: [rdflib.term.URIRef(u'http://www.w3.org/2002/07/owl#equivalentClass'), rdflib.term.URIRef(u'http://www.w3.org/2000/01/rdf-schema#comment'), rdflib.term.URIRef(u'http://www.w3.org/2000/01/rdf-schema#label'), rdflib.term.URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#type')]
[ "Given", "an", "ordered", "list", "of", "namespaces", "prefixes", "order", "a", "list", "of", "uris", "based", "on", "that", ".", "Eg" ]
eb46cb13792b2b87f21babdf976996318eec7571
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/core/utils.py#L547-L581
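A runnable variant of the docstring example, using plain namespace strings instead of the OWL.OWLNS constant (which lives elsewhere in ontospy):

from rdflib import URIRef
from ontospy.core.utils import sortByNamespacePrefix

uris = [URIRef("http://www.w3.org/1999/02/22-rdf-syntax-ns#type"),
        URIRef("http://www.w3.org/2000/01/rdf-schema#label"),
        URIRef("http://www.w3.org/2002/07/owl#equivalentClass")]
ordered = sortByNamespacePrefix(
    uris, ["http://www.w3.org/2002/07/owl#", "http://www.w3.org/2000/01/rdf-schema#"])
# owl:equivalentClass first, then rdfs:label, then the remaining rdf:type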
17,352
lambdamusic/Ontospy
ontospy/core/utils.py
sort_uri_list_by_name
def sort_uri_list_by_name(uri_list, bypassNamespace=False):
    """
    Sorts a list of uris

    bypassNamespace:
        based on the last bit (usually the name after the namespace) of a uri
        It checks whether the last bit is specified using a # or just a /, eg:
            rdflib.URIRef('http://purl.org/ontology/mo/Vinyl'),
            rdflib.URIRef('http://purl.org/vocab/frbr/core#Work')
    """

    def get_last_bit(uri_string):
        try:
            x = uri_string.split("#")[1]
        except:
            x = uri_string.split("/")[-1]
        return x

    try:
        if bypassNamespace:
            return sorted(uri_list, key=lambda x: get_last_bit(x.__str__()))
        else:
            return sorted(uri_list)
    except:
        # TODO: do more testing.. maybe use a unicode-safe method instead of __str__
        print(
            "Error in <sort_uri_list_by_name>: possibly a UnicodeEncodeError")
        return uri_list
python
def sort_uri_list_by_name(uri_list, bypassNamespace=False):
    """
    Sorts a list of uris

    bypassNamespace:
        based on the last bit (usually the name after the namespace) of a uri
        It checks whether the last bit is specified using a # or just a /, eg:
            rdflib.URIRef('http://purl.org/ontology/mo/Vinyl'),
            rdflib.URIRef('http://purl.org/vocab/frbr/core#Work')
    """

    def get_last_bit(uri_string):
        try:
            x = uri_string.split("#")[1]
        except:
            x = uri_string.split("/")[-1]
        return x

    try:
        if bypassNamespace:
            return sorted(uri_list, key=lambda x: get_last_bit(x.__str__()))
        else:
            return sorted(uri_list)
    except:
        # TODO: do more testing.. maybe use a unicode-safe method instead of __str__
        print(
            "Error in <sort_uri_list_by_name>: possibly a UnicodeEncodeError")
        return uri_list
[ "def", "sort_uri_list_by_name", "(", "uri_list", ",", "bypassNamespace", "=", "False", ")", ":", "def", "get_last_bit", "(", "uri_string", ")", ":", "try", ":", "x", "=", "uri_string", ".", "split", "(", "\"#\"", ")", "[", "1", "]", "except", ":", "x", "=", "uri_string", ".", "split", "(", "\"/\"", ")", "[", "-", "1", "]", "return", "x", "try", ":", "if", "bypassNamespace", ":", "return", "sorted", "(", "uri_list", ",", "key", "=", "lambda", "x", ":", "get_last_bit", "(", "x", ".", "__str__", "(", ")", ")", ")", "else", ":", "return", "sorted", "(", "uri_list", ")", "except", ":", "# TODO: do more testing.. maybe use a unicode-safe method instead of __str__", "print", "(", "\"Error in <sort_uri_list_by_name>: possibly a UnicodeEncodeError\"", ")", "return", "uri_list" ]
Sorts a list of uris bypassNamespace: based on the last bit (usually the name after the namespace) of a uri It checks whether the last bit is specified using a # or just a /, eg: rdflib.URIRef('http://purl.org/ontology/mo/Vinyl'), rdflib.URIRef('http://purl.org/vocab/frbr/core#Work')
[ "Sorts", "a", "list", "of", "uris" ]
eb46cb13792b2b87f21babdf976996318eec7571
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/core/utils.py#L584-L612
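bypassNamespace sorts on the local name rather than on the full URI; a short check with two well-known URIs:

from rdflib import URIRef
from ontospy.core.utils import sort_uri_list_by_name

uris = [URIRef("http://xmlns.com/foaf/0.1/Agent"),
        URIRef("http://purl.org/dc/terms/title")]
print(sort_uri_list_by_name(uris))                        # full-URI sort: dcterms:title first
print(sort_uri_list_by_name(uris, bypassNamespace=True))  # local-name sort: Agent first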
17,353
lambdamusic/Ontospy
ontospy/core/utils.py
inferNamespacePrefix
def inferNamespacePrefix(aUri):
    """
    From a URI returns the last bit and simulates a namespace prefix when rendering the ontology.

    eg from <'http://www.w3.org/2008/05/skos#'>
        it returns the 'skos' string
    """
    stringa = aUri.__str__()
    try:
        prefix = stringa.replace("#", "").split("/")[-1]
    except:
        prefix = ""
    return prefix
python
def inferNamespacePrefix(aUri):
    """
    From a URI returns the last bit and simulates a namespace prefix when rendering the ontology.

    eg from <'http://www.w3.org/2008/05/skos#'>
        it returns the 'skos' string
    """
    stringa = aUri.__str__()
    try:
        prefix = stringa.replace("#", "").split("/")[-1]
    except:
        prefix = ""
    return prefix
[ "def", "inferNamespacePrefix", "(", "aUri", ")", ":", "stringa", "=", "aUri", ".", "__str__", "(", ")", "try", ":", "prefix", "=", "stringa", ".", "replace", "(", "\"#\"", ",", "\"\"", ")", ".", "split", "(", "\"/\"", ")", "[", "-", "1", "]", "except", ":", "prefix", "=", "\"\"", "return", "prefix" ]
From a URI returns the last bit and simulates a namespace prefix when rendering the ontology. eg from <'http://www.w3.org/2008/05/skos#'> it returns the 'skos' string
[ "From", "a", "URI", "returns", "the", "last", "bit", "and", "simulates", "a", "namespace", "prefix", "when", "rendering", "the", "ontology", "." ]
eb46cb13792b2b87f21babdf976996318eec7571
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/core/utils.py#L636-L648
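Reproducing the docstring example interactively:

from rdflib import URIRef
from ontospy.core.utils import inferNamespacePrefix

print(inferNamespacePrefix(URIRef("http://www.w3.org/2008/05/skos#")))        # -> 'skos'
print(inferNamespacePrefix(URIRef("http://www.w3.org/2000/01/rdf-schema#")))  # -> 'rdf-schema'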
17,354
lambdamusic/Ontospy
ontospy/core/utils.py
niceString2uri
def niceString2uri(aUriString, namespaces=None):
    """
    From a string representing a URI possibly with the namespace qname, returns a URI instance.

    gold:Citation ==> rdflib.term.URIRef(u'http://purl.org/linguistics/gold/Citation')

    Namespaces are a list
        [('xml', rdflib.URIRef('http://www.w3.org/XML/1998/namespace'))
        ('', rdflib.URIRef('http://cohereweb.net/ontology/cohere.owl#'))
        (u'owl', rdflib.URIRef('http://www.w3.org/2002/07/owl#'))
        ('rdfs', rdflib.URIRef('http://www.w3.org/2000/01/rdf-schema#'))
        ('rdf', rdflib.URIRef('http://www.w3.org/1999/02/22-rdf-syntax-ns#'))
        (u'xsd', rdflib.URIRef('http://www.w3.org/2001/XMLSchema#'))]
    """
    if not namespaces:
        namespaces = []

    for aNamespaceTuple in namespaces:
        if aNamespaceTuple[0] and aUriString.find(
                aNamespaceTuple[0].__str__() + ":") == 0:
            aUriString_name = aUriString.split(":")[1]
            return rdflib.term.URIRef(aNamespaceTuple[1] + aUriString_name)

    # we dont handle the 'base' URI case
    return rdflib.term.URIRef(aUriString)
python
def niceString2uri(aUriString, namespaces=None):
    """
    From a string representing a URI possibly with the namespace qname, returns a URI instance.

    gold:Citation ==> rdflib.term.URIRef(u'http://purl.org/linguistics/gold/Citation')

    Namespaces are a list
        [('xml', rdflib.URIRef('http://www.w3.org/XML/1998/namespace'))
        ('', rdflib.URIRef('http://cohereweb.net/ontology/cohere.owl#'))
        (u'owl', rdflib.URIRef('http://www.w3.org/2002/07/owl#'))
        ('rdfs', rdflib.URIRef('http://www.w3.org/2000/01/rdf-schema#'))
        ('rdf', rdflib.URIRef('http://www.w3.org/1999/02/22-rdf-syntax-ns#'))
        (u'xsd', rdflib.URIRef('http://www.w3.org/2001/XMLSchema#'))]
    """
    if not namespaces:
        namespaces = []

    for aNamespaceTuple in namespaces:
        if aNamespaceTuple[0] and aUriString.find(
                aNamespaceTuple[0].__str__() + ":") == 0:
            aUriString_name = aUriString.split(":")[1]
            return rdflib.term.URIRef(aNamespaceTuple[1] + aUriString_name)

    # we dont handle the 'base' URI case
    return rdflib.term.URIRef(aUriString)
[ "def", "niceString2uri", "(", "aUriString", ",", "namespaces", "=", "None", ")", ":", "if", "not", "namespaces", ":", "namespaces", "=", "[", "]", "for", "aNamespaceTuple", "in", "namespaces", ":", "if", "aNamespaceTuple", "[", "0", "]", "and", "aUriString", ".", "find", "(", "aNamespaceTuple", "[", "0", "]", ".", "__str__", "(", ")", "+", "\":\"", ")", "==", "0", ":", "aUriString_name", "=", "aUriString", ".", "split", "(", "\":\"", ")", "[", "1", "]", "return", "rdflib", ".", "term", ".", "URIRef", "(", "aNamespaceTuple", "[", "1", "]", "+", "aUriString_name", ")", "# we dont handle the 'base' URI case", "return", "rdflib", ".", "term", ".", "URIRef", "(", "aUriString", ")" ]
From a string representing a URI possibly with the namespace qname, returns a URI instance. gold:Citation ==> rdflib.term.URIRef(u'http://purl.org/linguistics/gold/Citation') Namespaces are a list [('xml', rdflib.URIRef('http://www.w3.org/XML/1998/namespace')) ('', rdflib.URIRef('http://cohereweb.net/ontology/cohere.owl#')) (u'owl', rdflib.URIRef('http://www.w3.org/2002/07/owl#')) ('rdfs', rdflib.URIRef('http://www.w3.org/2000/01/rdf-schema#')) ('rdf', rdflib.URIRef('http://www.w3.org/1999/02/22-rdf-syntax-ns#')) (u'xsd', rdflib.URIRef('http://www.w3.org/2001/XMLSchema#'))]
[ "From", "a", "string", "representing", "a", "URI", "possibly", "with", "the", "namespace", "qname", "returns", "a", "URI", "instance", "." ]
eb46cb13792b2b87f21babdf976996318eec7571
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/core/utils.py#L728-L755
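A minimal round-trip, building the namespaces list by hand:

import rdflib
from ontospy.core.utils import niceString2uri

namespaces = [('rdfs', rdflib.URIRef("http://www.w3.org/2000/01/rdf-schema#"))]
print(niceString2uri("rdfs:label", namespaces))
# -> rdflib.term.URIRef('http://www.w3.org/2000/01/rdf-schema#label')
print(niceString2uri("http://example.org/foo"))  # no qname match: returned as a URIRef as-is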
17,355
lambdamusic/Ontospy
ontospy/core/utils.py
shellPrintOverview
def shellPrintOverview(g, opts={'labels': False}):
    """ overview of graph invoked from command line

    @todo
    add pagination via something like this
    # import pydoc
    # pydoc.pager("SOME_VERY_LONG_TEXT")
    """
    ontologies = g.all_ontologies
    # get opts
    try:
        labels = opts['labels']
    except:
        labels = False

    print(Style.BRIGHT + "Namespaces\n-----------" + Style.RESET_ALL)
    if g.namespaces:
        for p, u in g.namespaces:
            row = Fore.GREEN + "%s" % p + Fore.BLACK + " %s" % u + Fore.RESET
            print(row)
    else:
        printDebug("None found", "comment")

    print(Style.BRIGHT + "\nOntologies\n-----------" + Style.RESET_ALL)
    if ontologies:
        for o in ontologies:
            o.printTriples()
    else:
        printDebug("None found", "comment")

    print(Style.BRIGHT + "\nClasses\n" + "-" * 10 + Style.RESET_ALL)
    if g.all_classes:
        g.printClassTree(showids=False, labels=labels)
    else:
        printDebug("None found", "comment")

    print(Style.BRIGHT + "\nProperties\n" + "-" * 10 + Style.RESET_ALL)
    if g.all_properties:
        g.printPropertyTree(showids=False, labels=labels)
    else:
        printDebug("None found", "comment")

    print(Style.BRIGHT + "\nSKOS Concepts\n" + "-" * 10 + Style.RESET_ALL)
    if g.all_skos_concepts:
        g.printSkosTree(showids=False, labels=labels)
    else:
        printDebug("None found", "comment")

    print(Style.BRIGHT + "\nSHACL Shapes\n" + "-" * 10 + Style.RESET_ALL)
    if g.all_shapes:
        for x in g.all_shapes:
            printDebug("%s" % (x.qname))
            # printDebug("%s" % (x.bestLabel()), "comment")
    else:
        printDebug("None found", "comment")
python
def shellPrintOverview(g, opts={'labels': False}):
    """ overview of graph invoked from command line

    @todo
    add pagination via something like this
    # import pydoc
    # pydoc.pager("SOME_VERY_LONG_TEXT")
    """
    ontologies = g.all_ontologies
    # get opts
    try:
        labels = opts['labels']
    except:
        labels = False

    print(Style.BRIGHT + "Namespaces\n-----------" + Style.RESET_ALL)
    if g.namespaces:
        for p, u in g.namespaces:
            row = Fore.GREEN + "%s" % p + Fore.BLACK + " %s" % u + Fore.RESET
            print(row)
    else:
        printDebug("None found", "comment")

    print(Style.BRIGHT + "\nOntologies\n-----------" + Style.RESET_ALL)
    if ontologies:
        for o in ontologies:
            o.printTriples()
    else:
        printDebug("None found", "comment")

    print(Style.BRIGHT + "\nClasses\n" + "-" * 10 + Style.RESET_ALL)
    if g.all_classes:
        g.printClassTree(showids=False, labels=labels)
    else:
        printDebug("None found", "comment")

    print(Style.BRIGHT + "\nProperties\n" + "-" * 10 + Style.RESET_ALL)
    if g.all_properties:
        g.printPropertyTree(showids=False, labels=labels)
    else:
        printDebug("None found", "comment")

    print(Style.BRIGHT + "\nSKOS Concepts\n" + "-" * 10 + Style.RESET_ALL)
    if g.all_skos_concepts:
        g.printSkosTree(showids=False, labels=labels)
    else:
        printDebug("None found", "comment")

    print(Style.BRIGHT + "\nSHACL Shapes\n" + "-" * 10 + Style.RESET_ALL)
    if g.all_shapes:
        for x in g.all_shapes:
            printDebug("%s" % (x.qname))
            # printDebug("%s" % (x.bestLabel()), "comment")
    else:
        printDebug("None found", "comment")
[ "def", "shellPrintOverview", "(", "g", ",", "opts", "=", "{", "'labels'", ":", "False", "}", ")", ":", "ontologies", "=", "g", ".", "all_ontologies", "# get opts", "try", ":", "labels", "=", "opts", "[", "'labels'", "]", "except", ":", "labels", "=", "False", "print", "(", "Style", ".", "BRIGHT", "+", "\"Namespaces\\n-----------\"", "+", "Style", ".", "RESET_ALL", ")", "if", "g", ".", "namespaces", ":", "for", "p", ",", "u", "in", "g", ".", "namespaces", ":", "row", "=", "Fore", ".", "GREEN", "+", "\"%s\"", "%", "p", "+", "Fore", ".", "BLACK", "+", "\" %s\"", "%", "u", "+", "Fore", ".", "RESET", "print", "(", "row", ")", "else", ":", "printDebug", "(", "\"None found\"", ",", "\"comment\"", ")", "print", "(", "Style", ".", "BRIGHT", "+", "\"\\nOntologies\\n-----------\"", "+", "Style", ".", "RESET_ALL", ")", "if", "ontologies", ":", "for", "o", "in", "ontologies", ":", "o", ".", "printTriples", "(", ")", "else", ":", "printDebug", "(", "\"None found\"", ",", "\"comment\"", ")", "print", "(", "Style", ".", "BRIGHT", "+", "\"\\nClasses\\n\"", "+", "\"-\"", "*", "10", "+", "Style", ".", "RESET_ALL", ")", "if", "g", ".", "all_classes", ":", "g", ".", "printClassTree", "(", "showids", "=", "False", ",", "labels", "=", "labels", ")", "else", ":", "printDebug", "(", "\"None found\"", ",", "\"comment\"", ")", "print", "(", "Style", ".", "BRIGHT", "+", "\"\\nProperties\\n\"", "+", "\"-\"", "*", "10", "+", "Style", ".", "RESET_ALL", ")", "if", "g", ".", "all_properties", ":", "g", ".", "printPropertyTree", "(", "showids", "=", "False", ",", "labels", "=", "labels", ")", "else", ":", "printDebug", "(", "\"None found\"", ",", "\"comment\"", ")", "print", "(", "Style", ".", "BRIGHT", "+", "\"\\nSKOS Concepts\\n\"", "+", "\"-\"", "*", "10", "+", "Style", ".", "RESET_ALL", ")", "if", "g", ".", "all_skos_concepts", ":", "g", ".", "printSkosTree", "(", "showids", "=", "False", ",", "labels", "=", "labels", ")", "else", ":", "printDebug", "(", "\"None found\"", ",", "\"comment\"", ")", "print", "(", "Style", ".", "BRIGHT", "+", "\"\\nSHACL Shapes\\n\"", "+", "\"-\"", "*", "10", "+", "Style", ".", "RESET_ALL", ")", "if", "g", ".", "all_shapes", ":", "for", "x", "in", "g", ".", "all_shapes", ":", "printDebug", "(", "\"%s\"", "%", "(", "x", ".", "qname", ")", ")", "# printDebug(\"%s\" % (x.bestLabel()), \"comment\")", "else", ":", "printDebug", "(", "\"None found\"", ",", "\"comment\"", ")" ]
overview of graph invoked from command line @todo add pagination via something like this # import pydoc # pydoc.pager("SOME_VERY_LONG_TEXT")
[ "overview", "of", "graph", "invoked", "from", "command", "line" ]
eb46cb13792b2b87f21babdf976996318eec7571
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/core/utils.py#L804-L863
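In a script, the same overview that the CLI prints can be produced directly; the FOAF URL is only an example and the top-level Ontospy import is an assumption:

from ontospy import Ontospy            # assumed top-level import
from ontospy.core.utils import shellPrintOverview

g = Ontospy("http://xmlns.com/foaf/0.1/")
shellPrintOverview(g, {'labels': True})   # namespaces, ontologies, class/property trees, SKOS, SHACL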
17,356
lambdamusic/Ontospy
ontospy/core/utils.py
try_sort_fmt_opts
def try_sort_fmt_opts(rdf_format_opts_list, uri):
    """reorder fmt options based on uri file type suffix - if available - so to test most likely serialization first when parsing some RDF

    NOTE this is not very nice as it is hardcoded and assumes the origin serializations
    to be this: ['turtle', 'xml', 'n3', 'nt', 'json-ld', 'rdfa']
    """
    filename, file_extension = os.path.splitext(uri)
    # print(filename, file_extension)
    if file_extension == ".ttl" or file_extension == ".turtle":
        return ['turtle', 'n3', 'nt', 'json-ld', 'rdfa', 'xml']
    elif file_extension == ".xml" or file_extension == ".rdf":
        return ['xml', 'turtle', 'n3', 'nt', 'json-ld', 'rdfa']
    elif file_extension == ".nt" or file_extension == ".n3":
        return ['n3', 'nt', 'turtle', 'xml', 'json-ld', 'rdfa']
    elif file_extension == ".json" or file_extension == ".jsonld":
        return ['json-ld', 'rdfa', 'n3', 'nt', 'turtle', 'xml']
    elif file_extension == ".rdfa":
        return ['rdfa', 'json-ld', 'n3', 'nt', 'turtle', 'xml']
    else:
        return rdf_format_opts_list
python
def try_sort_fmt_opts(rdf_format_opts_list, uri):
    """reorder fmt options based on uri file type suffix - if available - so to test most likely serialization first when parsing some RDF

    NOTE this is not very nice as it is hardcoded and assumes the origin serializations
    to be this: ['turtle', 'xml', 'n3', 'nt', 'json-ld', 'rdfa']
    """
    filename, file_extension = os.path.splitext(uri)
    # print(filename, file_extension)
    if file_extension == ".ttl" or file_extension == ".turtle":
        return ['turtle', 'n3', 'nt', 'json-ld', 'rdfa', 'xml']
    elif file_extension == ".xml" or file_extension == ".rdf":
        return ['xml', 'turtle', 'n3', 'nt', 'json-ld', 'rdfa']
    elif file_extension == ".nt" or file_extension == ".n3":
        return ['n3', 'nt', 'turtle', 'xml', 'json-ld', 'rdfa']
    elif file_extension == ".json" or file_extension == ".jsonld":
        return ['json-ld', 'rdfa', 'n3', 'nt', 'turtle', 'xml']
    elif file_extension == ".rdfa":
        return ['rdfa', 'json-ld', 'n3', 'nt', 'turtle', 'xml']
    else:
        return rdf_format_opts_list
[ "def", "try_sort_fmt_opts", "(", "rdf_format_opts_list", ",", "uri", ")", ":", "filename", ",", "file_extension", "=", "os", ".", "path", ".", "splitext", "(", "uri", ")", "# print(filename, file_extension)", "if", "file_extension", "==", "\".ttl\"", "or", "file_extension", "==", "\".turtle\"", ":", "return", "[", "'turtle'", ",", "'n3'", ",", "'nt'", ",", "'json-ld'", ",", "'rdfa'", ",", "'xml'", "]", "elif", "file_extension", "==", "\".xml\"", "or", "file_extension", "==", "\".rdf\"", ":", "return", "[", "'xml'", ",", "'turtle'", ",", "'n3'", ",", "'nt'", ",", "'json-ld'", ",", "'rdfa'", "]", "elif", "file_extension", "==", "\".nt\"", "or", "file_extension", "==", "\".n3\"", ":", "return", "[", "'n3'", ",", "'nt'", ",", "'turtle'", ",", "'xml'", ",", "'json-ld'", ",", "'rdfa'", "]", "elif", "file_extension", "==", "\".json\"", "or", "file_extension", "==", "\".jsonld\"", ":", "return", "[", "'json-ld'", ",", "'rdfa'", ",", "'n3'", ",", "'nt'", ",", "'turtle'", ",", "'xml'", ",", "]", "elif", "file_extension", "==", "\".rdfa\"", ":", "return", "[", "'rdfa'", ",", "'json-ld'", ",", "'n3'", ",", "'nt'", ",", "'turtle'", ",", "'xml'", ",", "]", "else", ":", "return", "rdf_format_opts_list" ]
reorder fmt options based on uri file type suffix - if available - so to test most likely serialization first when parsing some RDF NOTE this is not very nice as it is hardcoded and assumes the origin serializations to be this: ['turtle', 'xml', 'n3', 'nt', 'json-ld', 'rdfa']
[ "reorder", "fmt", "options", "based", "on", "uri", "file", "type", "suffix", "-", "if", "available", "-", "so", "to", "test", "most", "likely", "serialization", "first", "when", "parsing", "some", "RDF" ]
eb46cb13792b2b87f21babdf976996318eec7571
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/core/utils.py#L893-L926
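The reordering is driven purely by the file extension, so it can be checked without any RDF at hand (the URLs below are made up):

from ontospy.core.utils import try_sort_fmt_opts

defaults = ['turtle', 'xml', 'n3', 'nt', 'json-ld', 'rdfa']
print(try_sort_fmt_opts(defaults, "http://example.org/onto.ttl"))  # turtle tried first
print(try_sort_fmt_opts(defaults, "http://example.org/onto"))      # no suffix: defaults unchanged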
17,357
lambdamusic/Ontospy
ontospy/ontodocs/builder.py
ask_visualization
def ask_visualization():
    """
    ask user which viz output to use
    """
    printDebug(
        "Please choose an output format for the ontology visualization: (q=quit)\n",
        "important")
    while True:
        text = ""
        for viz in VISUALIZATIONS_LIST:
            text += "%d) %s\n" % (VISUALIZATIONS_LIST.index(viz) + 1,
                                  viz['Title'])
        var = input(text + ">")
        if var == "q":
            return ""
        else:
            try:
                n = int(var) - 1
                test = VISUALIZATIONS_LIST[
                    n]  # throw exception if number wrong
                return n
            except:
                printDebug("Invalid selection. Please try again.", "red")
                continue
python
def ask_visualization():
    """
    ask user which viz output to use
    """
    printDebug(
        "Please choose an output format for the ontology visualization: (q=quit)\n",
        "important")
    while True:
        text = ""
        for viz in VISUALIZATIONS_LIST:
            text += "%d) %s\n" % (VISUALIZATIONS_LIST.index(viz) + 1,
                                  viz['Title'])
        var = input(text + ">")
        if var == "q":
            return ""
        else:
            try:
                n = int(var) - 1
                test = VISUALIZATIONS_LIST[
                    n]  # throw exception if number wrong
                return n
            except:
                printDebug("Invalid selection. Please try again.", "red")
                continue
[ "def", "ask_visualization", "(", ")", ":", "printDebug", "(", "\"Please choose an output format for the ontology visualization: (q=quit)\\n\"", ",", "\"important\"", ")", "while", "True", ":", "text", "=", "\"\"", "for", "viz", "in", "VISUALIZATIONS_LIST", ":", "text", "+=", "\"%d) %s\\n\"", "%", "(", "VISUALIZATIONS_LIST", ".", "index", "(", "viz", ")", "+", "1", ",", "viz", "[", "'Title'", "]", ")", "var", "=", "input", "(", "text", "+", "\">\"", ")", "if", "var", "==", "\"q\"", ":", "return", "\"\"", "else", ":", "try", ":", "n", "=", "int", "(", "var", ")", "-", "1", "test", "=", "VISUALIZATIONS_LIST", "[", "n", "]", "# throw exception if number wrong", "return", "n", "except", ":", "printDebug", "(", "\"Invalid selection. Please try again.\"", ",", "\"red\"", ")", "continue" ]
ask user which viz output to use
[ "ask", "user", "which", "viz", "output", "to", "use" ]
eb46cb13792b2b87f21babdf976996318eec7571
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/ontodocs/builder.py#L111-L134
17,358
lambdamusic/Ontospy
ontospy/ontodocs/builder.py
select_visualization
def select_visualization(n):
    """
    get viz choice based on numerical index
    """
    try:
        n = int(n) - 1
        test = VISUALIZATIONS_LIST[n]  # throw exception if number wrong
        return n
    except:
        printDebug("Invalid viz-type option. Valid options are:", "red")
        show_types()
        raise SystemExit(1)
python
def select_visualization(n):
    """
    get viz choice based on numerical index
    """
    try:
        n = int(n) - 1
        test = VISUALIZATIONS_LIST[n]  # throw exception if number wrong
        return n
    except:
        printDebug("Invalid viz-type option. Valid options are:", "red")
        show_types()
        raise SystemExit(1)
[ "def", "select_visualization", "(", "n", ")", ":", "try", ":", "n", "=", "int", "(", "n", ")", "-", "1", "test", "=", "VISUALIZATIONS_LIST", "[", "n", "]", "# throw exception if number wrong", "return", "n", "except", ":", "printDebug", "(", "\"Invalid viz-type option. Valid options are:\"", ",", "\"red\"", ")", "show_types", "(", ")", "raise", "SystemExit", "(", "1", ")" ]
get viz choice based on numerical index
[ "get", "viz", "choice", "based", "on", "numerical", "index" ]
eb46cb13792b2b87f21babdf976996318eec7571
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/ontodocs/builder.py#L137-L148
17,359
lambdamusic/Ontospy
ontospy/core/actions.py
action_analyze
def action_analyze(sources, endpoint=None, print_opts=False, verbose=False, extra=False, raw=False):
    """
    Load up a model into ontospy and analyze it
    """
    for x in sources:
        click.secho("Parsing %s..." % str(x), fg='white')

    if extra:
        hide_base_schemas = False
        hide_implicit_types = False
        hide_implicit_preds = False
    else:
        hide_base_schemas = True
        hide_implicit_types = True
        hide_implicit_preds = True

    if raw:
        o = Ontospy(uri_or_path=sources, verbose=verbose, build_all=False)
        s = o.serialize()
        print(s)
        return
    elif endpoint:
        g = Ontospy(
            sparql_endpoint=sources[0],
            verbose=verbose,
            hide_base_schemas=hide_base_schemas,
            hide_implicit_types=hide_implicit_types,
            hide_implicit_preds=hide_implicit_preds)
        printDebug("Extracting classes info")
        g.build_classes()
        printDebug("..done")
        printDebug("Extracting properties info")
        g.build_properties()
        printDebug("..done")
    else:
        g = Ontospy(
            uri_or_path=sources,
            verbose=verbose,
            hide_base_schemas=hide_base_schemas,
            hide_implicit_types=hide_implicit_types,
            hide_implicit_preds=hide_implicit_preds)

    shellPrintOverview(g, print_opts)
python
def action_analyze(sources, endpoint=None, print_opts=False, verbose=False, extra=False, raw=False):
    """
    Load up a model into ontospy and analyze it
    """
    for x in sources:
        click.secho("Parsing %s..." % str(x), fg='white')

    if extra:
        hide_base_schemas = False
        hide_implicit_types = False
        hide_implicit_preds = False
    else:
        hide_base_schemas = True
        hide_implicit_types = True
        hide_implicit_preds = True

    if raw:
        o = Ontospy(uri_or_path=sources, verbose=verbose, build_all=False)
        s = o.serialize()
        print(s)
        return
    elif endpoint:
        g = Ontospy(
            sparql_endpoint=sources[0],
            verbose=verbose,
            hide_base_schemas=hide_base_schemas,
            hide_implicit_types=hide_implicit_types,
            hide_implicit_preds=hide_implicit_preds)
        printDebug("Extracting classes info")
        g.build_classes()
        printDebug("..done")
        printDebug("Extracting properties info")
        g.build_properties()
        printDebug("..done")
    else:
        g = Ontospy(
            uri_or_path=sources,
            verbose=verbose,
            hide_base_schemas=hide_base_schemas,
            hide_implicit_types=hide_implicit_types,
            hide_implicit_preds=hide_implicit_preds)

    shellPrintOverview(g, print_opts)
[ "def", "action_analyze", "(", "sources", ",", "endpoint", "=", "None", ",", "print_opts", "=", "False", ",", "verbose", "=", "False", ",", "extra", "=", "False", ",", "raw", "=", "False", ")", ":", "for", "x", "in", "sources", ":", "click", ".", "secho", "(", "\"Parsing %s...\"", "%", "str", "(", "x", ")", ",", "fg", "=", "'white'", ")", "if", "extra", ":", "hide_base_schemas", "=", "False", "hide_implicit_types", "=", "False", "hide_implicit_preds", "=", "False", "else", ":", "hide_base_schemas", "=", "True", "hide_implicit_types", "=", "True", "hide_implicit_preds", "=", "True", "if", "raw", ":", "o", "=", "Ontospy", "(", "uri_or_path", "=", "sources", ",", "verbose", "=", "verbose", ",", "build_all", "=", "False", ")", "s", "=", "o", ".", "serialize", "(", ")", "print", "(", "s", ")", "return", "elif", "endpoint", ":", "g", "=", "Ontospy", "(", "sparql_endpoint", "=", "sources", "[", "0", "]", ",", "verbose", "=", "verbose", ",", "hide_base_schemas", "=", "hide_base_schemas", ",", "hide_implicit_types", "=", "hide_implicit_types", ",", "hide_implicit_preds", "=", "hide_implicit_preds", ")", "printDebug", "(", "\"Extracting classes info\"", ")", "g", ".", "build_classes", "(", ")", "printDebug", "(", "\"..done\"", ")", "printDebug", "(", "\"Extracting properties info\"", ")", "g", ".", "build_properties", "(", ")", "printDebug", "(", "\"..done\"", ")", "else", ":", "g", "=", "Ontospy", "(", "uri_or_path", "=", "sources", ",", "verbose", "=", "verbose", ",", "hide_base_schemas", "=", "hide_base_schemas", ",", "hide_implicit_types", "=", "hide_implicit_types", ",", "hide_implicit_preds", "=", "hide_implicit_preds", ")", "shellPrintOverview", "(", "g", ",", "print_opts", ")" ]
Load up a model into ontospy and analyze it
[ "Load", "up", "a", "model", "into", "ontospy", "and", "analyze", "it" ]
eb46cb13792b2b87f21babdf976996318eec7571
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/core/actions.py#L64-L111
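The function is CLI-oriented but can also be called from Python; the import path follows the record's path and the file path below is hypothetical:

from ontospy.core.actions import action_analyze

# print the overview of a local vocabulary file (hypothetical path)
action_analyze(["data/foaf.rdf"], print_opts={'labels': True})

# or dump the raw merged serialization instead of the summary
action_analyze(["data/foaf.rdf"], raw=True)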
17,360
lambdamusic/Ontospy
ontospy/core/actions.py
action_listlocal
def action_listlocal(all_details=True):
    " select a file from the local repo "

    options = get_localontologies()
    counter = 1
    # printDebug("------------------", 'comment')
    if not options:
        printDebug(
            "Your local library is empty. Use 'ontospy lib --bootstrap' to add some ontologies to it."
        )
        return
    else:
        if all_details:
            _print_table_ontologies()
        else:
            _print2cols_ontologies()
        while True:
            printDebug(
                "------------------\nSelect a model by typing its number: (enter=quit)",
                "important")
            var = input()
            if var == "" or var == "q":
                return None
            else:
                try:
                    _id = int(var)
                    ontouri = options[_id - 1]
                    # printDebug("\nYou selected:", "comment")
                    printDebug(
                        "---------\nYou selected: " + ontouri + "\n---------",
                        "green")
                    return ontouri
                except:
                    printDebug("Please enter a valid option.", "comment")
                    continue
python
def action_listlocal(all_details=True):
    " select a file from the local repo "

    options = get_localontologies()
    counter = 1
    # printDebug("------------------", 'comment')
    if not options:
        printDebug(
            "Your local library is empty. Use 'ontospy lib --bootstrap' to add some ontologies to it."
        )
        return
    else:
        if all_details:
            _print_table_ontologies()
        else:
            _print2cols_ontologies()
        while True:
            printDebug(
                "------------------\nSelect a model by typing its number: (enter=quit)",
                "important")
            var = input()
            if var == "" or var == "q":
                return None
            else:
                try:
                    _id = int(var)
                    ontouri = options[_id - 1]
                    # printDebug("\nYou selected:", "comment")
                    printDebug(
                        "---------\nYou selected: " + ontouri + "\n---------",
                        "green")
                    return ontouri
                except:
                    printDebug("Please enter a valid option.", "comment")
                    continue
[ "def", "action_listlocal", "(", "all_details", "=", "True", ")", ":", "options", "=", "get_localontologies", "(", ")", "counter", "=", "1", "# printDebug(\"------------------\", 'comment')\r", "if", "not", "options", ":", "printDebug", "(", "\"Your local library is empty. Use 'ontospy lib --bootstrap' to add some ontologies to it.\"", ")", "return", "else", ":", "if", "all_details", ":", "_print_table_ontologies", "(", ")", "else", ":", "_print2cols_ontologies", "(", ")", "while", "True", ":", "printDebug", "(", "\"------------------\\nSelect a model by typing its number: (enter=quit)\"", ",", "\"important\"", ")", "var", "=", "input", "(", ")", "if", "var", "==", "\"\"", "or", "var", "==", "\"q\"", ":", "return", "None", "else", ":", "try", ":", "_id", "=", "int", "(", "var", ")", "ontouri", "=", "options", "[", "_id", "-", "1", "]", "# printDebug(\"\\nYou selected:\", \"comment\")\r", "printDebug", "(", "\"---------\\nYou selected: \"", "+", "ontouri", "+", "\"\\n---------\"", ",", "\"green\"", ")", "return", "ontouri", "except", ":", "printDebug", "(", "\"Please enter a valid option.\"", ",", "\"comment\"", ")", "continue" ]
select a file from the local repo
[ "select", "a", "file", "from", "the", "local", "repo" ]
eb46cb13792b2b87f21babdf976996318eec7571
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/core/actions.py#L156-L192
17,361
lambdamusic/Ontospy
ontospy/core/actions.py
action_import
def action_import(location, verbose=True):
    """
    Import files into the local repo
    """
    location = str(location)  # prevent errors from unicode being passed

    # 1) extract file from location and save locally
    ONTOSPY_LOCAL_MODELS = get_home_location()
    fullpath = ""
    try:
        if location.startswith("www."):  #support for lazy people
            location = "http://%s" % str(location)
        if location.startswith("http"):
            # print("here")
            headers = {'Accept': "application/rdf+xml"}
            try:
                # Py2
                req = urllib2.request(location, headers=headers)
                res = urllib2.urlopen(req)
            except:
                # Py3
                req = urllib.request.Request(location, headers=headers)
                res = urlopen(req)
            final_location = res.geturl()  # after 303 redirects
            printDebug("Saving data from <%s>" % final_location, "green")
            # filename = final_location.split("/")[-1] or final_location.split("/")[-2]
            filename = location.replace("http://", "").replace("/", "_")
            if not filename.lower().endswith(
                ('.rdf', '.owl', '.rdfs', '.ttl', '.n3')):
                filename = filename + ".rdf"
            fullpath = ONTOSPY_LOCAL_MODELS + "/" + filename  # 2016-04-08
            # fullpath = ONTOSPY_LOCAL_MODELS + filename
            # print("==DEBUG", final_location, "**", filename,"**", fullpath)
            file_ = open(fullpath, 'wb')
            file_.write(res.read())
            file_.close()
        else:
            if os.path.isfile(location):
                filename = location.split("/")[-1] or location.split("/")[-2]
                fullpath = ONTOSPY_LOCAL_MODELS + "/" + filename
                shutil.copy(location, fullpath)
            else:
                raise ValueError('The location specified is not a file.')
        # print("Saved local copy")
    except:
        printDebug(
            "Error retrieving file. Please make sure <%s> is a valid location."
            % location, "important")
        if os.path.exists(fullpath):
            os.remove(fullpath)
        return None

    try:
        g = Ontospy(fullpath, verbose=verbose)
        # printDebug("----------")
    except:
        g = None
        if os.path.exists(fullpath):
            os.remove(fullpath)
        printDebug(
            "Error parsing file. Please make sure %s contains valid RDF." %
            location, "important")

    if g:
        printDebug("Caching...", "red")
        do_pickle_ontology(filename, g)
        printDebug("----------\n...completed!", "important")

    # finally...
    return g
python
def action_import(location, verbose=True):
    """
    Import files into the local repo
    """
    location = str(location)  # prevent errors from unicode being passed

    # 1) extract file from location and save locally
    ONTOSPY_LOCAL_MODELS = get_home_location()
    fullpath = ""
    try:
        if location.startswith("www."):  #support for lazy people
            location = "http://%s" % str(location)
        if location.startswith("http"):
            # print("here")
            headers = {'Accept': "application/rdf+xml"}
            try:
                # Py2
                req = urllib2.request(location, headers=headers)
                res = urllib2.urlopen(req)
            except:
                # Py3
                req = urllib.request.Request(location, headers=headers)
                res = urlopen(req)
            final_location = res.geturl()  # after 303 redirects
            printDebug("Saving data from <%s>" % final_location, "green")
            # filename = final_location.split("/")[-1] or final_location.split("/")[-2]
            filename = location.replace("http://", "").replace("/", "_")
            if not filename.lower().endswith(
                ('.rdf', '.owl', '.rdfs', '.ttl', '.n3')):
                filename = filename + ".rdf"
            fullpath = ONTOSPY_LOCAL_MODELS + "/" + filename  # 2016-04-08
            # fullpath = ONTOSPY_LOCAL_MODELS + filename
            # print("==DEBUG", final_location, "**", filename,"**", fullpath)
            file_ = open(fullpath, 'wb')
            file_.write(res.read())
            file_.close()
        else:
            if os.path.isfile(location):
                filename = location.split("/")[-1] or location.split("/")[-2]
                fullpath = ONTOSPY_LOCAL_MODELS + "/" + filename
                shutil.copy(location, fullpath)
            else:
                raise ValueError('The location specified is not a file.')
        # print("Saved local copy")
    except:
        printDebug(
            "Error retrieving file. Please make sure <%s> is a valid location."
            % location, "important")
        if os.path.exists(fullpath):
            os.remove(fullpath)
        return None

    try:
        g = Ontospy(fullpath, verbose=verbose)
        # printDebug("----------")
    except:
        g = None
        if os.path.exists(fullpath):
            os.remove(fullpath)
        printDebug(
            "Error parsing file. Please make sure %s contains valid RDF." %
            location, "important")

    if g:
        printDebug("Caching...", "red")
        do_pickle_ontology(filename, g)
        printDebug("----------\n...completed!", "important")

    # finally...
    return g
[ "def", "action_import", "(", "location", ",", "verbose", "=", "True", ")", ":", "location", "=", "str", "(", "location", ")", "# prevent errors from unicode being passed\r", "# 1) extract file from location and save locally\r", "ONTOSPY_LOCAL_MODELS", "=", "get_home_location", "(", ")", "fullpath", "=", "\"\"", "try", ":", "if", "location", ".", "startswith", "(", "\"www.\"", ")", ":", "#support for lazy people\r", "location", "=", "\"http://%s\"", "%", "str", "(", "location", ")", "if", "location", ".", "startswith", "(", "\"http\"", ")", ":", "# print(\"here\")\r", "headers", "=", "{", "'Accept'", ":", "\"application/rdf+xml\"", "}", "try", ":", "# Py2\r", "req", "=", "urllib2", ".", "request", "(", "location", ",", "headers", "=", "headers", ")", "res", "=", "urllib2", ".", "urlopen", "(", "req", ")", "except", ":", "# Py3\r", "req", "=", "urllib", ".", "request", ".", "Request", "(", "location", ",", "headers", "=", "headers", ")", "res", "=", "urlopen", "(", "req", ")", "final_location", "=", "res", ".", "geturl", "(", ")", "# after 303 redirects\r", "printDebug", "(", "\"Saving data from <%s>\"", "%", "final_location", ",", "\"green\"", ")", "# filename = final_location.split(\"/\")[-1] or final_location.split(\"/\")[-2]\r", "filename", "=", "location", ".", "replace", "(", "\"http://\"", ",", "\"\"", ")", ".", "replace", "(", "\"/\"", ",", "\"_\"", ")", "if", "not", "filename", ".", "lower", "(", ")", ".", "endswith", "(", "(", "'.rdf'", ",", "'.owl'", ",", "'.rdfs'", ",", "'.ttl'", ",", "'.n3'", ")", ")", ":", "filename", "=", "filename", "+", "\".rdf\"", "fullpath", "=", "ONTOSPY_LOCAL_MODELS", "+", "\"/\"", "+", "filename", "# 2016-04-08\r", "# fullpath = ONTOSPY_LOCAL_MODELS + filename\r", "# print(\"==DEBUG\", final_location, \"**\", filename,\"**\", fullpath)\r", "file_", "=", "open", "(", "fullpath", ",", "'wb'", ")", "file_", ".", "write", "(", "res", ".", "read", "(", ")", ")", "file_", ".", "close", "(", ")", "else", ":", "if", "os", ".", "path", ".", "isfile", "(", "location", ")", ":", "filename", "=", "location", ".", "split", "(", "\"/\"", ")", "[", "-", "1", "]", "or", "location", ".", "split", "(", "\"/\"", ")", "[", "-", "2", "]", "fullpath", "=", "ONTOSPY_LOCAL_MODELS", "+", "\"/\"", "+", "filename", "shutil", ".", "copy", "(", "location", ",", "fullpath", ")", "else", ":", "raise", "ValueError", "(", "'The location specified is not a file.'", ")", "# print(\"Saved local copy\")\r", "except", ":", "printDebug", "(", "\"Error retrieving file. Please make sure <%s> is a valid location.\"", "%", "location", ",", "\"important\"", ")", "if", "os", ".", "path", ".", "exists", "(", "fullpath", ")", ":", "os", ".", "remove", "(", "fullpath", ")", "return", "None", "try", ":", "g", "=", "Ontospy", "(", "fullpath", ",", "verbose", "=", "verbose", ")", "# printDebug(\"----------\")\r", "except", ":", "g", "=", "None", "if", "os", ".", "path", ".", "exists", "(", "fullpath", ")", ":", "os", ".", "remove", "(", "fullpath", ")", "printDebug", "(", "\"Error parsing file. Please make sure %s contains valid RDF.\"", "%", "location", ",", "\"important\"", ")", "if", "g", ":", "printDebug", "(", "\"Caching...\"", ",", "\"red\"", ")", "do_pickle_ontology", "(", "filename", ",", "g", ")", "printDebug", "(", "\"----------\\n...completed!\"", ",", "\"important\"", ")", "# finally...\r", "return", "g" ]
Import files into the local repo
[ "Import", "files", "into", "the", "local", "repo" ]
eb46cb13792b2b87f21babdf976996318eec7571
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/core/actions.py#L243-L316
17,362
lambdamusic/Ontospy
ontospy/core/actions.py
action_import_folder
def action_import_folder(location):
    """Try to import all files from a local folder"""
    if os.path.isdir(location):
        onlyfiles = [
            f for f in os.listdir(location)
            if os.path.isfile(os.path.join(location, f))
        ]
        for file in onlyfiles:
            if not file.startswith("."):
                filepath = os.path.join(location, file)
                # print(Fore.RED + "\n---------\n" + filepath + "\n---------" + Style.RESET_ALL)
                click.secho(
                    "\n---------\n" + filepath + "\n---------", fg='red')
                return action_import(filepath)
    else:
        printDebug("Not a valid directory", "important")
        return None
python
def action_import_folder(location):
    """Try to import all files from a local folder"""
    if os.path.isdir(location):
        onlyfiles = [
            f for f in os.listdir(location)
            if os.path.isfile(os.path.join(location, f))
        ]
        for file in onlyfiles:
            if not file.startswith("."):
                filepath = os.path.join(location, file)
                # print(Fore.RED + "\n---------\n" + filepath + "\n---------" + Style.RESET_ALL)
                click.secho(
                    "\n---------\n" + filepath + "\n---------", fg='red')
                return action_import(filepath)
    else:
        printDebug("Not a valid directory", "important")
        return None
[ "def", "action_import_folder", "(", "location", ")", ":", "if", "os", ".", "path", ".", "isdir", "(", "location", ")", ":", "onlyfiles", "=", "[", "f", "for", "f", "in", "os", ".", "listdir", "(", "location", ")", "if", "os", ".", "path", ".", "isfile", "(", "os", ".", "path", ".", "join", "(", "location", ",", "f", ")", ")", "]", "for", "file", "in", "onlyfiles", ":", "if", "not", "file", ".", "startswith", "(", "\".\"", ")", ":", "filepath", "=", "os", ".", "path", ".", "join", "(", "location", ",", "file", ")", "# print(Fore.RED + \"\\n---------\\n\" + filepath + \"\\n---------\" + Style.RESET_ALL)\r", "click", ".", "secho", "(", "\"\\n---------\\n\"", "+", "filepath", "+", "\"\\n---------\"", ",", "fg", "=", "'red'", ")", "return", "action_import", "(", "filepath", ")", "else", ":", "printDebug", "(", "\"Not a valid directory\"", ",", "\"important\"", ")", "return", "None" ]
Try to import all files from a local folder
[ "Try", "to", "import", "all", "files", "from", "a", "local", "folder" ]
eb46cb13792b2b87f21babdf976996318eec7571
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/core/actions.py#L319-L336
17,363
lambdamusic/Ontospy
ontospy/core/actions.py
action_webimport
def action_webimport(hrlinetop=False):
    """ select from the available online directories for import """
    DIR_OPTIONS = {1: "http://lov.okfn.org", 2: "http://prefix.cc/popular/"}
    selection = None
    while True:
        if hrlinetop:
            printDebug("----------")
        text = "Please select which online directory to scan: (enter=quit)\n"
        for x in DIR_OPTIONS:
            text += "%d) %s\n" % (x, DIR_OPTIONS[x])
        var = input(text + "> ")
        if var == "q" or var == "":
            return None
        else:
            try:
                selection = int(var)
                test = DIR_OPTIONS[selection]  #throw exception if number wrong
                break
            except:
                printDebug("Invalid selection. Please try again.", "important")
                continue

    printDebug("----------")
    text = "Search for a specific keyword? (enter=show all)\n"
    var = input(text + "> ")
    keyword = var

    try:
        if selection == 1:
            _import_LOV(keyword=keyword)
        elif selection == 2:
            _import_PREFIXCC(keyword=keyword)
    except:
        printDebug("Sorry, the online repository seems to be unreachable.")

    return True
python
def action_webimport(hrlinetop=False):
    """ select from the available online directories for import """
    DIR_OPTIONS = {1: "http://lov.okfn.org", 2: "http://prefix.cc/popular/"}
    selection = None
    while True:
        if hrlinetop:
            printDebug("----------")
        text = "Please select which online directory to scan: (enter=quit)\n"
        for x in DIR_OPTIONS:
            text += "%d) %s\n" % (x, DIR_OPTIONS[x])
        var = input(text + "> ")
        if var == "q" or var == "":
            return None
        else:
            try:
                selection = int(var)
                test = DIR_OPTIONS[selection]  #throw exception if number wrong
                break
            except:
                printDebug("Invalid selection. Please try again.", "important")
                continue

    printDebug("----------")
    text = "Search for a specific keyword? (enter=show all)\n"
    var = input(text + "> ")
    keyword = var

    try:
        if selection == 1:
            _import_LOV(keyword=keyword)
        elif selection == 2:
            _import_PREFIXCC(keyword=keyword)
    except:
        printDebug("Sorry, the online repository seems to be unreachable.")

    return True
[ "def", "action_webimport", "(", "hrlinetop", "=", "False", ")", ":", "DIR_OPTIONS", "=", "{", "1", ":", "\"http://lov.okfn.org\"", ",", "2", ":", "\"http://prefix.cc/popular/\"", "}", "selection", "=", "None", "while", "True", ":", "if", "hrlinetop", ":", "printDebug", "(", "\"----------\"", ")", "text", "=", "\"Please select which online directory to scan: (enter=quit)\\n\"", "for", "x", "in", "DIR_OPTIONS", ":", "text", "+=", "\"%d) %s\\n\"", "%", "(", "x", ",", "DIR_OPTIONS", "[", "x", "]", ")", "var", "=", "input", "(", "text", "+", "\"> \"", ")", "if", "var", "==", "\"q\"", "or", "var", "==", "\"\"", ":", "return", "None", "else", ":", "try", ":", "selection", "=", "int", "(", "var", ")", "test", "=", "DIR_OPTIONS", "[", "selection", "]", "#throw exception if number wrong\r", "break", "except", ":", "printDebug", "(", "\"Invalid selection. Please try again.\"", ",", "\"important\"", ")", "continue", "printDebug", "(", "\"----------\"", ")", "text", "=", "\"Search for a specific keyword? (enter=show all)\\n\"", "var", "=", "input", "(", "text", "+", "\"> \"", ")", "keyword", "=", "var", "try", ":", "if", "selection", "==", "1", ":", "_import_LOV", "(", "keyword", "=", "keyword", ")", "elif", "selection", "==", "2", ":", "_import_PREFIXCC", "(", "keyword", "=", "keyword", ")", "except", ":", "printDebug", "(", "\"Sorry, the online repository seems to be unreachable.\"", ")", "return", "True" ]
select from the available online directories for import
[ "select", "from", "the", "available", "online", "directories", "for", "import" ]
eb46cb13792b2b87f21babdf976996318eec7571
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/core/actions.py#L339-L374
17,364
lambdamusic/Ontospy
ontospy/core/actions.py
action_bootstrap
def action_bootstrap(verbose=False):
    """Bootstrap the local REPO with a few cool ontologies"""
    printDebug("The following ontologies will be imported:")
    printDebug("--------------")
    count = 0
    for s in BOOTSTRAP_ONTOLOGIES:
        count += 1
        print(count, "<%s>" % s)

    printDebug("--------------")
    printDebug("Note: this operation may take several minutes.")
    printDebug("Proceed? [Y/N]")
    var = input()
    if var == "y" or var == "Y":
        for uri in BOOTSTRAP_ONTOLOGIES:
            try:
                printDebug("--------------")
                action_import(uri, verbose)
            except:
                printDebug(
                    "OPS... An Unknown Error Occurred - Aborting Installation")
        printDebug("\n==========\n" + "Bootstrap command completed.",
                   "important")
        return True
    else:
        printDebug("--------------")
        printDebug("Goodbye")
        return False
python
def action_bootstrap(verbose=False):
    """Bootstrap the local REPO with a few cool ontologies"""
    printDebug("The following ontologies will be imported:")
    printDebug("--------------")
    count = 0
    for s in BOOTSTRAP_ONTOLOGIES:
        count += 1
        print(count, "<%s>" % s)

    printDebug("--------------")
    printDebug("Note: this operation may take several minutes.")
    printDebug("Proceed? [Y/N]")
    var = input()
    if var == "y" or var == "Y":
        for uri in BOOTSTRAP_ONTOLOGIES:
            try:
                printDebug("--------------")
                action_import(uri, verbose)
            except:
                printDebug(
                    "OPS... An Unknown Error Occurred - Aborting Installation")
        printDebug("\n==========\n" + "Bootstrap command completed.",
                   "important")
        return True
    else:
        printDebug("--------------")
        printDebug("Goodbye")
        return False
[ "def", "action_bootstrap", "(", "verbose", "=", "False", ")", ":", "printDebug", "(", "\"The following ontologies will be imported:\"", ")", "printDebug", "(", "\"--------------\"", ")", "count", "=", "0", "for", "s", "in", "BOOTSTRAP_ONTOLOGIES", ":", "count", "+=", "1", "print", "(", "count", ",", "\"<%s>\"", "%", "s", ")", "printDebug", "(", "\"--------------\"", ")", "printDebug", "(", "\"Note: this operation may take several minutes.\"", ")", "printDebug", "(", "\"Proceed? [Y/N]\"", ")", "var", "=", "input", "(", ")", "if", "var", "==", "\"y\"", "or", "var", "==", "\"Y\"", ":", "for", "uri", "in", "BOOTSTRAP_ONTOLOGIES", ":", "try", ":", "printDebug", "(", "\"--------------\"", ")", "action_import", "(", "uri", ",", "verbose", ")", "except", ":", "printDebug", "(", "\"OPS... An Unknown Error Occurred - Aborting Installation\"", ")", "printDebug", "(", "\"\\n==========\\n\"", "+", "\"Bootstrap command completed.\"", ",", "\"important\"", ")", "return", "True", "else", ":", "printDebug", "(", "\"--------------\"", ")", "printDebug", "(", "\"Goodbye\"", ")", "return", "False" ]
Bootstrap the local REPO with a few cool ontologies
[ "Bootstrap", "the", "local", "REPO", "with", "a", "few", "cool", "ontologies" ]
eb46cb13792b2b87f21babdf976996318eec7571
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/core/actions.py#L487-L514
17,365
lambdamusic/Ontospy
ontospy/core/actions.py
action_update_library_location
def action_update_library_location(_location): """ Sets the folder that contains models for the local library @todo: add options to move things over etc.. note: this is called from 'manager' """ # if not(os.path.exists(_location)): # os.mkdir(_location) # printDebug("Creating new folder..", "comment") printDebug("Old location: '%s'" % get_home_location(), "comment") if os.path.isdir(_location): config = SafeConfigParser() config_filename = ONTOSPY_LOCAL + '/config.ini' config.read(config_filename) if not config.has_section('models'): config.add_section('models') config.set('models', 'dir', _location) with open(config_filename, 'w') as f: config.write( f) # note: this does not remove previously saved settings return _location else: return None
python
def action_update_library_location(_location): """ Sets the folder that contains models for the local library @todo: add options to move things over etc.. note: this is called from 'manager' """ # if not(os.path.exists(_location)): # os.mkdir(_location) # printDebug("Creating new folder..", "comment") printDebug("Old location: '%s'" % get_home_location(), "comment") if os.path.isdir(_location): config = SafeConfigParser() config_filename = ONTOSPY_LOCAL + '/config.ini' config.read(config_filename) if not config.has_section('models'): config.add_section('models') config.set('models', 'dir', _location) with open(config_filename, 'w') as f: config.write( f) # note: this does not remove previously saved settings return _location else: return None
[ "def", "action_update_library_location", "(", "_location", ")", ":", "# if not(os.path.exists(_location)):\r", "# \tos.mkdir(_location)\r", "# \tprintDebug(\"Creating new folder..\", \"comment\")\r", "printDebug", "(", "\"Old location: '%s'\"", "%", "get_home_location", "(", ")", ",", "\"comment\"", ")", "if", "os", ".", "path", ".", "isdir", "(", "_location", ")", ":", "config", "=", "SafeConfigParser", "(", ")", "config_filename", "=", "ONTOSPY_LOCAL", "+", "'/config.ini'", "config", ".", "read", "(", "config_filename", ")", "if", "not", "config", ".", "has_section", "(", "'models'", ")", ":", "config", ".", "add_section", "(", "'models'", ")", "config", ".", "set", "(", "'models'", ",", "'dir'", ",", "_location", ")", "with", "open", "(", "config_filename", ",", "'w'", ")", "as", "f", ":", "config", ".", "write", "(", "f", ")", "# note: this does not remove previously saved settings\r", "return", "_location", "else", ":", "return", "None" ]
Sets the folder that contains models for the local library @todo: add options to move things over etc.. note: this is called from 'manager'
[ "Sets", "the", "folder", "that", "contains", "models", "for", "the", "local", "library" ]
eb46cb13792b2b87f21babdf976996318eec7571
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/core/actions.py#L517-L545
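The record above persists a single models/dir setting with SafeConfigParser, which in Python 3 is just a deprecated alias for configparser.ConfigParser. A standalone sketch of the same read-modify-write pattern, with the config filename passed in rather than derived from Ontospy's home folder:

import os
from configparser import ConfigParser   # modern equivalent of SafeConfigParser

def save_library_location(config_filename, location):
    """Persist a models/dir setting, keeping any other sections already in the file."""
    if not os.path.isdir(location):
        return None
    config = ConfigParser()
    config.read(config_filename)         # a missing file is simply treated as empty
    if not config.has_section("models"):
        config.add_section("models")
    config.set("models", "dir", location)
    with open(config_filename, "w") as f:
        config.write(f)
    return location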
17,366
lambdamusic/Ontospy
ontospy/core/actions.py
action_cache_reset
def action_cache_reset(): """ Delete all contents from cache folder Then re-generate cached version of all models in the local repo """ printDebug("""The existing cache will be erased and recreated.""") printDebug( """This operation may take several minutes, depending on how many files exist in your local library.""" ) ONTOSPY_LOCAL_MODELS = get_home_location() # https://stackoverflow.com/questions/185936/how-to-delete-the-contents-of-a-folder-in-python # NOTE This will not only delete the contents but the folder itself as well. shutil.rmtree(ONTOSPY_LOCAL_CACHE_TOP) var = input(Style.BRIGHT + "=====\nProceed? (y/n) " + Style.RESET_ALL) if var == "y": repo_contents = get_localontologies() print(Style.BRIGHT + "\n=====\n%d ontologies available in the local library\n=====" % len(repo_contents) + Style.RESET_ALL) for onto in repo_contents: fullpath = ONTOSPY_LOCAL_MODELS + "/" + onto try: print(Fore.RED + "\n=====\n" + onto + Style.RESET_ALL) print("Loading graph...") g = Ontospy(fullpath) print("Loaded ", fullpath) except: g = None print( "Error parsing file. Please make sure %s contains valid RDF." % fullpath) if g: print("Caching...") do_pickle_ontology(onto, g) print(Style.BRIGHT + "===Completed===" + Style.RESET_ALL) else: print("Goodbye")
python
def action_cache_reset(): """ Delete all contents from cache folder Then re-generate cached version of all models in the local repo """ printDebug("""The existing cache will be erased and recreated.""") printDebug( """This operation may take several minutes, depending on how many files exist in your local library.""" ) ONTOSPY_LOCAL_MODELS = get_home_location() # https://stackoverflow.com/questions/185936/how-to-delete-the-contents-of-a-folder-in-python # NOTE This will not only delete the contents but the folder itself as well. shutil.rmtree(ONTOSPY_LOCAL_CACHE_TOP) var = input(Style.BRIGHT + "=====\nProceed? (y/n) " + Style.RESET_ALL) if var == "y": repo_contents = get_localontologies() print(Style.BRIGHT + "\n=====\n%d ontologies available in the local library\n=====" % len(repo_contents) + Style.RESET_ALL) for onto in repo_contents: fullpath = ONTOSPY_LOCAL_MODELS + "/" + onto try: print(Fore.RED + "\n=====\n" + onto + Style.RESET_ALL) print("Loading graph...") g = Ontospy(fullpath) print("Loaded ", fullpath) except: g = None print( "Error parsing file. Please make sure %s contains valid RDF." % fullpath) if g: print("Caching...") do_pickle_ontology(onto, g) print(Style.BRIGHT + "===Completed===" + Style.RESET_ALL) else: print("Goodbye")
[ "def", "action_cache_reset", "(", ")", ":", "printDebug", "(", "\"\"\"The existing cache will be erased and recreated.\"\"\"", ")", "printDebug", "(", "\"\"\"This operation may take several minutes, depending on how many files exist in your local library.\"\"\"", ")", "ONTOSPY_LOCAL_MODELS", "=", "get_home_location", "(", ")", "# https://stackoverflow.com/questions/185936/how-to-delete-the-contents-of-a-folder-in-python\r", "# NOTE This will not only delete the contents but the folder itself as well.\r", "shutil", ".", "rmtree", "(", "ONTOSPY_LOCAL_CACHE_TOP", ")", "var", "=", "input", "(", "Style", ".", "BRIGHT", "+", "\"=====\\nProceed? (y/n) \"", "+", "Style", ".", "RESET_ALL", ")", "if", "var", "==", "\"y\"", ":", "repo_contents", "=", "get_localontologies", "(", ")", "print", "(", "Style", ".", "BRIGHT", "+", "\"\\n=====\\n%d ontologies available in the local library\\n=====\"", "%", "len", "(", "repo_contents", ")", "+", "Style", ".", "RESET_ALL", ")", "for", "onto", "in", "repo_contents", ":", "fullpath", "=", "ONTOSPY_LOCAL_MODELS", "+", "\"/\"", "+", "onto", "try", ":", "print", "(", "Fore", ".", "RED", "+", "\"\\n=====\\n\"", "+", "onto", "+", "Style", ".", "RESET_ALL", ")", "print", "(", "\"Loading graph...\"", ")", "g", "=", "Ontospy", "(", "fullpath", ")", "print", "(", "\"Loaded \"", ",", "fullpath", ")", "except", ":", "g", "=", "None", "print", "(", "\"Error parsing file. Please make sure %s contains valid RDF.\"", "%", "fullpath", ")", "if", "g", ":", "print", "(", "\"Caching...\"", ")", "do_pickle_ontology", "(", "onto", ",", "g", ")", "print", "(", "Style", ".", "BRIGHT", "+", "\"===Completed===\"", "+", "Style", ".", "RESET_ALL", ")", "else", ":", "print", "(", "\"Goodbye\"", ")" ]
Delete all contents from cache folder Then re-generate cached version of all models in the local repo
[ "Delete", "all", "contents", "from", "cache", "folder", "Then", "re", "-", "generate", "cached", "version", "of", "all", "models", "in", "the", "local", "repo" ]
eb46cb13792b2b87f21babdf976996318eec7571
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/core/actions.py#L548-L589
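Stripped of Ontospy's own loader and printing, the rebuild step is: walk the models folder, parse each file, and pickle the result into a cache folder, skipping anything that fails to parse. The sketch below assumes a generic one-pickle-per-file cache layout and a caller-supplied `load` function (Ontospy(path) in the record); both are illustrative, not the library's actual cache scheme.

import os
import pickle

def rebuild_cache(models_dir, cache_dir, load):
    """Re-derive a pickled cache entry for every file in models_dir."""
    os.makedirs(cache_dir, exist_ok=True)
    for name in sorted(os.listdir(models_dir)):
        try:
            obj = load(os.path.join(models_dir, name))
        except Exception:
            print("Error parsing %s - skipped" % name)
            continue
        with open(os.path.join(cache_dir, name + ".pickle"), "wb") as f:
            pickle.dump(obj, f)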
17,367
gabrielfalcao/sure
sure/core.py
DeepComparison.compare_ordereddict
def compare_ordereddict(self, X, Y): """Compares two instances of an OrderedDict.""" # check if OrderedDict instances have the same keys and values child = self.compare_dicts(X, Y) if isinstance(child, DeepExplanation): return child # check if the order of the keys is the same for i, j in zip(X.items(), Y.items()): if i[0] != j[0]: c = self.get_context() msg = "X{0} and Y{1} are in a different order".format( red(c.current_X_keys), green(c.current_Y_keys) ) return DeepExplanation(msg) return True
python
def compare_ordereddict(self, X, Y): """Compares two instances of an OrderedDict.""" # check if OrderedDict instances have the same keys and values child = self.compare_dicts(X, Y) if isinstance(child, DeepExplanation): return child # check if the order of the keys is the same for i, j in zip(X.items(), Y.items()): if i[0] != j[0]: c = self.get_context() msg = "X{0} and Y{1} are in a different order".format( red(c.current_X_keys), green(c.current_Y_keys) ) return DeepExplanation(msg) return True
[ "def", "compare_ordereddict", "(", "self", ",", "X", ",", "Y", ")", ":", "# check if OrderedDict instances have the same keys and values", "child", "=", "self", ".", "compare_dicts", "(", "X", ",", "Y", ")", "if", "isinstance", "(", "child", ",", "DeepExplanation", ")", ":", "return", "child", "# check if the order of the keys is the same", "for", "i", ",", "j", "in", "zip", "(", "X", ".", "items", "(", ")", ",", "Y", ".", "items", "(", ")", ")", ":", "if", "i", "[", "0", "]", "!=", "j", "[", "0", "]", ":", "c", "=", "self", ".", "get_context", "(", ")", "msg", "=", "\"X{0} and Y{1} are in a different order\"", ".", "format", "(", "red", "(", "c", ".", "current_X_keys", ")", ",", "green", "(", "c", ".", "current_Y_keys", ")", ")", "return", "DeepExplanation", "(", "msg", ")", "return", "True" ]
Compares two instances of an OrderedDict.
[ "Compares", "two", "instances", "of", "an", "OrderedDict", "." ]
ac23b6b87306ec502b8719534ab23965d97a95f9
https://github.com/gabrielfalcao/sure/blob/ac23b6b87306ec502b8719534ab23965d97a95f9/sure/core.py#L146-L162
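After the plain dict comparison passes, the order check in the record reduces to zipping the two .items() views and comparing keys pairwise; equal contents in a different order fail only that second step. A small standalone illustration of just that step:

from collections import OrderedDict

def same_key_order(x, y):
    """True when two mappings with equal keys also list them in the same order."""
    return all(a == b for (a, _), (b, _) in zip(x.items(), y.items()))

x = OrderedDict([("a", 1), ("b", 2)])
y = OrderedDict([("b", 2), ("a", 1)])
assert dict(x) == dict(y)           # same keys and values...
assert not same_key_order(x, y)     # ...but a different order, which compare_ordereddict reports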
17,368
gabrielfalcao/sure
sure/stubs.py
stub
def stub(base_class=None, **attributes): """creates a python class on-the-fly with the given keyword-arguments as class-attributes accessible with .attrname. The new class inherits from Use this to mock rather than stub. """ if base_class is None: base_class = object members = { "__init__": lambda self: None, "__new__": lambda *args, **kw: object.__new__( *args, *kw ), # remove __new__ and metaclass behavior from object "__metaclass__": None, } members.update(attributes) # let's create a python class on-the-fly :) return type(f"{base_class.__name__}Stub", (base_class,), members)()
python
def stub(base_class=None, **attributes): """creates a python class on-the-fly with the given keyword-arguments as class-attributes accessible with .attrname. The new class inherits from Use this to mock rather than stub. """ if base_class is None: base_class = object members = { "__init__": lambda self: None, "__new__": lambda *args, **kw: object.__new__( *args, *kw ), # remove __new__ and metaclass behavior from object "__metaclass__": None, } members.update(attributes) # let's create a python class on-the-fly :) return type(f"{base_class.__name__}Stub", (base_class,), members)()
[ "def", "stub", "(", "base_class", "=", "None", ",", "*", "*", "attributes", ")", ":", "if", "base_class", "is", "None", ":", "base_class", "=", "object", "members", "=", "{", "\"__init__\"", ":", "lambda", "self", ":", "None", ",", "\"__new__\"", ":", "lambda", "*", "args", ",", "*", "*", "kw", ":", "object", ".", "__new__", "(", "*", "args", ",", "*", "kw", ")", ",", "# remove __new__ and metaclass behavior from object", "\"__metaclass__\"", ":", "None", ",", "}", "members", ".", "update", "(", "attributes", ")", "# let's create a python class on-the-fly :)", "return", "type", "(", "f\"{base_class.__name__}Stub\"", ",", "(", "base_class", ",", ")", ",", "members", ")", "(", ")" ]
creates a python class on-the-fly with the given keyword-arguments as class-attributes accessible with .attrname. The new class inherits from Use this to mock rather than stub.
[ "creates", "a", "python", "class", "on", "-", "the", "-", "fly", "with", "the", "given", "keyword", "-", "arguments", "as", "class", "-", "attributes", "accessible", "with", ".", "attrname", "." ]
ac23b6b87306ec502b8719534ab23965d97a95f9
https://github.com/gabrielfalcao/sure/blob/ac23b6b87306ec502b8719534ab23965d97a95f9/sure/stubs.py#L19-L38
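The factory above boils down to a type() call whose third argument mixes the given keyword arguments into the new class's dict. A cut-down sketch of the same idea, independent of sure and with hypothetical attribute names chosen only for the example:

def make_stub(base_class=object, **attributes):
    """Build and instantiate a throwaway subclass whose class dict is `attributes`."""
    members = {"__init__": lambda self: None}
    members.update(attributes)
    return type(base_class.__name__ + "Stub", (base_class,), members)()

user = make_stub(first_name="John", age=30,
                 greet=lambda self: "hi %s" % self.first_name)
assert user.first_name == "John"
assert user.greet() == "hi John"     # functions in the class dict become bound methods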
17,369
gabrielfalcao/sure
sure/__init__.py
assertion
def assertion(func): """Extend sure with a custom assertion method.""" func = assertionmethod(func) setattr(AssertionBuilder, func.__name__, func) return func
python
def assertion(func): """Extend sure with a custom assertion method.""" func = assertionmethod(func) setattr(AssertionBuilder, func.__name__, func) return func
[ "def", "assertion", "(", "func", ")", ":", "func", "=", "assertionmethod", "(", "func", ")", "setattr", "(", "AssertionBuilder", ",", "func", ".", "__name__", ",", "func", ")", "return", "func" ]
Extend sure with a custom assertion method.
[ "Extend", "sure", "with", "a", "custom", "assertion", "method", "." ]
ac23b6b87306ec502b8719534ab23965d97a95f9
https://github.com/gabrielfalcao/sure/blob/ac23b6b87306ec502b8719534ab23965d97a95f9/sure/__init__.py#L928-L932
17,370
gabrielfalcao/sure
sure/__init__.py
chainproperty
def chainproperty(func): """Extend sure with a custom chain property.""" func = assertionproperty(func) setattr(AssertionBuilder, func.fget.__name__, func) return func
python
def chainproperty(func): """Extend sure with a custom chain property.""" func = assertionproperty(func) setattr(AssertionBuilder, func.fget.__name__, func) return func
[ "def", "chainproperty", "(", "func", ")", ":", "func", "=", "assertionproperty", "(", "func", ")", "setattr", "(", "AssertionBuilder", ",", "func", ".", "fget", ".", "__name__", ",", "func", ")", "return", "func" ]
Extend sure with a custom chain property.
[ "Extend", "sure", "with", "a", "custom", "chain", "property", "." ]
ac23b6b87306ec502b8719534ab23965d97a95f9
https://github.com/gabrielfalcao/sure/blob/ac23b6b87306ec502b8719534ab23965d97a95f9/sure/__init__.py#L941-L945
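Both decorators above work the same way: they register the decorated function (or property) on AssertionBuilder under its own name, which is what makes it reachable from the fluent chain. A toy version of that registration mechanism, leaving out the assertionmethod/assertionproperty wrapping:

class Builder(object):
    def __init__(self, obj):
        self.obj = obj

def extend(func):
    """Attach `func` to Builder under its own name and hand it back unchanged."""
    setattr(Builder, func.__name__, func)
    return func

@extend
def doubled(self):
    return self.obj * 2

assert Builder(21).doubled() == 42   # the new method is now part of every Builder instance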
17,371
gabrielfalcao/sure
sure/__init__.py
AssertionBuilder.equal
def equal(self, what, epsilon=None): """compares given object ``X`` with an expected ``Y`` object. It primarily assures that the compared objects are absolute equal ``==``. :param what: the expected value :param epsilon: a delta to leverage upper-bound floating point permissiveness """ try: comparison = DeepComparison(self.obj, what, epsilon).compare() error = False except AssertionError as e: error = e comparison = None if isinstance(comparison, DeepExplanation): error = comparison.get_assertion(self.obj, what) if self.negative: if error: return True msg = '%s should differ from %s, but is the same thing' raise AssertionError(msg % (safe_repr(self.obj), safe_repr(what))) else: if not error: return True raise error
python
def equal(self, what, epsilon=None): """compares given object ``X`` with an expected ``Y`` object. It primarily assures that the compared objects are absolute equal ``==``. :param what: the expected value :param epsilon: a delta to leverage upper-bound floating point permissiveness """ try: comparison = DeepComparison(self.obj, what, epsilon).compare() error = False except AssertionError as e: error = e comparison = None if isinstance(comparison, DeepExplanation): error = comparison.get_assertion(self.obj, what) if self.negative: if error: return True msg = '%s should differ from %s, but is the same thing' raise AssertionError(msg % (safe_repr(self.obj), safe_repr(what))) else: if not error: return True raise error
[ "def", "equal", "(", "self", ",", "what", ",", "epsilon", "=", "None", ")", ":", "try", ":", "comparison", "=", "DeepComparison", "(", "self", ".", "obj", ",", "what", ",", "epsilon", ")", ".", "compare", "(", ")", "error", "=", "False", "except", "AssertionError", "as", "e", ":", "error", "=", "e", "comparison", "=", "None", "if", "isinstance", "(", "comparison", ",", "DeepExplanation", ")", ":", "error", "=", "comparison", ".", "get_assertion", "(", "self", ".", "obj", ",", "what", ")", "if", "self", ".", "negative", ":", "if", "error", ":", "return", "True", "msg", "=", "'%s should differ from %s, but is the same thing'", "raise", "AssertionError", "(", "msg", "%", "(", "safe_repr", "(", "self", ".", "obj", ")", ",", "safe_repr", "(", "what", ")", ")", ")", "else", ":", "if", "not", "error", ":", "return", "True", "raise", "error" ]
compares given object ``X`` with an expected ``Y`` object. It primarily assures that the compared objects are absolute equal ``==``. :param what: the expected value :param epsilon: a delta to leverage upper-bound floating point permissiveness
[ "compares", "given", "object", "X", "with", "an", "expected", "Y", "object", "." ]
ac23b6b87306ec502b8719534ab23965d97a95f9
https://github.com/gabrielfalcao/sure/blob/ac23b6b87306ec502b8719534ab23965d97a95f9/sure/__init__.py#L644-L673
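In practice this method is reached through sure's fluent chain rather than called directly. A short usage sketch, assuming the package is installed and exposes its documented expect helper and .to chain; the epsilon keyword comes straight from the signature in the record:

from sure import expect

expect(1 + 1).to.equal(2)                        # plain equality via DeepComparison
expect(0.1 + 0.2).to.equal(0.3, epsilon=1e-9)    # epsilon absorbs floating-point error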
17,372
aspiers/git-deps
git_deps/detector.py
DependencyDetector.find_dependencies
def find_dependencies(self, dependent_rev, recurse=None): """Find all dependencies of the given revision, recursively traversing the dependency tree if requested. """ if recurse is None: recurse = self.options.recurse try: dependent = self.get_commit(dependent_rev) except InvalidCommitish as e: abort(e.message()) self.todo.append(dependent) self.todo_d[dependent.hex] = True first_time = True while self.todo: sha1s = [commit.hex[:8] for commit in self.todo] if first_time: self.logger.info("Initial TODO list: %s" % " ".join(sha1s)) first_time = False else: self.logger.info(" TODO list now: %s" % " ".join(sha1s)) dependent = self.todo.pop(0) dependent_sha1 = dependent.hex del self.todo_d[dependent_sha1] self.logger.info(" Processing %s from TODO list" % dependent_sha1[:8]) if dependent_sha1 in self.done_d: self.logger.info(" %s already done previously" % dependent_sha1) continue self.notify_listeners('new_commit', dependent) parent = dependent.parents[0] self.find_dependencies_with_parent(dependent, parent) self.done.append(dependent_sha1) self.done_d[dependent_sha1] = True self.logger.info(" Found all dependencies for %s" % dependent_sha1[:8]) # A commit won't have any dependencies if it only added new files dependencies = self.dependencies.get(dependent_sha1, {}) self.notify_listeners('dependent_done', dependent, dependencies) self.logger.info("Finished processing TODO list") self.notify_listeners('all_done')
python
def find_dependencies(self, dependent_rev, recurse=None): """Find all dependencies of the given revision, recursively traversing the dependency tree if requested. """ if recurse is None: recurse = self.options.recurse try: dependent = self.get_commit(dependent_rev) except InvalidCommitish as e: abort(e.message()) self.todo.append(dependent) self.todo_d[dependent.hex] = True first_time = True while self.todo: sha1s = [commit.hex[:8] for commit in self.todo] if first_time: self.logger.info("Initial TODO list: %s" % " ".join(sha1s)) first_time = False else: self.logger.info(" TODO list now: %s" % " ".join(sha1s)) dependent = self.todo.pop(0) dependent_sha1 = dependent.hex del self.todo_d[dependent_sha1] self.logger.info(" Processing %s from TODO list" % dependent_sha1[:8]) if dependent_sha1 in self.done_d: self.logger.info(" %s already done previously" % dependent_sha1) continue self.notify_listeners('new_commit', dependent) parent = dependent.parents[0] self.find_dependencies_with_parent(dependent, parent) self.done.append(dependent_sha1) self.done_d[dependent_sha1] = True self.logger.info(" Found all dependencies for %s" % dependent_sha1[:8]) # A commit won't have any dependencies if it only added new files dependencies = self.dependencies.get(dependent_sha1, {}) self.notify_listeners('dependent_done', dependent, dependencies) self.logger.info("Finished processing TODO list") self.notify_listeners('all_done')
[ "def", "find_dependencies", "(", "self", ",", "dependent_rev", ",", "recurse", "=", "None", ")", ":", "if", "recurse", "is", "None", ":", "recurse", "=", "self", ".", "options", ".", "recurse", "try", ":", "dependent", "=", "self", ".", "get_commit", "(", "dependent_rev", ")", "except", "InvalidCommitish", "as", "e", ":", "abort", "(", "e", ".", "message", "(", ")", ")", "self", ".", "todo", ".", "append", "(", "dependent", ")", "self", ".", "todo_d", "[", "dependent", ".", "hex", "]", "=", "True", "first_time", "=", "True", "while", "self", ".", "todo", ":", "sha1s", "=", "[", "commit", ".", "hex", "[", ":", "8", "]", "for", "commit", "in", "self", ".", "todo", "]", "if", "first_time", ":", "self", ".", "logger", ".", "info", "(", "\"Initial TODO list: %s\"", "%", "\" \"", ".", "join", "(", "sha1s", ")", ")", "first_time", "=", "False", "else", ":", "self", ".", "logger", ".", "info", "(", "\" TODO list now: %s\"", "%", "\" \"", ".", "join", "(", "sha1s", ")", ")", "dependent", "=", "self", ".", "todo", ".", "pop", "(", "0", ")", "dependent_sha1", "=", "dependent", ".", "hex", "del", "self", ".", "todo_d", "[", "dependent_sha1", "]", "self", ".", "logger", ".", "info", "(", "\" Processing %s from TODO list\"", "%", "dependent_sha1", "[", ":", "8", "]", ")", "if", "dependent_sha1", "in", "self", ".", "done_d", ":", "self", ".", "logger", ".", "info", "(", "\" %s already done previously\"", "%", "dependent_sha1", ")", "continue", "self", ".", "notify_listeners", "(", "'new_commit'", ",", "dependent", ")", "parent", "=", "dependent", ".", "parents", "[", "0", "]", "self", ".", "find_dependencies_with_parent", "(", "dependent", ",", "parent", ")", "self", ".", "done", ".", "append", "(", "dependent_sha1", ")", "self", ".", "done_d", "[", "dependent_sha1", "]", "=", "True", "self", ".", "logger", ".", "info", "(", "\" Found all dependencies for %s\"", "%", "dependent_sha1", "[", ":", "8", "]", ")", "# A commit won't have any dependencies if it only added new files", "dependencies", "=", "self", ".", "dependencies", ".", "get", "(", "dependent_sha1", ",", "{", "}", ")", "self", ".", "notify_listeners", "(", "'dependent_done'", ",", "dependent", ",", "dependencies", ")", "self", ".", "logger", ".", "info", "(", "\"Finished processing TODO list\"", ")", "self", ".", "notify_listeners", "(", "'all_done'", ")" ]
Find all dependencies of the given revision, recursively traversing the dependency tree if requested.
[ "Find", "all", "dependencies", "of", "the", "given", "revision", "recursively", "traversing", "the", "dependency", "tree", "if", "requested", "." ]
a00380b8bf1451d8c3405dace8d5927428506eb0
https://github.com/aspiers/git-deps/blob/a00380b8bf1451d8c3405dace8d5927428506eb0/git_deps/detector.py#L84-L132
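Stripped of the git specifics, the loop above is a classic worklist traversal: pop from a FIFO queue, skip anything already done, process it, and (when recursing) push newly discovered items. A generic sketch of that pattern, with a toy neighbour function standing in for the blame-driven discovery:

from collections import deque

def traverse(start, neighbours):
    """Visit `start` and, breadth-first, everything reachable via `neighbours`."""
    todo, done = deque([start]), set()
    while todo:
        item = todo.popleft()
        if item in done:
            continue                 # already processed earlier in the walk
        done.add(item)
        for nxt in neighbours(item):
            if nxt not in done:
                todo.append(nxt)
    return done

assert traverse(1, {1: [2, 3], 2: [3], 3: []}.get) == {1, 2, 3}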
17,373
aspiers/git-deps
git_deps/detector.py
DependencyDetector.find_dependencies_with_parent
def find_dependencies_with_parent(self, dependent, parent): """Find all dependencies of the given revision caused by the given parent commit. This will be called multiple times for merge commits which have multiple parents. """ self.logger.info(" Finding dependencies of %s via parent %s" % (dependent.hex[:8], parent.hex[:8])) diff = self.repo.diff(parent, dependent, context_lines=self.options.context_lines) for patch in diff: path = patch.delta.old_file.path self.logger.info(" Examining hunks in %s" % path) for hunk in patch.hunks: self.blame_diff_hunk(dependent, parent, path, hunk)
python
def find_dependencies_with_parent(self, dependent, parent): """Find all dependencies of the given revision caused by the given parent commit. This will be called multiple times for merge commits which have multiple parents. """ self.logger.info(" Finding dependencies of %s via parent %s" % (dependent.hex[:8], parent.hex[:8])) diff = self.repo.diff(parent, dependent, context_lines=self.options.context_lines) for patch in diff: path = patch.delta.old_file.path self.logger.info(" Examining hunks in %s" % path) for hunk in patch.hunks: self.blame_diff_hunk(dependent, parent, path, hunk)
[ "def", "find_dependencies_with_parent", "(", "self", ",", "dependent", ",", "parent", ")", ":", "self", ".", "logger", ".", "info", "(", "\" Finding dependencies of %s via parent %s\"", "%", "(", "dependent", ".", "hex", "[", ":", "8", "]", ",", "parent", ".", "hex", "[", ":", "8", "]", ")", ")", "diff", "=", "self", ".", "repo", ".", "diff", "(", "parent", ",", "dependent", ",", "context_lines", "=", "self", ".", "options", ".", "context_lines", ")", "for", "patch", "in", "diff", ":", "path", "=", "patch", ".", "delta", ".", "old_file", ".", "path", "self", ".", "logger", ".", "info", "(", "\" Examining hunks in %s\"", "%", "path", ")", "for", "hunk", "in", "patch", ".", "hunks", ":", "self", ".", "blame_diff_hunk", "(", "dependent", ",", "parent", ",", "path", ",", "hunk", ")" ]
Find all dependencies of the given revision caused by the given parent commit. This will be called multiple times for merge commits which have multiple parents.
[ "Find", "all", "dependencies", "of", "the", "given", "revision", "caused", "by", "the", "given", "parent", "commit", ".", "This", "will", "be", "called", "multiple", "times", "for", "merge", "commits", "which", "have", "multiple", "parents", "." ]
a00380b8bf1451d8c3405dace8d5927428506eb0
https://github.com/aspiers/git-deps/blob/a00380b8bf1451d8c3405dace8d5927428506eb0/git_deps/detector.py#L134-L147
17,374
aspiers/git-deps
git_deps/detector.py
DependencyDetector.blame_diff_hunk
def blame_diff_hunk(self, dependent, parent, path, hunk): """Run git blame on the parts of the hunk which exist in the older commit in the diff. The commits generated by git blame are the commits which the newer commit in the diff depends on, because without the lines from those commits, the hunk would not apply correctly. """ line_range_before = "-%d,%d" % (hunk.old_start, hunk.old_lines) line_range_after = "+%d,%d" % (hunk.new_start, hunk.new_lines) self.logger.info(" Blaming hunk %s @ %s (listed below)" % (line_range_before, parent.hex[:8])) if not self.tree_lookup(path, parent): # This is probably because dependent added a new directory # which was not previously in the parent. return blame = self.run_blame(hunk, parent, path) dependent_sha1 = dependent.hex self.register_new_dependent(dependent, dependent_sha1) line_to_culprit = {} for line in blame.split('\n'): self.process_hunk_line(dependent, dependent_sha1, parent, path, line, line_to_culprit) self.debug_hunk(line_range_before, line_range_after, hunk, line_to_culprit)
python
def blame_diff_hunk(self, dependent, parent, path, hunk): """Run git blame on the parts of the hunk which exist in the older commit in the diff. The commits generated by git blame are the commits which the newer commit in the diff depends on, because without the lines from those commits, the hunk would not apply correctly. """ line_range_before = "-%d,%d" % (hunk.old_start, hunk.old_lines) line_range_after = "+%d,%d" % (hunk.new_start, hunk.new_lines) self.logger.info(" Blaming hunk %s @ %s (listed below)" % (line_range_before, parent.hex[:8])) if not self.tree_lookup(path, parent): # This is probably because dependent added a new directory # which was not previously in the parent. return blame = self.run_blame(hunk, parent, path) dependent_sha1 = dependent.hex self.register_new_dependent(dependent, dependent_sha1) line_to_culprit = {} for line in blame.split('\n'): self.process_hunk_line(dependent, dependent_sha1, parent, path, line, line_to_culprit) self.debug_hunk(line_range_before, line_range_after, hunk, line_to_culprit)
[ "def", "blame_diff_hunk", "(", "self", ",", "dependent", ",", "parent", ",", "path", ",", "hunk", ")", ":", "line_range_before", "=", "\"-%d,%d\"", "%", "(", "hunk", ".", "old_start", ",", "hunk", ".", "old_lines", ")", "line_range_after", "=", "\"+%d,%d\"", "%", "(", "hunk", ".", "new_start", ",", "hunk", ".", "new_lines", ")", "self", ".", "logger", ".", "info", "(", "\" Blaming hunk %s @ %s (listed below)\"", "%", "(", "line_range_before", ",", "parent", ".", "hex", "[", ":", "8", "]", ")", ")", "if", "not", "self", ".", "tree_lookup", "(", "path", ",", "parent", ")", ":", "# This is probably because dependent added a new directory", "# which was not previously in the parent.", "return", "blame", "=", "self", ".", "run_blame", "(", "hunk", ",", "parent", ",", "path", ")", "dependent_sha1", "=", "dependent", ".", "hex", "self", ".", "register_new_dependent", "(", "dependent", ",", "dependent_sha1", ")", "line_to_culprit", "=", "{", "}", "for", "line", "in", "blame", ".", "split", "(", "'\\n'", ")", ":", "self", ".", "process_hunk_line", "(", "dependent", ",", "dependent_sha1", ",", "parent", ",", "path", ",", "line", ",", "line_to_culprit", ")", "self", ".", "debug_hunk", "(", "line_range_before", ",", "line_range_after", ",", "hunk", ",", "line_to_culprit", ")" ]
Run git blame on the parts of the hunk which exist in the older commit in the diff. The commits generated by git blame are the commits which the newer commit in the diff depends on, because without the lines from those commits, the hunk would not apply correctly.
[ "Run", "git", "blame", "on", "the", "parts", "of", "the", "hunk", "which", "exist", "in", "the", "older", "commit", "in", "the", "diff", ".", "The", "commits", "generated", "by", "git", "blame", "are", "the", "commits", "which", "the", "newer", "commit", "in", "the", "diff", "depends", "on", "because", "without", "the", "lines", "from", "those", "commits", "the", "hunk", "would", "not", "apply", "correctly", "." ]
a00380b8bf1451d8c3405dace8d5927428506eb0
https://github.com/aspiers/git-deps/blob/a00380b8bf1451d8c3405dace8d5927428506eb0/git_deps/detector.py#L149-L178
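The two records above revolve around iterating a pygit2 diff: one patch per touched file, several hunks per patch, each hunk carrying the old-side line range that then gets blamed. The sketch below sticks to pygit2 calls that already appear in these records; repo_path and sha1 are placeholders for whatever repository and commit you want to inspect, and it only prints the hunk ranges rather than running blame.

import pygit2

def print_hunks(repo_path, sha1, context_lines=1):
    """Show, per touched file, the old-side line ranges a commit changes."""
    repo = pygit2.Repository(repo_path)
    commit = repo.get(sha1)
    parent = commit.parents[0]                    # merge commits would have several parents
    diff = repo.diff(parent, commit, context_lines=context_lines)
    for patch in diff:
        path = patch.delta.old_file.path
        for hunk in patch.hunks:
            print("%s: -%d,%d" % (path, hunk.old_start, hunk.old_lines))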
17,375
aspiers/git-deps
git_deps/detector.py
DependencyDetector.tree_lookup
def tree_lookup(self, target_path, commit): """Navigate to the tree or blob object pointed to by the given target path for the given commit. This is necessary because each git tree only contains entries for the directory it refers to, not recursively for all subdirectories. """ segments = target_path.split("/") tree_or_blob = commit.tree path = '' while segments: dirent = segments.pop(0) if isinstance(tree_or_blob, pygit2.Tree): if dirent in tree_or_blob: tree_or_blob = self.repo[tree_or_blob[dirent].oid] # self.logger.debug(" %s in %s" % (dirent, path)) if path: path += '/' path += dirent else: # This is probably because we were called on a # commit whose parent added a new directory. self.logger.debug(" %s not in %s in %s" % (dirent, path, commit.hex[:8])) return None else: self.logger.debug(" %s not a tree in %s" % (tree_or_blob, commit.hex[:8])) return None return tree_or_blob
python
def tree_lookup(self, target_path, commit): """Navigate to the tree or blob object pointed to by the given target path for the given commit. This is necessary because each git tree only contains entries for the directory it refers to, not recursively for all subdirectories. """ segments = target_path.split("/") tree_or_blob = commit.tree path = '' while segments: dirent = segments.pop(0) if isinstance(tree_or_blob, pygit2.Tree): if dirent in tree_or_blob: tree_or_blob = self.repo[tree_or_blob[dirent].oid] # self.logger.debug(" %s in %s" % (dirent, path)) if path: path += '/' path += dirent else: # This is probably because we were called on a # commit whose parent added a new directory. self.logger.debug(" %s not in %s in %s" % (dirent, path, commit.hex[:8])) return None else: self.logger.debug(" %s not a tree in %s" % (tree_or_blob, commit.hex[:8])) return None return tree_or_blob
[ "def", "tree_lookup", "(", "self", ",", "target_path", ",", "commit", ")", ":", "segments", "=", "target_path", ".", "split", "(", "\"/\"", ")", "tree_or_blob", "=", "commit", ".", "tree", "path", "=", "''", "while", "segments", ":", "dirent", "=", "segments", ".", "pop", "(", "0", ")", "if", "isinstance", "(", "tree_or_blob", ",", "pygit2", ".", "Tree", ")", ":", "if", "dirent", "in", "tree_or_blob", ":", "tree_or_blob", "=", "self", ".", "repo", "[", "tree_or_blob", "[", "dirent", "]", ".", "oid", "]", "# self.logger.debug(\" %s in %s\" % (dirent, path))", "if", "path", ":", "path", "+=", "'/'", "path", "+=", "dirent", "else", ":", "# This is probably because we were called on a", "# commit whose parent added a new directory.", "self", ".", "logger", ".", "debug", "(", "\" %s not in %s in %s\"", "%", "(", "dirent", ",", "path", ",", "commit", ".", "hex", "[", ":", "8", "]", ")", ")", "return", "None", "else", ":", "self", ".", "logger", ".", "debug", "(", "\" %s not a tree in %s\"", "%", "(", "tree_or_blob", ",", "commit", ".", "hex", "[", ":", "8", "]", ")", ")", "return", "None", "return", "tree_or_blob" ]
Navigate to the tree or blob object pointed to by the given target path for the given commit. This is necessary because each git tree only contains entries for the directory it refers to, not recursively for all subdirectories.
[ "Navigate", "to", "the", "tree", "or", "blob", "object", "pointed", "to", "by", "the", "given", "target", "path", "for", "the", "given", "commit", ".", "This", "is", "necessary", "because", "each", "git", "tree", "only", "contains", "entries", "for", "the", "directory", "it", "refers", "to", "not", "recursively", "for", "all", "subdirectories", "." ]
a00380b8bf1451d8c3405dace8d5927428506eb0
https://github.com/aspiers/git-deps/blob/a00380b8bf1451d8c3405dace8d5927428506eb0/git_deps/detector.py#L331-L359
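Because each git tree object only lists its own directory, the lookup above walks the path one segment at a time and bails out as soon as a segment is missing. The same traversal shape, shown on a plain nested dict instead of pygit2 trees:

def path_lookup(tree, target_path):
    """Walk a nested mapping one path segment at a time, or return None on a miss."""
    node = tree
    for segment in target_path.split("/"):
        if isinstance(node, dict) and segment in node:
            node = node[segment]
        else:
            return None              # the current level simply never contained this entry
    return node

tree = {"docs": {"api": {"index.md": "blob"}}}
assert path_lookup(tree, "docs/api/index.md") == "blob"
assert path_lookup(tree, "docs/missing/file") is None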
17,376
aspiers/git-deps
git_deps/gitutils.py
GitUtils.abbreviate_sha1
def abbreviate_sha1(cls, sha1): """Uniquely abbreviates the given SHA1.""" # For now we invoke git-rev-parse(1), but hopefully eventually # we will be able to do this via pygit2. cmd = ['git', 'rev-parse', '--short', sha1] # cls.logger.debug(" ".join(cmd)) out = subprocess.check_output(cmd, universal_newlines=True).strip() # cls.logger.debug(out) return out
python
def abbreviate_sha1(cls, sha1): """Uniquely abbreviates the given SHA1.""" # For now we invoke git-rev-parse(1), but hopefully eventually # we will be able to do this via pygit2. cmd = ['git', 'rev-parse', '--short', sha1] # cls.logger.debug(" ".join(cmd)) out = subprocess.check_output(cmd, universal_newlines=True).strip() # cls.logger.debug(out) return out
[ "def", "abbreviate_sha1", "(", "cls", ",", "sha1", ")", ":", "# For now we invoke git-rev-parse(1), but hopefully eventually", "# we will be able to do this via pygit2.", "cmd", "=", "[", "'git'", ",", "'rev-parse'", ",", "'--short'", ",", "sha1", "]", "# cls.logger.debug(\" \".join(cmd))", "out", "=", "subprocess", ".", "check_output", "(", "cmd", ",", "universal_newlines", "=", "True", ")", ".", "strip", "(", ")", "# cls.logger.debug(out)", "return", "out" ]
Uniquely abbreviates the given SHA1.
[ "Uniquely", "abbreviates", "the", "given", "SHA1", "." ]
a00380b8bf1451d8c3405dace8d5927428506eb0
https://github.com/aspiers/git-deps/blob/a00380b8bf1451d8c3405dace8d5927428506eb0/git_deps/gitutils.py#L11-L20
17,377
aspiers/git-deps
git_deps/gitutils.py
GitUtils.describe
def describe(cls, sha1): """Returns a human-readable representation of the given SHA1.""" # For now we invoke git-describe(1), but eventually we will be # able to do this via pygit2, since libgit2 already provides # an API for this: # https://github.com/libgit2/pygit2/pull/459#issuecomment-68866929 # https://github.com/libgit2/libgit2/pull/2592 cmd = [ 'git', 'describe', '--all', # look for tags and branches '--long', # remotes/github/master-0-g2b6d591 # '--contains', # '--abbrev', sha1 ] # cls.logger.debug(" ".join(cmd)) out = None try: out = subprocess.check_output( cmd, stderr=subprocess.STDOUT, universal_newlines=True) except subprocess.CalledProcessError as e: if e.output.find('No tags can describe') != -1: return '' raise out = out.strip() out = re.sub(r'^(heads|tags|remotes)/', '', out) # We already have the abbreviated SHA1 from abbreviate_sha1() out = re.sub(r'-g[0-9a-f]{7,}$', '', out) # cls.logger.debug(out) return out
python
def describe(cls, sha1): """Returns a human-readable representation of the given SHA1.""" # For now we invoke git-describe(1), but eventually we will be # able to do this via pygit2, since libgit2 already provides # an API for this: # https://github.com/libgit2/pygit2/pull/459#issuecomment-68866929 # https://github.com/libgit2/libgit2/pull/2592 cmd = [ 'git', 'describe', '--all', # look for tags and branches '--long', # remotes/github/master-0-g2b6d591 # '--contains', # '--abbrev', sha1 ] # cls.logger.debug(" ".join(cmd)) out = None try: out = subprocess.check_output( cmd, stderr=subprocess.STDOUT, universal_newlines=True) except subprocess.CalledProcessError as e: if e.output.find('No tags can describe') != -1: return '' raise out = out.strip() out = re.sub(r'^(heads|tags|remotes)/', '', out) # We already have the abbreviated SHA1 from abbreviate_sha1() out = re.sub(r'-g[0-9a-f]{7,}$', '', out) # cls.logger.debug(out) return out
[ "def", "describe", "(", "cls", ",", "sha1", ")", ":", "# For now we invoke git-describe(1), but eventually we will be", "# able to do this via pygit2, since libgit2 already provides", "# an API for this:", "# https://github.com/libgit2/pygit2/pull/459#issuecomment-68866929", "# https://github.com/libgit2/libgit2/pull/2592", "cmd", "=", "[", "'git'", ",", "'describe'", ",", "'--all'", ",", "# look for tags and branches", "'--long'", ",", "# remotes/github/master-0-g2b6d591", "# '--contains',", "# '--abbrev',", "sha1", "]", "# cls.logger.debug(\" \".join(cmd))", "out", "=", "None", "try", ":", "out", "=", "subprocess", ".", "check_output", "(", "cmd", ",", "stderr", "=", "subprocess", ".", "STDOUT", ",", "universal_newlines", "=", "True", ")", "except", "subprocess", ".", "CalledProcessError", "as", "e", ":", "if", "e", ".", "output", ".", "find", "(", "'No tags can describe'", ")", "!=", "-", "1", ":", "return", "''", "raise", "out", "=", "out", ".", "strip", "(", ")", "out", "=", "re", ".", "sub", "(", "r'^(heads|tags|remotes)/'", ",", "''", ",", "out", ")", "# We already have the abbreviated SHA1 from abbreviate_sha1()", "out", "=", "re", ".", "sub", "(", "r'-g[0-9a-f]{7,}$'", ",", "''", ",", "out", ")", "# cls.logger.debug(out)", "return", "out" ]
Returns a human-readable representation of the given SHA1.
[ "Returns", "a", "human", "-", "readable", "representation", "of", "the", "given", "SHA1", "." ]
a00380b8bf1451d8c3405dace8d5927428506eb0
https://github.com/aspiers/git-deps/blob/a00380b8bf1451d8c3405dace8d5927428506eb0/git_deps/gitutils.py#L23-L54
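Both GitUtils records above are thin subprocess wrappers around git plumbing. A self-contained sketch of the same call-and-strip pattern, run against whatever checkout you happen to be in; the None-on-failure behaviour is a choice made for the example, not git-deps' own handling:

import subprocess

def git_short_sha(sha1):
    """Ask git for the unique short form of a SHA1, or None if git rejects it."""
    cmd = ["git", "rev-parse", "--short", sha1]
    try:
        return subprocess.check_output(
            cmd, stderr=subprocess.STDOUT, universal_newlines=True).strip()
    except subprocess.CalledProcessError:
        return None

# e.g. git_short_sha("HEAD") inside any git checkout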
17,378
aspiers/git-deps
git_deps/gitutils.py
GitUtils.refs_to
def refs_to(cls, sha1, repo): """Returns all refs pointing to the given SHA1.""" matching = [] for refname in repo.listall_references(): symref = repo.lookup_reference(refname) dref = symref.resolve() oid = dref.target commit = repo.get(oid) if commit.hex == sha1: matching.append(symref.shorthand) return matching
python
def refs_to(cls, sha1, repo): """Returns all refs pointing to the given SHA1.""" matching = [] for refname in repo.listall_references(): symref = repo.lookup_reference(refname) dref = symref.resolve() oid = dref.target commit = repo.get(oid) if commit.hex == sha1: matching.append(symref.shorthand) return matching
[ "def", "refs_to", "(", "cls", ",", "sha1", ",", "repo", ")", ":", "matching", "=", "[", "]", "for", "refname", "in", "repo", ".", "listall_references", "(", ")", ":", "symref", "=", "repo", ".", "lookup_reference", "(", "refname", ")", "dref", "=", "symref", ".", "resolve", "(", ")", "oid", "=", "dref", ".", "target", "commit", "=", "repo", ".", "get", "(", "oid", ")", "if", "commit", ".", "hex", "==", "sha1", ":", "matching", ".", "append", "(", "symref", ".", "shorthand", ")", "return", "matching" ]
Returns all refs pointing to the given SHA1.
[ "Returns", "all", "refs", "pointing", "to", "the", "given", "SHA1", "." ]
a00380b8bf1451d8c3405dace8d5927428506eb0
https://github.com/aspiers/git-deps/blob/a00380b8bf1451d8c3405dace8d5927428506eb0/git_deps/gitutils.py#L69-L80
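refs_to() walks every reference for each SHA1 it is asked about. As a design alternative (not git-deps' code), the same pygit2 calls can build a commit-to-refs index in a single pass, which pays off when many commits are queried:

from collections import defaultdict

def build_ref_index(repo):
    """Map commit hex -> list of short ref names, walking listall_references() once."""
    index = defaultdict(list)
    for refname in repo.listall_references():
        symref = repo.lookup_reference(refname)
        oid = symref.resolve().target
        index[repo.get(oid).hex].append(symref.shorthand)
    return index

# refs = build_ref_index(repo).get(sha1, [])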
17,379
aspiers/git-deps
git_deps/listener/json.py
JSONDependencyListener.add_commit
def add_commit(self, commit): """Adds the commit to the commits array if it doesn't already exist, and returns the commit's index in the array. """ sha1 = commit.hex if sha1 in self._commits: return self._commits[sha1] title, separator, body = commit.message.partition("\n") commit = { 'explored': False, 'sha1': sha1, 'name': GitUtils.abbreviate_sha1(sha1), 'describe': GitUtils.describe(sha1), 'refs': GitUtils.refs_to(sha1, self.repo()), 'author_name': commit.author.name, 'author_mail': commit.author.email, 'author_time': commit.author.time, 'author_offset': commit.author.offset, 'committer_name': commit.committer.name, 'committer_mail': commit.committer.email, 'committer_time': commit.committer.time, 'committer_offset': commit.committer.offset, # 'message': commit.message, 'title': title, 'separator': separator, 'body': body.lstrip("\n"), } self._json['commits'].append(commit) self._commits[sha1] = len(self._json['commits']) - 1 return self._commits[sha1]
python
def add_commit(self, commit): """Adds the commit to the commits array if it doesn't already exist, and returns the commit's index in the array. """ sha1 = commit.hex if sha1 in self._commits: return self._commits[sha1] title, separator, body = commit.message.partition("\n") commit = { 'explored': False, 'sha1': sha1, 'name': GitUtils.abbreviate_sha1(sha1), 'describe': GitUtils.describe(sha1), 'refs': GitUtils.refs_to(sha1, self.repo()), 'author_name': commit.author.name, 'author_mail': commit.author.email, 'author_time': commit.author.time, 'author_offset': commit.author.offset, 'committer_name': commit.committer.name, 'committer_mail': commit.committer.email, 'committer_time': commit.committer.time, 'committer_offset': commit.committer.offset, # 'message': commit.message, 'title': title, 'separator': separator, 'body': body.lstrip("\n"), } self._json['commits'].append(commit) self._commits[sha1] = len(self._json['commits']) - 1 return self._commits[sha1]
[ "def", "add_commit", "(", "self", ",", "commit", ")", ":", "sha1", "=", "commit", ".", "hex", "if", "sha1", "in", "self", ".", "_commits", ":", "return", "self", ".", "_commits", "[", "sha1", "]", "title", ",", "separator", ",", "body", "=", "commit", ".", "message", ".", "partition", "(", "\"\\n\"", ")", "commit", "=", "{", "'explored'", ":", "False", ",", "'sha1'", ":", "sha1", ",", "'name'", ":", "GitUtils", ".", "abbreviate_sha1", "(", "sha1", ")", ",", "'describe'", ":", "GitUtils", ".", "describe", "(", "sha1", ")", ",", "'refs'", ":", "GitUtils", ".", "refs_to", "(", "sha1", ",", "self", ".", "repo", "(", ")", ")", ",", "'author_name'", ":", "commit", ".", "author", ".", "name", ",", "'author_mail'", ":", "commit", ".", "author", ".", "email", ",", "'author_time'", ":", "commit", ".", "author", ".", "time", ",", "'author_offset'", ":", "commit", ".", "author", ".", "offset", ",", "'committer_name'", ":", "commit", ".", "committer", ".", "name", ",", "'committer_mail'", ":", "commit", ".", "committer", ".", "email", ",", "'committer_time'", ":", "commit", ".", "committer", ".", "time", ",", "'committer_offset'", ":", "commit", ".", "committer", ".", "offset", ",", "# 'message': commit.message,", "'title'", ":", "title", ",", "'separator'", ":", "separator", ",", "'body'", ":", "body", ".", "lstrip", "(", "\"\\n\"", ")", ",", "}", "self", ".", "_json", "[", "'commits'", "]", ".", "append", "(", "commit", ")", "self", ".", "_commits", "[", "sha1", "]", "=", "len", "(", "self", ".", "_json", "[", "'commits'", "]", ")", "-", "1", "return", "self", ".", "_commits", "[", "sha1", "]" ]
Adds the commit to the commits array if it doesn't already exist, and returns the commit's index in the array.
[ "Adds", "the", "commit", "to", "the", "commits", "array", "if", "it", "doesn", "t", "already", "exist", "and", "returns", "the", "commit", "s", "index", "in", "the", "array", "." ]
a00380b8bf1451d8c3405dace8d5927428506eb0
https://github.com/aspiers/git-deps/blob/a00380b8bf1451d8c3405dace8d5927428506eb0/git_deps/listener/json.py#L30-L59
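Underneath the commit-specific fields, add_commit() is the familiar "append once, remember the index" pattern: a dict maps each key to its position in a list so repeated additions return the existing slot. A generic sketch of that pattern, with names chosen only for the example:

def intern_record(records, index, key, make_record):
    """Append make_record() only the first time `key` is seen; always return its position."""
    if key not in index:
        records.append(make_record())
        index[key] = len(records) - 1
    return index[key]

records, index = [], {}
a = intern_record(records, index, "abc123", lambda: {"sha1": "abc123"})
b = intern_record(records, index, "abc123", lambda: {"sha1": "abc123"})
assert a == b == 0 and len(records) == 1   # the second call reused the first entry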
17,380
gocardless/gocardless-pro-python
gocardless_pro/api_client.py
ApiClient.get
def get(self, path, params=None, headers=None): """Perform a GET request, optionally providing query-string params. Args: path (str): A path that gets appended to ``base_url``. params (dict, optional): Dictionary of param names to values. Example: api_client.get('/users', params={'active': True}) Returns: A requests ``Response`` object. """ response = requests.get( self._url_for(path), params=params, headers=self._headers(headers) ) self._handle_errors(response) return response
python
def get(self, path, params=None, headers=None): """Perform a GET request, optionally providing query-string params. Args: path (str): A path that gets appended to ``base_url``. params (dict, optional): Dictionary of param names to values. Example: api_client.get('/users', params={'active': True}) Returns: A requests ``Response`` object. """ response = requests.get( self._url_for(path), params=params, headers=self._headers(headers) ) self._handle_errors(response) return response
[ "def", "get", "(", "self", ",", "path", ",", "params", "=", "None", ",", "headers", "=", "None", ")", ":", "response", "=", "requests", ".", "get", "(", "self", ".", "_url_for", "(", "path", ")", ",", "params", "=", "params", ",", "headers", "=", "self", ".", "_headers", "(", "headers", ")", ")", "self", ".", "_handle_errors", "(", "response", ")", "return", "response" ]
Perform a GET request, optionally providing query-string params. Args: path (str): A path that gets appended to ``base_url``. params (dict, optional): Dictionary of param names to values. Example: api_client.get('/users', params={'active': True}) Returns: A requests ``Response`` object.
[ "Perform", "a", "GET", "request", "optionally", "providing", "query", "-", "string", "params", "." ]
7b57f037d14875eea8d659084eeb524f3ce17f4a
https://github.com/gocardless/gocardless-pro-python/blob/7b57f037d14875eea8d659084eeb524f3ce17f4a/gocardless_pro/api_client.py#L30-L49
17,381
gocardless/gocardless-pro-python
gocardless_pro/api_client.py
ApiClient.post
def post(self, path, body, headers=None): """Perform a POST request, providing a body, which will be JSON-encoded. Args: path (str): A path that gets appended to ``base_url``. body (dict): Dictionary that will be JSON-encoded and sent as the body. Example: api_client.post('/users', body={'name': 'Billy Jean'}) Returns: A requests ``Response`` object. """ response = requests.post( self._url_for(path), data=json.dumps(body), headers=self._headers(headers) ) self._handle_errors(response) return response
python
def post(self, path, body, headers=None): """Perform a POST request, providing a body, which will be JSON-encoded. Args: path (str): A path that gets appended to ``base_url``. body (dict): Dictionary that will be JSON-encoded and sent as the body. Example: api_client.post('/users', body={'name': 'Billy Jean'}) Returns: A requests ``Response`` object. """ response = requests.post( self._url_for(path), data=json.dumps(body), headers=self._headers(headers) ) self._handle_errors(response) return response
[ "def", "post", "(", "self", ",", "path", ",", "body", ",", "headers", "=", "None", ")", ":", "response", "=", "requests", ".", "post", "(", "self", ".", "_url_for", "(", "path", ")", ",", "data", "=", "json", ".", "dumps", "(", "body", ")", ",", "headers", "=", "self", ".", "_headers", "(", "headers", ")", ")", "self", ".", "_handle_errors", "(", "response", ")", "return", "response" ]
Perform a POST request, providing a body, which will be JSON-encoded. Args: path (str): A path that gets appended to ``base_url``. body (dict): Dictionary that will be JSON-encoded and sent as the body. Example: api_client.post('/users', body={'name': 'Billy Jean'}) Returns: A requests ``Response`` object.
[ "Perform", "a", "POST", "request", "providing", "a", "body", "which", "will", "be", "JSON", "-", "encoded", "." ]
7b57f037d14875eea8d659084eeb524f3ce17f4a
https://github.com/gocardless/gocardless-pro-python/blob/7b57f037d14875eea8d659084eeb524f3ce17f4a/gocardless_pro/api_client.py#L51-L71
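The two ApiClient records above are thin wrappers over requests: GET forwards query params, POST JSON-encodes the body, and both funnel the response through an error handler. The sketch below reproduces that shape with a placeholder base URL and uses requests' own raise_for_status() as a stand-in for the library's _handle_errors; none of the names are GoCardless' endpoints.

import json
import requests

BASE_URL = "https://api.example.com"                 # placeholder, not GoCardless' API
HEADERS = {"Content-Type": "application/json"}

def get(path, params=None):
    response = requests.get(BASE_URL + path, params=params, headers=HEADERS)
    response.raise_for_status()                      # surface HTTP errors early
    return response

def post(path, body):
    response = requests.post(BASE_URL + path, data=json.dumps(body), headers=HEADERS)
    response.raise_for_status()
    return response

# get("/users", params={"active": True}); post("/users", {"name": "Billy Jean"})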
17,382
gocardless/gocardless-pro-python
gocardless_pro/services/creditor_bank_accounts_service.py
CreditorBankAccountsService.create
def create(self,params=None, headers=None): """Create a creditor bank account. Creates a new creditor bank account object. Args: params (dict, optional): Request body. Returns: ListResponse of CreditorBankAccount instances """ path = '/creditor_bank_accounts' if params is not None: params = {self._envelope_key(): params} try: response = self._perform_request('POST', path, params, headers, retry_failures=True) except errors.IdempotentCreationConflictError as err: return self.get(identity=err.conflicting_resource_id, params=params, headers=headers) return self._resource_for(response)
python
def create(self,params=None, headers=None): """Create a creditor bank account. Creates a new creditor bank account object. Args: params (dict, optional): Request body. Returns: ListResponse of CreditorBankAccount instances """ path = '/creditor_bank_accounts' if params is not None: params = {self._envelope_key(): params} try: response = self._perform_request('POST', path, params, headers, retry_failures=True) except errors.IdempotentCreationConflictError as err: return self.get(identity=err.conflicting_resource_id, params=params, headers=headers) return self._resource_for(response)
[ "def", "create", "(", "self", ",", "params", "=", "None", ",", "headers", "=", "None", ")", ":", "path", "=", "'/creditor_bank_accounts'", "if", "params", "is", "not", "None", ":", "params", "=", "{", "self", ".", "_envelope_key", "(", ")", ":", "params", "}", "try", ":", "response", "=", "self", ".", "_perform_request", "(", "'POST'", ",", "path", ",", "params", ",", "headers", ",", "retry_failures", "=", "True", ")", "except", "errors", ".", "IdempotentCreationConflictError", "as", "err", ":", "return", "self", ".", "get", "(", "identity", "=", "err", ".", "conflicting_resource_id", ",", "params", "=", "params", ",", "headers", "=", "headers", ")", "return", "self", ".", "_resource_for", "(", "response", ")" ]
Create a creditor bank account. Creates a new creditor bank account object. Args: params (dict, optional): Request body. Returns: ListResponse of CreditorBankAccount instances
[ "Create", "a", "creditor", "bank", "account", "." ]
7b57f037d14875eea8d659084eeb524f3ce17f4a
https://github.com/gocardless/gocardless-pro-python/blob/7b57f037d14875eea8d659084eeb524f3ce17f4a/gocardless_pro/services/creditor_bank_accounts_service.py#L20-L43
17,383
gocardless/gocardless-pro-python
gocardless_pro/services/creditor_bank_accounts_service.py
CreditorBankAccountsService.list
def list(self,params=None, headers=None): """List creditor bank accounts. Returns a [cursor-paginated](#api-usage-cursor-pagination) list of your creditor bank accounts. Args: params (dict, optional): Query string parameters. Returns: CreditorBankAccount """ path = '/creditor_bank_accounts' response = self._perform_request('GET', path, params, headers, retry_failures=True) return self._resource_for(response)
python
def list(self,params=None, headers=None): """List creditor bank accounts. Returns a [cursor-paginated](#api-usage-cursor-pagination) list of your creditor bank accounts. Args: params (dict, optional): Query string parameters. Returns: CreditorBankAccount """ path = '/creditor_bank_accounts' response = self._perform_request('GET', path, params, headers, retry_failures=True) return self._resource_for(response)
[ "def", "list", "(", "self", ",", "params", "=", "None", ",", "headers", "=", "None", ")", ":", "path", "=", "'/creditor_bank_accounts'", "response", "=", "self", ".", "_perform_request", "(", "'GET'", ",", "path", ",", "params", ",", "headers", ",", "retry_failures", "=", "True", ")", "return", "self", ".", "_resource_for", "(", "response", ")" ]
List creditor bank accounts. Returns a [cursor-paginated](#api-usage-cursor-pagination) list of your creditor bank accounts. Args: params (dict, optional): Query string parameters. Returns: CreditorBankAccount
[ "List", "creditor", "bank", "accounts", "." ]
7b57f037d14875eea8d659084eeb524f3ce17f4a
https://github.com/gocardless/gocardless-pro-python/blob/7b57f037d14875eea8d659084eeb524f3ce17f4a/gocardless_pro/services/creditor_bank_accounts_service.py#L46-L63
17,384
gocardless/gocardless-pro-python
gocardless_pro/services/creditor_bank_accounts_service.py
CreditorBankAccountsService.get
def get(self,identity,params=None, headers=None): """Get a single creditor bank account. Retrieves the details of an existing creditor bank account. Args: identity (string): Unique identifier, beginning with "BA". params (dict, optional): Query string parameters. Returns: ListResponse of CreditorBankAccount instances """ path = self._sub_url_params('/creditor_bank_accounts/:identity', { 'identity': identity, }) response = self._perform_request('GET', path, params, headers, retry_failures=True) return self._resource_for(response)
python
def get(self,identity,params=None, headers=None): """Get a single creditor bank account. Retrieves the details of an existing creditor bank account. Args: identity (string): Unique identifier, beginning with "BA". params (dict, optional): Query string parameters. Returns: ListResponse of CreditorBankAccount instances """ path = self._sub_url_params('/creditor_bank_accounts/:identity', { 'identity': identity, }) response = self._perform_request('GET', path, params, headers, retry_failures=True) return self._resource_for(response)
[ "def", "get", "(", "self", ",", "identity", ",", "params", "=", "None", ",", "headers", "=", "None", ")", ":", "path", "=", "self", ".", "_sub_url_params", "(", "'/creditor_bank_accounts/:identity'", ",", "{", "'identity'", ":", "identity", ",", "}", ")", "response", "=", "self", ".", "_perform_request", "(", "'GET'", ",", "path", ",", "params", ",", "headers", ",", "retry_failures", "=", "True", ")", "return", "self", ".", "_resource_for", "(", "response", ")" ]
Get a single creditor bank account. Retrieves the details of an existing creditor bank account. Args: identity (string): Unique identifier, beginning with "BA". params (dict, optional): Query string parameters. Returns: ListResponse of CreditorBankAccount instances
[ "Get", "a", "single", "creditor", "bank", "account", "." ]
7b57f037d14875eea8d659084eeb524f3ce17f4a
https://github.com/gocardless/gocardless-pro-python/blob/7b57f037d14875eea8d659084eeb524f3ce17f4a/gocardless_pro/services/creditor_bank_accounts_service.py#L72-L92
17,385
gocardless/gocardless-pro-python
gocardless_pro/services/creditor_bank_accounts_service.py
CreditorBankAccountsService.disable
def disable(self,identity,params=None, headers=None): """Disable a creditor bank account. Immediately disables the bank account, no money can be paid out to a disabled account. This will return a `disable_failed` error if the bank account has already been disabled. A disabled bank account can be re-enabled by creating a new bank account resource with the same details. Args: identity (string): Unique identifier, beginning with "BA". params (dict, optional): Request body. Returns: ListResponse of CreditorBankAccount instances """ path = self._sub_url_params('/creditor_bank_accounts/:identity/actions/disable', { 'identity': identity, }) if params is not None: params = {'data': params} response = self._perform_request('POST', path, params, headers, retry_failures=False) return self._resource_for(response)
python
def disable(self,identity,params=None, headers=None): """Disable a creditor bank account. Immediately disables the bank account, no money can be paid out to a disabled account. This will return a `disable_failed` error if the bank account has already been disabled. A disabled bank account can be re-enabled by creating a new bank account resource with the same details. Args: identity (string): Unique identifier, beginning with "BA". params (dict, optional): Request body. Returns: ListResponse of CreditorBankAccount instances """ path = self._sub_url_params('/creditor_bank_accounts/:identity/actions/disable', { 'identity': identity, }) if params is not None: params = {'data': params} response = self._perform_request('POST', path, params, headers, retry_failures=False) return self._resource_for(response)
[ "def", "disable", "(", "self", ",", "identity", ",", "params", "=", "None", ",", "headers", "=", "None", ")", ":", "path", "=", "self", ".", "_sub_url_params", "(", "'/creditor_bank_accounts/:identity/actions/disable'", ",", "{", "'identity'", ":", "identity", ",", "}", ")", "if", "params", "is", "not", "None", ":", "params", "=", "{", "'data'", ":", "params", "}", "response", "=", "self", ".", "_perform_request", "(", "'POST'", ",", "path", ",", "params", ",", "headers", ",", "retry_failures", "=", "False", ")", "return", "self", ".", "_resource_for", "(", "response", ")" ]
Disable a creditor bank account. Immediately disables the bank account, no money can be paid out to a disabled account. This will return a `disable_failed` error if the bank account has already been disabled. A disabled bank account can be re-enabled by creating a new bank account resource with the same details. Args: identity (string): Unique identifier, beginning with "BA". params (dict, optional): Request body. Returns: ListResponse of CreditorBankAccount instances
[ "Disable", "a", "creditor", "bank", "account", "." ]
7b57f037d14875eea8d659084eeb524f3ce17f4a
https://github.com/gocardless/gocardless-pro-python/blob/7b57f037d14875eea8d659084eeb524f3ce17f4a/gocardless_pro/services/creditor_bank_accounts_service.py#L95-L123
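A hedged sketch of the disable action above. Note that any request body is wrapped as {"data": params} and the POST is not retried (retry_failures=False) because it is not idempotent; the exception class name is an assumption about the SDK's errors module.

import gocardless_pro

client = gocardless_pro.Client(access_token="YOUR_ACCESS_TOKEN", environment="sandbox")

try:
    # POSTs to /creditor_bank_accounts/:identity/actions/disable.
    disabled = client.creditor_bank_accounts.disable("BA000000000000")
    print(disabled.enabled)  # expected to be False after a successful call
except gocardless_pro.errors.InvalidStateError as err:
    # Assumed error class for the documented `disable_failed` case, i.e. the
    # account was already disabled.
    print("Already disabled:", err)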
17,386
gocardless/gocardless-pro-python
gocardless_pro/services/mandate_pdfs_service.py
MandatePdfsService.create
def create(self,params=None, headers=None): """Create a mandate PDF. Generates a PDF mandate and returns its temporary URL. Customer and bank account details can be left blank (for a blank mandate), provided manually, or inferred from the ID of an existing [mandate](#core-endpoints-mandates). By default, we'll generate PDF mandates in English. To generate a PDF mandate in another language, set the `Accept-Language` header when creating the PDF mandate to the relevant [ISO 639-1](http://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) language code supported for the scheme. | Scheme | Supported languages | | :--------------- | :------------------------------------------------------------------------------------------------------------------------------------------- | | Autogiro | English (`en`), Swedish (`sv`) | | Bacs | English (`en`) | | BECS | English (`en`) | | BECS NZ | English (`en`) | | Betalingsservice | Danish (`da`), English (`en`) | | PAD | English (`en`) | | SEPA Core | Danish (`da`), Dutch (`nl`), English (`en`), French (`fr`), German (`de`), Italian (`it`), Portuguese (`pt`), Spanish (`es`), Swedish (`sv`) | Args: params (dict, optional): Request body. Returns: ListResponse of MandatePdf instances """ path = '/mandate_pdfs' if params is not None: params = {self._envelope_key(): params} response = self._perform_request('POST', path, params, headers, retry_failures=True) return self._resource_for(response)
python
def create(self,params=None, headers=None): """Create a mandate PDF. Generates a PDF mandate and returns its temporary URL. Customer and bank account details can be left blank (for a blank mandate), provided manually, or inferred from the ID of an existing [mandate](#core-endpoints-mandates). By default, we'll generate PDF mandates in English. To generate a PDF mandate in another language, set the `Accept-Language` header when creating the PDF mandate to the relevant [ISO 639-1](http://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) language code supported for the scheme. | Scheme | Supported languages | | :--------------- | :------------------------------------------------------------------------------------------------------------------------------------------- | | Autogiro | English (`en`), Swedish (`sv`) | | Bacs | English (`en`) | | BECS | English (`en`) | | BECS NZ | English (`en`) | | Betalingsservice | Danish (`da`), English (`en`) | | PAD | English (`en`) | | SEPA Core | Danish (`da`), Dutch (`nl`), English (`en`), French (`fr`), German (`de`), Italian (`it`), Portuguese (`pt`), Spanish (`es`), Swedish (`sv`) | Args: params (dict, optional): Request body. Returns: ListResponse of MandatePdf instances """ path = '/mandate_pdfs' if params is not None: params = {self._envelope_key(): params} response = self._perform_request('POST', path, params, headers, retry_failures=True) return self._resource_for(response)
[ "def", "create", "(", "self", ",", "params", "=", "None", ",", "headers", "=", "None", ")", ":", "path", "=", "'/mandate_pdfs'", "if", "params", "is", "not", "None", ":", "params", "=", "{", "self", ".", "_envelope_key", "(", ")", ":", "params", "}", "response", "=", "self", ".", "_perform_request", "(", "'POST'", ",", "path", ",", "params", ",", "headers", ",", "retry_failures", "=", "True", ")", "return", "self", ".", "_resource_for", "(", "response", ")" ]
Create a mandate PDF. Generates a PDF mandate and returns its temporary URL. Customer and bank account details can be left blank (for a blank mandate), provided manually, or inferred from the ID of an existing [mandate](#core-endpoints-mandates). By default, we'll generate PDF mandates in English. To generate a PDF mandate in another language, set the `Accept-Language` header when creating the PDF mandate to the relevant [ISO 639-1](http://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) language code supported for the scheme. | Scheme | Supported languages | | :--------------- | :------------------------------------------------------------------------------------------------------------------------------------------- | | Autogiro | English (`en`), Swedish (`sv`) | | Bacs | English (`en`) | | BECS | English (`en`) | | BECS NZ | English (`en`) | | Betalingsservice | Danish (`da`), English (`en`) | | PAD | English (`en`) | | SEPA Core | Danish (`da`), Dutch (`nl`), English (`en`), French (`fr`), German (`de`), Italian (`it`), Portuguese (`pt`), Spanish (`es`), Swedish (`sv`) | Args: params (dict, optional): Request body. Returns: ListResponse of MandatePdf instances
[ "Create", "a", "mandate", "PDF", "." ]
7b57f037d14875eea8d659084eeb524f3ce17f4a
https://github.com/gocardless/gocardless-pro-python/blob/7b57f037d14875eea8d659084eeb524f3ce17f4a/gocardless_pro/services/mandate_pdfs_service.py#L20-L77
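A sketch of generating a blank Autogiro mandate PDF in Swedish via the Accept-Language header, following the language table in the docstring above; the token, scheme choice and attribute names are placeholders/assumptions.

import gocardless_pro

client = gocardless_pro.Client(access_token="YOUR_ACCESS_TOKEN", environment="sandbox")

# create() wraps the body under the service's envelope key before POSTing;
# Accept-Language selects the mandate language per the table above.
pdf = client.mandate_pdfs.create(
    params={"scheme": "autogiro"},
    headers={"Accept-Language": "sv"},
)
print(pdf.url, pdf.expires_at)  # temporary URL of the generated PDF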
17,387
gocardless/gocardless-pro-python
gocardless_pro/services/payments_service.py
PaymentsService.update
def update(self,identity,params=None, headers=None): """Update a payment. Updates a payment object. This accepts only the metadata parameter. Args: identity (string): Unique identifier, beginning with "PM". params (dict, optional): Request body. Returns: ListResponse of Payment instances """ path = self._sub_url_params('/payments/:identity', { 'identity': identity, }) if params is not None: params = {self._envelope_key(): params} response = self._perform_request('PUT', path, params, headers, retry_failures=True) return self._resource_for(response)
python
def update(self,identity,params=None, headers=None): """Update a payment. Updates a payment object. This accepts only the metadata parameter. Args: identity (string): Unique identifier, beginning with "PM". params (dict, optional): Request body. Returns: ListResponse of Payment instances """ path = self._sub_url_params('/payments/:identity', { 'identity': identity, }) if params is not None: params = {self._envelope_key(): params} response = self._perform_request('PUT', path, params, headers, retry_failures=True) return self._resource_for(response)
[ "def", "update", "(", "self", ",", "identity", ",", "params", "=", "None", ",", "headers", "=", "None", ")", ":", "path", "=", "self", ".", "_sub_url_params", "(", "'/payments/:identity'", ",", "{", "'identity'", ":", "identity", ",", "}", ")", "if", "params", "is", "not", "None", ":", "params", "=", "{", "self", ".", "_envelope_key", "(", ")", ":", "params", "}", "response", "=", "self", ".", "_perform_request", "(", "'PUT'", ",", "path", ",", "params", ",", "headers", ",", "retry_failures", "=", "True", ")", "return", "self", ".", "_resource_for", "(", "response", ")" ]
Update a payment. Updates a payment object. This accepts only the metadata parameter. Args: identity (string): Unique identifier, beginning with "PM". params (dict, optional): Request body. Returns: ListResponse of Payment instances
[ "Update", "a", "payment", "." ]
7b57f037d14875eea8d659084eeb524f3ce17f4a
https://github.com/gocardless/gocardless-pro-python/blob/7b57f037d14875eea8d659084eeb524f3ce17f4a/gocardless_pro/services/payments_service.py#L101-L123
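A sketch of a metadata-only update, which is the only field this endpoint accepts; the "PM..." identifier and metadata keys are placeholders.

import gocardless_pro

client = gocardless_pro.Client(access_token="YOUR_ACCESS_TOKEN", environment="sandbox")

# update() wraps the body under the envelope key (e.g. {"payments": {...}})
# before issuing the PUT to /payments/:identity.
payment = client.payments.update(
    "PM000000000000",
    params={"metadata": {"order_id": "ORD-42"}},
)
print(payment.metadata)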
17,388
prashnts/hues
hues/console.py
Config.resolve_config
def resolve_config(self): '''Resolve configuration params to native instances''' conf = self.load_config(self.force_default) for k in conf['hues']: conf['hues'][k] = getattr(KEYWORDS, conf['hues'][k]) as_tuples = lambda name, obj: namedtuple(name, obj.keys())(**obj) self.hues = as_tuples('Hues', conf['hues']) self.opts = as_tuples('Options', conf['options']) self.labels = as_tuples('Labels', conf['labels'])
python
def resolve_config(self): '''Resolve configuration params to native instances''' conf = self.load_config(self.force_default) for k in conf['hues']: conf['hues'][k] = getattr(KEYWORDS, conf['hues'][k]) as_tuples = lambda name, obj: namedtuple(name, obj.keys())(**obj) self.hues = as_tuples('Hues', conf['hues']) self.opts = as_tuples('Options', conf['options']) self.labels = as_tuples('Labels', conf['labels'])
[ "def", "resolve_config", "(", "self", ")", ":", "conf", "=", "self", ".", "load_config", "(", "self", ".", "force_default", ")", "for", "k", "in", "conf", "[", "'hues'", "]", ":", "conf", "[", "'hues'", "]", "[", "k", "]", "=", "getattr", "(", "KEYWORDS", ",", "conf", "[", "'hues'", "]", "[", "k", "]", ")", "as_tuples", "=", "lambda", "name", ",", "obj", ":", "namedtuple", "(", "name", ",", "obj", ".", "keys", "(", ")", ")", "(", "*", "*", "obj", ")", "self", ".", "hues", "=", "as_tuples", "(", "'Hues'", ",", "conf", "[", "'hues'", "]", ")", "self", ".", "opts", "=", "as_tuples", "(", "'Options'", ",", "conf", "[", "'options'", "]", ")", "self", ".", "labels", "=", "as_tuples", "(", "'Labels'", ",", "conf", "[", "'labels'", "]", ")" ]
Resolve configuration params to native instances
[ "Resolve", "configuration", "params", "to", "native", "instances" ]
888049a41e3f2bf33546e53ef3c17494ee8c8790
https://github.com/prashnts/hues/blob/888049a41e3f2bf33546e53ef3c17494ee8c8790/hues/console.py#L72-L81
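The namedtuple conversion used by resolve_config can be exercised on its own; a standalone sketch with a made-up config section (the real defaults live in hues' config file).

from collections import namedtuple

# Same trick as `as_tuples` above: turn a dict section into an immutable,
# attribute-accessible namedtuple.
as_tuples = lambda name, obj: namedtuple(name, obj.keys())(**obj)

conf = {"options": {"add_timestamp": True, "show_label": True}}
opts = as_tuples("Options", conf["options"])
print(opts.add_timestamp, opts.show_label)  # True True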
17,389
prashnts/hues
hues/dpda.py
apply
def apply(funcs, stack): '''Apply functions to the stack, passing the resulting stack to next state.''' return reduce(lambda x, y: y(x), funcs, stack)
python
def apply(funcs, stack): '''Apply functions to the stack, passing the resulting stack to next state.''' return reduce(lambda x, y: y(x), funcs, stack)
[ "def", "apply", "(", "funcs", ",", "stack", ")", ":", "return", "reduce", "(", "lambda", "x", ",", "y", ":", "y", "(", "x", ")", ",", "funcs", ",", "stack", ")" ]
Apply functions to the stack, passing the resulting stack to next state.
[ "Apply", "functions", "to", "the", "stack", "passing", "the", "resulting", "stack", "to", "next", "state", "." ]
888049a41e3f2bf33546e53ef3c17494ee8c8790
https://github.com/prashnts/hues/blob/888049a41e3f2bf33546e53ef3c17494ee8c8790/hues/dpda.py#L41-L43
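apply is a reduce-based pipeline: each function receives the current stack and returns the next one. The sketch below reproduces it standalone with toy stack operations (tuples of ANSI codes), since the real producer/optimizer functions live elsewhere in dpda.py.

from functools import reduce

def apply(funcs, stack):
    # Thread the stack through each function in order.
    return reduce(lambda acc, fn: fn(acc), funcs, stack)

push_red = lambda stack: stack + (31,)    # toy operation: append a color code
push_bold = lambda stack: stack + (1,)    # toy operation: append a style code
print(apply((push_red, push_bold), ()))   # (31, 1)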
17,390
prashnts/hues
hues/huestr.py
colorize
def colorize(string, stack): '''Apply optimal ANSI escape sequences to the string.''' codes = optimize(stack) if len(codes): prefix = SEQ % ';'.join(map(str, codes)) suffix = SEQ % STYLE.reset return prefix + string + suffix else: return string
python
def colorize(string, stack): '''Apply optimal ANSI escape sequences to the string.''' codes = optimize(stack) if len(codes): prefix = SEQ % ';'.join(map(str, codes)) suffix = SEQ % STYLE.reset return prefix + string + suffix else: return string
[ "def", "colorize", "(", "string", ",", "stack", ")", ":", "codes", "=", "optimize", "(", "stack", ")", "if", "len", "(", "codes", ")", ":", "prefix", "=", "SEQ", "%", "';'", ".", "join", "(", "map", "(", "str", ",", "codes", ")", ")", "suffix", "=", "SEQ", "%", "STYLE", ".", "reset", "return", "prefix", "+", "string", "+", "suffix", "else", ":", "return", "string" ]
Apply optimal ANSI escape sequences to the string.
[ "Apply", "optimal", "ANSI", "escape", "sequences", "to", "the", "string", "." ]
888049a41e3f2bf33546e53ef3c17494ee8c8790
https://github.com/prashnts/hues/blob/888049a41e3f2bf33546e53ef3c17494ee8c8790/hues/huestr.py#L21-L29
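A self-contained sketch of the same wrapping, assuming SEQ is the usual CSI template '\x1b[%sm' and 0 is the reset code; the real constants and the optimize() step come from hues' keyword definitions and dpda module.

SEQ = '\x1b[%sm'   # assumed CSI template, matching common ANSI usage
RESET = 0          # assumed reset code

def colorize(string, codes):
    # Join the numeric codes into one escape sequence and reset afterwards.
    if codes:
        return (SEQ % ';'.join(map(str, codes))) + string + (SEQ % RESET)
    return string

print(colorize('hello', (1, 31)))  # bold red 'hello' on an ANSI terminal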
17,391
SpikeInterface/spiketoolkit
spiketoolkit/comparison/comparisontools.py
compute_agreement_score
def compute_agreement_score(num_matches, num1, num2): """ Agreement score is used as a criteria to match unit1 and unit2. """ denom = num1 + num2 - num_matches if denom == 0: return 0 return num_matches / denom
python
def compute_agreement_score(num_matches, num1, num2): """ Agreement score is used as a criteria to match unit1 and unit2. """ denom = num1 + num2 - num_matches if denom == 0: return 0 return num_matches / denom
[ "def", "compute_agreement_score", "(", "num_matches", ",", "num1", ",", "num2", ")", ":", "denom", "=", "num1", "+", "num2", "-", "num_matches", "if", "denom", "==", "0", ":", "return", "0", "return", "num_matches", "/", "denom" ]
Agreement score is used as a criteria to match unit1 and unit2.
[ "Agreement", "score", "is", "used", "as", "a", "criteria", "to", "match", "unit1", "and", "unit2", "." ]
f7c054383d1ebca640966b057c087fa187955d13
https://github.com/SpikeInterface/spiketoolkit/blob/f7c054383d1ebca640966b057c087fa187955d13/spiketoolkit/comparison/comparisontools.py#L25-L32
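A worked example of the score, which is just the number of matched events divided by the size of the union of the two spike trains.

def compute_agreement_score(num_matches, num1, num2):
    denom = num1 + num2 - num_matches   # size of the union of both trains
    return 0 if denom == 0 else num_matches / denom

# 80 coincident spikes out of 100 and 90 events respectively:
# 80 / (100 + 90 - 80) = 80 / 110 ~ 0.727
print(round(compute_agreement_score(80, 100, 90), 3))  # 0.727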
17,392
SpikeInterface/spiketoolkit
spiketoolkit/sorters/launcher.py
collect_results
def collect_results(working_folder): """ Collect results in a working_folder. The output is nested dict[rec_name][sorter_name] of SortingExtrator. """ results = {} working_folder = Path(working_folder) output_folders = working_folder/'output_folders' for rec_name in os.listdir(output_folders): if not os.path.isdir(output_folders / rec_name): continue # print(rec_name) results[rec_name] = {} for sorter_name in os.listdir(output_folders / rec_name): # print(' ', sorter_name) output_folder = output_folders / rec_name / sorter_name #~ print(output_folder) if not os.path.isdir(output_folder): continue SorterClass = sorter_dict[sorter_name] results[rec_name][sorter_name] = SorterClass.get_result_from_folder(output_folder) return results
python
def collect_results(working_folder): """ Collect results in a working_folder. The output is nested dict[rec_name][sorter_name] of SortingExtrator. """ results = {} working_folder = Path(working_folder) output_folders = working_folder/'output_folders' for rec_name in os.listdir(output_folders): if not os.path.isdir(output_folders / rec_name): continue # print(rec_name) results[rec_name] = {} for sorter_name in os.listdir(output_folders / rec_name): # print(' ', sorter_name) output_folder = output_folders / rec_name / sorter_name #~ print(output_folder) if not os.path.isdir(output_folder): continue SorterClass = sorter_dict[sorter_name] results[rec_name][sorter_name] = SorterClass.get_result_from_folder(output_folder) return results
[ "def", "collect_results", "(", "working_folder", ")", ":", "results", "=", "{", "}", "working_folder", "=", "Path", "(", "working_folder", ")", "output_folders", "=", "working_folder", "/", "'output_folders'", "for", "rec_name", "in", "os", ".", "listdir", "(", "output_folders", ")", ":", "if", "not", "os", ".", "path", ".", "isdir", "(", "output_folders", "/", "rec_name", ")", ":", "continue", "# print(rec_name)", "results", "[", "rec_name", "]", "=", "{", "}", "for", "sorter_name", "in", "os", ".", "listdir", "(", "output_folders", "/", "rec_name", ")", ":", "# print(' ', sorter_name)", "output_folder", "=", "output_folders", "/", "rec_name", "/", "sorter_name", "#~ print(output_folder)", "if", "not", "os", ".", "path", ".", "isdir", "(", "output_folder", ")", ":", "continue", "SorterClass", "=", "sorter_dict", "[", "sorter_name", "]", "results", "[", "rec_name", "]", "[", "sorter_name", "]", "=", "SorterClass", ".", "get_result_from_folder", "(", "output_folder", ")", "return", "results" ]
Collect results in a working_folder. The output is nested dict[rec_name][sorter_name] of SortingExtractor.
[ "Collect", "results", "in", "a", "working_folder", "." ]
f7c054383d1ebca640966b057c087fa187955d13
https://github.com/SpikeInterface/spiketoolkit/blob/f7c054383d1ebca640966b057c087fa187955d13/spiketoolkit/sorters/launcher.py#L181-L206
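A sketch of consuming the nested dict returned above; the import path, the placeholder folder name, and the get_unit_ids() call (assumed from the spikeextractors SortingExtractor API) are assumptions.

from spiketoolkit.sorters import collect_results  # import path assumed

results = collect_results('my_study/working_folder')  # placeholder path
for rec_name, by_sorter in results.items():
    for sorter_name, sorting in by_sorter.items():
        # `sorting` is a SortingExtractor for this recording/sorter pair.
        print(rec_name, sorter_name, len(sorting.get_unit_ids()))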
17,393
SpikeInterface/spiketoolkit
spiketoolkit/sorters/sorterlist.py
run_sorter
def run_sorter(sorter_name_or_class, recording, output_folder=None, delete_output_folder=False, grouping_property=None, parallel=False, debug=False, **params): """ Generic function to run a sorter via function approach. 2 Usage with name or class: by name: >>> sorting = run_sorter('tridesclous', recording) by class: >>> sorting = run_sorter(TridesclousSorter, recording) """ if isinstance(sorter_name_or_class, str): SorterClass = sorter_dict[sorter_name_or_class] elif sorter_name_or_class in sorter_full_list: SorterClass = sorter_name_or_class else: raise(ValueError('Unknown sorter')) sorter = SorterClass(recording=recording, output_folder=output_folder, grouping_property=grouping_property, parallel=parallel, debug=debug, delete_output_folder=delete_output_folder) sorter.set_params(**params) sorter.run() sortingextractor = sorter.get_result() return sortingextractor
python
def run_sorter(sorter_name_or_class, recording, output_folder=None, delete_output_folder=False, grouping_property=None, parallel=False, debug=False, **params): """ Generic function to run a sorter via function approach. 2 Usage with name or class: by name: >>> sorting = run_sorter('tridesclous', recording) by class: >>> sorting = run_sorter(TridesclousSorter, recording) """ if isinstance(sorter_name_or_class, str): SorterClass = sorter_dict[sorter_name_or_class] elif sorter_name_or_class in sorter_full_list: SorterClass = sorter_name_or_class else: raise(ValueError('Unknown sorter')) sorter = SorterClass(recording=recording, output_folder=output_folder, grouping_property=grouping_property, parallel=parallel, debug=debug, delete_output_folder=delete_output_folder) sorter.set_params(**params) sorter.run() sortingextractor = sorter.get_result() return sortingextractor
[ "def", "run_sorter", "(", "sorter_name_or_class", ",", "recording", ",", "output_folder", "=", "None", ",", "delete_output_folder", "=", "False", ",", "grouping_property", "=", "None", ",", "parallel", "=", "False", ",", "debug", "=", "False", ",", "*", "*", "params", ")", ":", "if", "isinstance", "(", "sorter_name_or_class", ",", "str", ")", ":", "SorterClass", "=", "sorter_dict", "[", "sorter_name_or_class", "]", "elif", "sorter_name_or_class", "in", "sorter_full_list", ":", "SorterClass", "=", "sorter_name_or_class", "else", ":", "raise", "(", "ValueError", "(", "'Unknown sorter'", ")", ")", "sorter", "=", "SorterClass", "(", "recording", "=", "recording", ",", "output_folder", "=", "output_folder", ",", "grouping_property", "=", "grouping_property", ",", "parallel", "=", "parallel", ",", "debug", "=", "debug", ",", "delete_output_folder", "=", "delete_output_folder", ")", "sorter", ".", "set_params", "(", "*", "*", "params", ")", "sorter", ".", "run", "(", ")", "sortingextractor", "=", "sorter", ".", "get_result", "(", ")", "return", "sortingextractor" ]
Generic function to run a sorter via function approach. Two usages, with name or class: by name: >>> sorting = run_sorter('tridesclous', recording) by class: >>> sorting = run_sorter(TridesclousSorter, recording)
[ "Generic", "function", "to", "run", "a", "sorter", "via", "function", "approach", "." ]
f7c054383d1ebca640966b057c087fa187955d13
https://github.com/SpikeInterface/spiketoolkit/blob/f7c054383d1ebca640966b057c087fa187955d13/spiketoolkit/sorters/sorterlist.py#L29-L57
17,394
SpikeInterface/spiketoolkit
spiketoolkit/comparison/sortingcomparison.py
compute_performance
def compute_performance(SC, verbose=True, output='dict'): """ Return some performance value for comparison. Parameters ------- SC: SortingComparison instance The SortingComparison verbose: bool Display on console or not output: dict or pandas Returns ---------- performance: dict or pandas.Serie depending output param """ counts = SC._counts tp_rate = float(counts['TP']) / counts['TOT_ST1'] * 100 cl_rate = float(counts['CL']) / counts['TOT_ST1'] * 100 fn_rate = float(counts['FN']) / counts['TOT_ST1'] * 100 fp_st1 = float(counts['FP']) / counts['TOT_ST1'] * 100 fp_st2 = float(counts['FP']) / counts['TOT_ST2'] * 100 accuracy = tp_rate / (tp_rate + fn_rate + fp_st1) * 100 sensitivity = tp_rate / (tp_rate + fn_rate) * 100 miss_rate = fn_rate / (tp_rate + fn_rate) * 100 precision = tp_rate / (tp_rate + fp_st1) * 100 false_discovery_rate = fp_st1 / (tp_rate + fp_st1) * 100 performance = {'tp': tp_rate, 'cl': cl_rate, 'fn': fn_rate, 'fp_st1': fp_st1, 'fp_st2': fp_st2, 'accuracy': accuracy, 'sensitivity': sensitivity, 'precision': precision, 'miss_rate': miss_rate, 'false_disc_rate': false_discovery_rate} if verbose: txt = _txt_performance.format(**performance) print(txt) if output == 'dict': return performance elif output == 'pandas': return pd.Series(performance)
python
def compute_performance(SC, verbose=True, output='dict'): """ Return some performance value for comparison. Parameters ------- SC: SortingComparison instance The SortingComparison verbose: bool Display on console or not output: dict or pandas Returns ---------- performance: dict or pandas.Serie depending output param """ counts = SC._counts tp_rate = float(counts['TP']) / counts['TOT_ST1'] * 100 cl_rate = float(counts['CL']) / counts['TOT_ST1'] * 100 fn_rate = float(counts['FN']) / counts['TOT_ST1'] * 100 fp_st1 = float(counts['FP']) / counts['TOT_ST1'] * 100 fp_st2 = float(counts['FP']) / counts['TOT_ST2'] * 100 accuracy = tp_rate / (tp_rate + fn_rate + fp_st1) * 100 sensitivity = tp_rate / (tp_rate + fn_rate) * 100 miss_rate = fn_rate / (tp_rate + fn_rate) * 100 precision = tp_rate / (tp_rate + fp_st1) * 100 false_discovery_rate = fp_st1 / (tp_rate + fp_st1) * 100 performance = {'tp': tp_rate, 'cl': cl_rate, 'fn': fn_rate, 'fp_st1': fp_st1, 'fp_st2': fp_st2, 'accuracy': accuracy, 'sensitivity': sensitivity, 'precision': precision, 'miss_rate': miss_rate, 'false_disc_rate': false_discovery_rate} if verbose: txt = _txt_performance.format(**performance) print(txt) if output == 'dict': return performance elif output == 'pandas': return pd.Series(performance)
[ "def", "compute_performance", "(", "SC", ",", "verbose", "=", "True", ",", "output", "=", "'dict'", ")", ":", "counts", "=", "SC", ".", "_counts", "tp_rate", "=", "float", "(", "counts", "[", "'TP'", "]", ")", "/", "counts", "[", "'TOT_ST1'", "]", "*", "100", "cl_rate", "=", "float", "(", "counts", "[", "'CL'", "]", ")", "/", "counts", "[", "'TOT_ST1'", "]", "*", "100", "fn_rate", "=", "float", "(", "counts", "[", "'FN'", "]", ")", "/", "counts", "[", "'TOT_ST1'", "]", "*", "100", "fp_st1", "=", "float", "(", "counts", "[", "'FP'", "]", ")", "/", "counts", "[", "'TOT_ST1'", "]", "*", "100", "fp_st2", "=", "float", "(", "counts", "[", "'FP'", "]", ")", "/", "counts", "[", "'TOT_ST2'", "]", "*", "100", "accuracy", "=", "tp_rate", "/", "(", "tp_rate", "+", "fn_rate", "+", "fp_st1", ")", "*", "100", "sensitivity", "=", "tp_rate", "/", "(", "tp_rate", "+", "fn_rate", ")", "*", "100", "miss_rate", "=", "fn_rate", "/", "(", "tp_rate", "+", "fn_rate", ")", "*", "100", "precision", "=", "tp_rate", "/", "(", "tp_rate", "+", "fp_st1", ")", "*", "100", "false_discovery_rate", "=", "fp_st1", "/", "(", "tp_rate", "+", "fp_st1", ")", "*", "100", "performance", "=", "{", "'tp'", ":", "tp_rate", ",", "'cl'", ":", "cl_rate", ",", "'fn'", ":", "fn_rate", ",", "'fp_st1'", ":", "fp_st1", ",", "'fp_st2'", ":", "fp_st2", ",", "'accuracy'", ":", "accuracy", ",", "'sensitivity'", ":", "sensitivity", ",", "'precision'", ":", "precision", ",", "'miss_rate'", ":", "miss_rate", ",", "'false_disc_rate'", ":", "false_discovery_rate", "}", "if", "verbose", ":", "txt", "=", "_txt_performance", ".", "format", "(", "*", "*", "performance", ")", "print", "(", "txt", ")", "if", "output", "==", "'dict'", ":", "return", "performance", "elif", "output", "==", "'pandas'", ":", "return", "pd", ".", "Series", "(", "performance", ")" ]
Return some performance value for comparison. Parameters ------- SC: SortingComparison instance The SortingComparison verbose: bool Display on console or not output: 'dict' or 'pandas' Returns ---------- performance: dict or pandas.Series depending on the output param
[ "Return", "some", "performance", "value", "for", "comparison", "." ]
f7c054383d1ebca640966b057c087fa187955d13
https://github.com/SpikeInterface/spiketoolkit/blob/f7c054383d1ebca640966b057c087fa187955d13/spiketoolkit/comparison/sortingcomparison.py#L279-L325
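The rates above are straightforward percentages of the counts dict; a standalone arithmetic check with made-up counts (not the SortingComparison object itself).

counts = {'TP': 90, 'CL': 2, 'FN': 8, 'FP': 5, 'TOT_ST1': 100, 'TOT_ST2': 95}

tp_rate = counts['TP'] / counts['TOT_ST1'] * 100             # 90.0
fn_rate = counts['FN'] / counts['TOT_ST1'] * 100             # 8.0
fp_st1 = counts['FP'] / counts['TOT_ST1'] * 100              # 5.0
accuracy = tp_rate / (tp_rate + fn_rate + fp_st1) * 100      # ~ 87.4
sensitivity = tp_rate / (tp_rate + fn_rate) * 100            # ~ 91.8
precision = tp_rate / (tp_rate + fp_st1) * 100               # ~ 94.7
print(round(accuracy, 1), round(sensitivity, 1), round(precision, 1))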
17,395
uber/rides-python-sdk
uber_rides/errors.py
HTTPError._complex_response_to_error_adapter
def _complex_response_to_error_adapter(self, body): """Convert a list of error responses.""" meta = body.get('meta') errors = body.get('errors') e = [] for error in errors: status = error['status'] code = error['code'] title = error['title'] e.append(ErrorDetails(status, code, title)) return e, meta
python
def _complex_response_to_error_adapter(self, body): """Convert a list of error responses.""" meta = body.get('meta') errors = body.get('errors') e = [] for error in errors: status = error['status'] code = error['code'] title = error['title'] e.append(ErrorDetails(status, code, title)) return e, meta
[ "def", "_complex_response_to_error_adapter", "(", "self", ",", "body", ")", ":", "meta", "=", "body", ".", "get", "(", "'meta'", ")", "errors", "=", "body", ".", "get", "(", "'errors'", ")", "e", "=", "[", "]", "for", "error", "in", "errors", ":", "status", "=", "error", "[", "'status'", "]", "code", "=", "error", "[", "'code'", "]", "title", "=", "error", "[", "'title'", "]", "e", ".", "append", "(", "ErrorDetails", "(", "status", ",", "code", ",", "title", ")", ")", "return", "e", ",", "meta" ]
Convert a list of error responses.
[ "Convert", "a", "list", "of", "error", "responses", "." ]
76ecd75ab5235d792ec1010e36eca679ba285127
https://github.com/uber/rides-python-sdk/blob/76ecd75ab5235d792ec1010e36eca679ba285127/uber_rides/errors.py#L53-L66
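A standalone sketch of the same adapter over a typical multi-error body; ErrorDetails is stubbed with a namedtuple and the field values are made up, since the real class and payloads live elsewhere in the SDK.

from collections import namedtuple

ErrorDetails = namedtuple('ErrorDetails', ['status', 'code', 'title'])  # stand-in

body = {
    'meta': {'uuid': 'deadbeef'},
    'errors': [
        {'status': 422, 'code': 'validation_failed', 'title': 'Invalid request'},
        {'status': 422, 'code': 'distance_exceeded', 'title': 'Trip too long'},
    ],
}

# Same shape as _complex_response_to_error_adapter: one ErrorDetails per entry, plus meta.
errors = [ErrorDetails(e['status'], e['code'], e['title']) for e in body['errors']]
print(errors, body['meta'])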
17,396
uber/rides-python-sdk
uber_rides/errors.py
ServerError._adapt_response
def _adapt_response(self, response): """Convert various error responses to standardized ErrorDetails.""" errors, meta = super(ServerError, self)._adapt_response(response) return errors[0], meta
python
def _adapt_response(self, response): """Convert various error responses to standardized ErrorDetails.""" errors, meta = super(ServerError, self)._adapt_response(response) return errors[0], meta
[ "def", "_adapt_response", "(", "self", ",", "response", ")", ":", "errors", ",", "meta", "=", "super", "(", "ServerError", ",", "self", ")", ".", "_adapt_response", "(", "response", ")", "return", "errors", "[", "0", "]", ",", "meta" ]
Convert various error responses to standardized ErrorDetails.
[ "Convert", "various", "error", "responses", "to", "standardized", "ErrorDetails", "." ]
76ecd75ab5235d792ec1010e36eca679ba285127
https://github.com/uber/rides-python-sdk/blob/76ecd75ab5235d792ec1010e36eca679ba285127/uber_rides/errors.py#L141-L144
17,397
uber/rides-python-sdk
uber_rides/request.py
Request._prepare
def _prepare(self): """Builds a URL and return a PreparedRequest. Returns (requests.PreparedRequest) Raises UberIllegalState (APIError) """ if self.method not in http.ALLOWED_METHODS: raise UberIllegalState('Unsupported HTTP Method.') api_host = self.api_host headers = self._build_headers(self.method, self.auth_session) url = build_url(api_host, self.path) data, params = generate_data(self.method, self.args) return generate_prepared_request( self.method, url, headers, data, params, self.handlers, )
python
def _prepare(self): """Builds a URL and return a PreparedRequest. Returns (requests.PreparedRequest) Raises UberIllegalState (APIError) """ if self.method not in http.ALLOWED_METHODS: raise UberIllegalState('Unsupported HTTP Method.') api_host = self.api_host headers = self._build_headers(self.method, self.auth_session) url = build_url(api_host, self.path) data, params = generate_data(self.method, self.args) return generate_prepared_request( self.method, url, headers, data, params, self.handlers, )
[ "def", "_prepare", "(", "self", ")", ":", "if", "self", ".", "method", "not", "in", "http", ".", "ALLOWED_METHODS", ":", "raise", "UberIllegalState", "(", "'Unsupported HTTP Method.'", ")", "api_host", "=", "self", ".", "api_host", "headers", "=", "self", ".", "_build_headers", "(", "self", ".", "method", ",", "self", ".", "auth_session", ")", "url", "=", "build_url", "(", "api_host", ",", "self", ".", "path", ")", "data", ",", "params", "=", "generate_data", "(", "self", ".", "method", ",", "self", ".", "args", ")", "return", "generate_prepared_request", "(", "self", ".", "method", ",", "url", ",", "headers", ",", "data", ",", "params", ",", "self", ".", "handlers", ",", ")" ]
Builds a URL and returns a PreparedRequest. Returns (requests.PreparedRequest) Raises UberIllegalState (APIError)
[ "Builds", "a", "URL", "and", "returns", "a", "PreparedRequest", "." ]
76ecd75ab5235d792ec1010e36eca679ba285127
https://github.com/uber/rides-python-sdk/blob/76ecd75ab5235d792ec1010e36eca679ba285127/uber_rides/request.py#L98-L122
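A sketch of what the prepare/send pair boils down to with plain requests, using a placeholder URL, token and query parameters; the SDK's own generate_* helpers add handling (query-vs-body params, response hooks) that is elided here.

import requests

url = 'https://api.uber.com/v1.2/products'            # placeholder host + path
headers = {'Authorization': 'Bearer <token>'}          # placeholder credentials
params = {'latitude': 37.77, 'longitude': -122.41}

# Equivalent of _prepare(): build a PreparedRequest without sending it yet.
prepared = requests.Request('GET', url, headers=headers, params=params).prepare()

# Equivalent of _send(): push it through a Session and inspect the reply
# (with a placeholder token this will simply come back unauthorized).
response = requests.Session().send(prepared)
print(response.status_code)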
17,398
uber/rides-python-sdk
uber_rides/request.py
Request._send
def _send(self, prepared_request): """Send a PreparedRequest to the server. Parameters prepared_request (requests.PreparedRequest) Returns (Response) A Response object, whichcontains a server's response to an HTTP request. """ session = Session() response = session.send(prepared_request) return Response(response)
python
def _send(self, prepared_request): """Send a PreparedRequest to the server. Parameters prepared_request (requests.PreparedRequest) Returns (Response) A Response object, whichcontains a server's response to an HTTP request. """ session = Session() response = session.send(prepared_request) return Response(response)
[ "def", "_send", "(", "self", ",", "prepared_request", ")", ":", "session", "=", "Session", "(", ")", "response", "=", "session", ".", "send", "(", "prepared_request", ")", "return", "Response", "(", "response", ")" ]
Send a PreparedRequest to the server. Parameters prepared_request (requests.PreparedRequest) Returns (Response) A Response object, which contains a server's response to an HTTP request.
[ "Send", "a", "PreparedRequest", "to", "the", "server", "." ]
76ecd75ab5235d792ec1010e36eca679ba285127
https://github.com/uber/rides-python-sdk/blob/76ecd75ab5235d792ec1010e36eca679ba285127/uber_rides/request.py#L124-L137
17,399
uber/rides-python-sdk
uber_rides/request.py
Request._build_headers
def _build_headers(self, method, auth_session): """Create headers for the request. Parameters method (str) HTTP method (e.g. 'POST'). auth_session (Session) The Session object containing OAuth 2.0 credentials. Returns headers (dict) Dictionary of access headers to attach to request. Raises UberIllegalState (ApiError) Raised if headers are invalid. """ token_type = auth_session.token_type if auth_session.server_token: token = auth_session.server_token else: token = auth_session.oauth2credential.access_token if not self._authorization_headers_valid(token_type, token): message = 'Invalid token_type or token.' raise UberIllegalState(message) headers = { 'Authorization': ' '.join([token_type, token]), 'X-Uber-User-Agent': 'Python Rides SDK v{}'.format(LIB_VERSION), } if method in http.BODY_METHODS: headers.update(http.DEFAULT_CONTENT_HEADERS) return headers
python
def _build_headers(self, method, auth_session): """Create headers for the request. Parameters method (str) HTTP method (e.g. 'POST'). auth_session (Session) The Session object containing OAuth 2.0 credentials. Returns headers (dict) Dictionary of access headers to attach to request. Raises UberIllegalState (ApiError) Raised if headers are invalid. """ token_type = auth_session.token_type if auth_session.server_token: token = auth_session.server_token else: token = auth_session.oauth2credential.access_token if not self._authorization_headers_valid(token_type, token): message = 'Invalid token_type or token.' raise UberIllegalState(message) headers = { 'Authorization': ' '.join([token_type, token]), 'X-Uber-User-Agent': 'Python Rides SDK v{}'.format(LIB_VERSION), } if method in http.BODY_METHODS: headers.update(http.DEFAULT_CONTENT_HEADERS) return headers
[ "def", "_build_headers", "(", "self", ",", "method", ",", "auth_session", ")", ":", "token_type", "=", "auth_session", ".", "token_type", "if", "auth_session", ".", "server_token", ":", "token", "=", "auth_session", ".", "server_token", "else", ":", "token", "=", "auth_session", ".", "oauth2credential", ".", "access_token", "if", "not", "self", ".", "_authorization_headers_valid", "(", "token_type", ",", "token", ")", ":", "message", "=", "'Invalid token_type or token.'", "raise", "UberIllegalState", "(", "message", ")", "headers", "=", "{", "'Authorization'", ":", "' '", ".", "join", "(", "[", "token_type", ",", "token", "]", ")", ",", "'X-Uber-User-Agent'", ":", "'Python Rides SDK v{}'", ".", "format", "(", "LIB_VERSION", ")", ",", "}", "if", "method", "in", "http", ".", "BODY_METHODS", ":", "headers", ".", "update", "(", "http", ".", "DEFAULT_CONTENT_HEADERS", ")", "return", "headers" ]
Create headers for the request. Parameters method (str) HTTP method (e.g. 'POST'). auth_session (Session) The Session object containing OAuth 2.0 credentials. Returns headers (dict) Dictionary of access headers to attach to request. Raises UberIllegalState (ApiError) Raised if headers are invalid.
[ "Create", "headers", "for", "the", "request", "." ]
76ecd75ab5235d792ec1010e36eca679ba285127
https://github.com/uber/rides-python-sdk/blob/76ecd75ab5235d792ec1010e36eca679ba285127/uber_rides/request.py#L154-L190
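The header assembly reduces to a few string joins; a standalone sketch with placeholder token values, an assumed library version string, and assumed stand-ins for the http module constants.

LIB_VERSION = '0.0.0'                                  # assumed placeholder version
BODY_METHODS = {'POST', 'PUT', 'PATCH'}                # assumed stand-in for http.BODY_METHODS
DEFAULT_CONTENT_HEADERS = {'Content-Type': 'application/json'}

def build_headers(method, token_type='Bearer', token='<access-token>'):
    # Mirror of _build_headers: bearer auth plus an SDK user-agent header.
    headers = {
        'Authorization': ' '.join([token_type, token]),
        'X-Uber-User-Agent': 'Python Rides SDK v{}'.format(LIB_VERSION),
    }
    if method in BODY_METHODS:
        headers.update(DEFAULT_CONTENT_HEADERS)        # JSON content headers for body methods only
    return headers

print(build_headers('POST'))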