code (string) | signature (string) | docstring (string) | loss_without_docstring (float64) | loss_with_docstring (float64) | factor (float64)
|---|---|---|---|---|---|
while True:
text = compat.input('ctl > ')
command, args = self.parse_input(text)
if not command:
continue
response = self.call(command, *args)
response.show()
|
def loop(self)
|
Enter the loop: read user input, run the command, and repeat
| 7.199159
| 6.067501
| 1.186511
|
uri = str(uri)
if uri not in namespaces.values():
namespaces[AnonNS().ns] = uri
return [k for k, v in namespaces.items() if uri == v][0]
|
def namespace_for(uri: Union[URIRef, Namespace, str]) -> str
|
Reverse namespace lookup. Note that the returned prefix may not be unique
:param uri: namespace URI
:return: namespace prefix
| 6.909273
| 6.627646
| 1.042493
|
# Cleanup text
text = text.strip()
text = re.sub(r'\s+', ' ', text)  # collapse multiple spaces
space, quote, parts = ' ', '"', []
part, quoted = '', False
for char in text:
# Encountered an opening double quote
if char == quote and not quoted:
quoted = True
continue
# Encountered the closing double quote
if char == quote and quoted:
quoted = False
parts.append(part.strip())
part = ''
continue
# Found a space inside a quoted argument
if char == space and quoted:
part += char
continue
# Found a space outside of quotes
if char == space:
if part:
parts.append(part)
part = ''
continue
# Any other character
if char != space:
part += char
continue
if part:
parts.append(part.strip())
return parts
|
def split(text)
|
Split text into arguments, accounting for multi-word arguments
which are double quoted
| 3.29294
| 3.121193
| 1.055026
|
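The quoting behaviour documented above closely mirrors what the standard library's shlex module offers; the sketch below is illustrative only and uses shlex rather than the split function from this row.

```python
# Illustrative only: shlex.split approximates the behaviour documented above
# (double-quoted, multi-word arguments are kept together as one part).
import shlex

text = 'greet "hello world" --times 3'
print(shlex.split(text))  # ['greet', 'hello world', '--times', '3']
```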
return sorted([name for name, kind in plugin._plugins.keys()
if kind == use and (include_mime_types or '/' not in name)])
|
def known_formats(use: Union[Serializer, Parser]=Serializer, include_mime_types: bool = False) -> List[str]
|
Return a list of available formats in rdflib for the required task
:param use: task (typically Serializer or Parser)
:param include_mime_types: whether mime types are included in the return list
:return: list of formats
| 9.290539
| 13.276413
| 0.699778
|
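A rough equivalent of the helper above using rdflib's public plugin registry; this is a sketch, not the module's own API, and the exact list of names depends on the installed rdflib version.

```python
# Sketch: list non-MIME serializer names straight from rdflib's plugin registry.
from rdflib.plugin import plugins
from rdflib.serializer import Serializer

names = sorted(p.name for p in plugins(kind=Serializer) if '/' not in p.name)
print(names)  # e.g. ['n3', 'nt', 'turtle', 'xml', ...]
```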
try:
st = os.stat(file_name)
except FileNotFoundError:
return None
return stat.S_IFMT(st.st_mode), st.st_size, st.st_mtime
|
def file_signature(file_name: str) -> Optional[Tuple]
|
Return an identity signature for file name
:param file_name: name of file
:return: mode, size, last modified time if file exists, otherwise None
| 2.373667
| 2.276597
| 1.042638
|
request = urllib.request.Request(url)
request.get_method = lambda: 'HEAD'
response = None
try:
response = urllib.request.urlopen(request)
except urllib.error.HTTPError:
return None
return response.info()['Last-Modified'], response.info()['Content-Length'], response.info().get('ETag')
|
def url_signature(url: str) -> Optional[Tuple]
|
Return an identity signature for the URL
:param url: item to get signature for
:return: tuple containing last modified, length and, if present, etag
| 2.317506
| 1.971666
| 1.175405
|
return url_signature(name) if is_url(name) else file_signature(name) if is_file(name) else None
|
def signature(name: str) -> Optional[Tuple]
|
Return the file or URL signature for name
:param name: file name or URL
:return: file or URL signature, or None if name is neither
| 4.450778
| 3.589181
| 1.240054
|
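A small usage sketch of the signature helpers above, assuming file_signature from this table is in scope; the temporary file is purely for illustration. Any change to the file alters its size and/or modification time, so the two signatures differ and a cache keyed on them would be refreshed.

```python
# Sketch: signatures change when the underlying file changes.
import os
import tempfile

with tempfile.NamedTemporaryFile(delete=False, suffix=".json") as f:
    f.write(b'{"resourceType": "Patient"}')
    path = f.name

sig_before = file_signature(path)      # helper defined above
with open(path, "ab") as f:
    f.write(b"\n")                     # any edit changes size and/or mtime
sig_after = file_signature(path)

print(sig_before == sig_after)         # False -> cached copy is stale
os.remove(path)
```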
try:
import pypandoc
except (ImportError, OSError) as e:
print('No pypandoc or pandoc: %s' % (e,))
if is_py3:
fh = open(readme_file, encoding='utf-8')
else:
fh = open(readme_file)
long_description = fh.read()
fh.close()
return long_description
else:
return pypandoc.convert(readme_file, 'rst')
|
def read_long_description(readme_file)
|
Read package long description from README file
| 2.292586
| 2.260112
| 1.014368
|
with open('./oi/version.py') as fh:
for line in fh:
if line.startswith('VERSION'):
return line.split('=')[1].strip().strip("'")
|
def read_version()
|
Read package version
| 3.3702
| 3.483945
| 0.967352
|
return re.sub(r'^@prefix .* .\n', '',
g.serialize(format="turtle").decode(),
flags=re.MULTILINE).strip()
|
def strip_prefixes(g: Graph)
|
Remove the prefixes from the graph for aesthetics
| 5.710557
| 4.499437
| 1.269172
|
for prefix, namespace in nsmap.items():
self._g.bind(prefix, namespace)
|
def add_prefixes(self, nsmap: Dict[str, Namespace]) -> None
|
Add the required prefix definitions
:param nsmap: map from prefix to namespace
:return:
| 7.425866
| 7.025667
| 1.056962
|
self._g.add((subj, pred, obj))
return self
|
def add(self, subj: Node, pred: URIRef, obj: Node) -> "FHIRResource"
|
Shortcut to rdflib add function
:param subj:
:param pred:
:param obj:
:return: self for chaining
| 4.328135
| 5.043913
| 0.858091
|
pred_type = self._meta.predicate_type(pred) if not valuetype else valuetype
# Transform generic resources into specific types
if pred_type == FHIR.Resource:
pred_type = FHIR[val.resourceType]
val_meta = FHIRMetaVocEntry(self._vocabulary, pred_type)
for k, p in val_meta.predicates().items():
if k in val:
self.add_val(subj, p, val, k)
if pred == FHIR.CodeableConcept.coding:
self.add_type_arc(subj, val)
elif k == "value" and val_meta.predicate_type(p) == FHIR.Element:
# value / Element is the wild card combination -- if there is a "value[x]" in val, emit it where the
# type comes from 'x'
for vk in val._as_dict.keys():
if vk.startswith(k):
self.add_val(subj, FHIR['Extension.' + vk], val, vk, self._meta.value_predicate_to_type(vk))
else:
# Can have an extension only without a primary value
self.add_extension_val(subj, val, k, p)
|
def add_value_node(self, subj: Node, pred: URIRef, val: Union[JsonObj, str, List],
valuetype: Optional[URIRef] = None) -> None
|
Expand val according to the range of pred and add it to the graph
:param subj: graph subject
:param pred: graph predicate
:param val: JSON representation of target object
:param valuetype: predicate type if it can't be directly determined
| 8.420882
| 8.311364
| 1.013177
|
match = FHIR_RESOURCE_RE.match(val)
ref_uri_str = res_type = None
if match:
ref_uri_str = val if match.group(FHIR_RE_BASE) else (self._base_uri + urllib.parse.quote(val))
res_type = match.group(FHIR_RE_RESOURCE)
elif '://' in val:
ref_uri_str = val
res_type = "Resource"
elif self._base_uri and not val.startswith('#') and not val.startswith('/'):
ref_uri_str = self._base_uri + urllib.parse.quote(val)
res_type = val.split('/', 1)[0] if '/' in val else "Resource"
if ref_uri_str:
ref_uri = URIRef(ref_uri_str)
self.add(subj, FHIR.link, ref_uri)
self.add(ref_uri, RDF.type, FHIR[res_type])
|
def add_reference(self, subj: Node, val: str) -> None
|
Add a fhir:link and RDF type arc if it can be determined
:param subj: reference subject
:param val: reference value
| 2.871444
| 2.6356
| 1.089484
|
if json_key not in json_obj:
print("Expecting to find object named '{}' in JSON:".format(json_key))
print(json_obj._as_json_dumps())
print("entry skipped")
return None
val = json_obj[json_key]
if isinstance(val, List):
list_idx = 0
for lv in val:
entry_bnode = BNode()
# TODO: this is getting messy. Refactor and clean this up
if pred == FHIR.Bundle.entry:
entry_subj = URIRef(lv.fullUrl)
self.add(entry_bnode, FHIR.index, Literal(list_idx))
self.add_val(entry_bnode, FHIR.Bundle.entry.fullUrl, lv, 'fullUrl')
self.add(entry_bnode, FHIR.Bundle.entry.resource, entry_subj)
self.add(subj, pred, entry_bnode)
entry_mv = FHIRMetaVocEntry(self._vocabulary, FHIR.BundleEntryComponent)
for k, p in entry_mv.predicates().items():
if k not in ['resource', 'fullUrl'] and k in lv:
print("---> adding {}".format(k))
self.add_val(subj, p, lv, k)
FHIRResource(self._vocabulary, None, self._base_uri, lv.resource, self._g,
False, self._replace_narrative_text, False, resource_uri=entry_subj)
else:
self.add(entry_bnode, FHIR.index, Literal(list_idx))
if isinstance(lv, JsonObj):
self.add_value_node(entry_bnode, pred, lv, valuetype)
else:
vt = self._meta.predicate_type(pred)
atom_type = self._meta.primitive_datatype_nostring(vt) if vt else None
self.add(entry_bnode, FHIR.value, Literal(lv, datatype=atom_type))
self.add(subj, pred, entry_bnode)
list_idx += 1
else:
vt = self._meta.predicate_type(pred) if not valuetype else valuetype
if self._meta.is_atom(pred):
if self._replace_narrative_text and pred == FHIR.Narrative.div and len(val) > 120:
val = REPLACED_NARRATIVE_TEXT
self.add(subj, pred, Literal(val))
else:
v = BNode()
if self._meta.is_primitive(vt):
self.add(v, FHIR.value, Literal(str(val), datatype=self._meta.primitive_datatype_nostring(vt, val)))
else:
self.add_value_node(v, pred, val, valuetype)
self.add(subj, pred, v)
if pred == FHIR.Reference.reference:
self.add_reference(subj, val)
self.add_extension_val(v, json_obj, json_key)
return v
return None
|
def add_val(self, subj: Node, pred: URIRef, json_obj: JsonObj, json_key: str,
valuetype: Optional[URIRef] = None) -> Optional[BNode]
|
Add the RDF representation of val to the graph as a target of subj, pred. Note that FHIR lists are
represented as a list of BNODE objects with a fhir:index discriminant
:param subj: graph subject
:param pred: predicate
:param json_obj: object containing json_key
:param json_key: name of the value in the JSON resource
:param valuetype: value type if NOT determinable by predicate
:return: value node if target is a BNode else None
| 3.370517
| 3.217635
| 1.047514
|
Union[JsonObj, List[JsonObjTypes]],
key: str,
pred: Optional[URIRef] = None) -> None:
extendee_name = "_" + key
if extendee_name in json_obj:
if not isinstance(subj, BNode):
raise NotImplementedError("Extension to something other than a simple BNode")
if isinstance(json_obj[extendee_name], list):
if not pred:
raise NotImplementedError("Case 3 not implemented")
entry_idx = 0
for extension in json_obj[extendee_name]:
entry = BNode()
self.add(entry, FHIR.index, Literal(entry_idx))
self.add_val(entry, FHIR.Element.extension, extension, 'extension')
self.add(subj, pred, entry)
entry_idx += 1
elif 'fhir_comments' in json_obj[extendee_name] and len(json_obj[extendee_name]) == 1:
# TODO: determine whether and how fhir comments should be represented in RDF.
# for the moment we just drop them
print("fhir_comment ignored")
print(json_obj[extendee_name]._as_json_dumps())
pass
else:
self.add_val(subj, FHIR.Element.extension, json_obj[extendee_name], 'extension')
|
def add_extension_val(self,
subj: Node,
json_obj: Union[JsonObj, List[JsonObjTypes]],
key: str,
pred: Optional[URIRef] = None) -> None
|
Add any extensions for the supplied object. This can be called in following situations:
1) Single extended value
"key" : (value),
"_key" : {
"extension": [
{
"url": "http://...",
"value[x]": "......"
}
]
}
2) Single extension only
"_key" : {
"extension": [
{
"url": "http://...",
"value[x]": "......"
}
]
}
3) Multiple extended values:
(TBD)
4) Multiple extensions only
"_key" : [
{
"extension": [
{
"url": "http://...",
"value[x]": "......"
}
]
}
]
:param subj: Node containing subject
:param json_obj: Object (potentially) containing "_key"
:param key: name of element that is possibly extended (as indicated by "_" prefix)
:param pred: predicate for the contained elements. Only used in situations 3) (?) and 4)
| 4.518268
| 4.881831
| 0.925527
|
link_node = g.value(subject, predicate)
if link_node:
l = g.value(link_node, FHIR.link)
if l:
typ = g.value(l, RDF.type)
return l, typ
return None, None
|
def link(g: Graph, subject: Node, predicate: URIRef) -> Tuple[Optional[URIRef], Optional[URIRef]]
|
Return the link URI and link type for subject and predicate
:param g: graph context
:param subject: subject of the link
:param predicate: link predicate
:return: URI and optional type URI. URI is None if not a link
| 3.107084
| 2.907425
| 1.068672
|
# EXAMPLE:
# fhir:Patient.maritalStatus [
# fhir:CodeableConcept.coding [
# fhir:index 0;
# a sct:36629006;
# fhir:Coding.system [ fhir:value "http://snomed.info/sct" ];
# fhir:Coding.code [ fhir:value "36629006" ];
# fhir:Coding.display [ fhir:value "Legally married" ]
# ], [
# fhir:index 1;
# fhir:Coding.system [ fhir:value "http://hl7.org/fhir/v3/MaritalStatus" ];
# fhir:Coding.code [ fhir:value "M" ]
# ]
# ];
rval = []
coded_entry = g.value(subject, predicate, any=False)
if coded_entry:
for codeable_concept in list(g.objects(coded_entry, FHIR.CodeableConcept.coding)):
coding_system = value(g, codeable_concept, FHIR.Coding.system)
coding_code = value(g, codeable_concept, FHIR.Coding.code)
if coding_system and coding_code and (system is None or system == coding_system):
rval.append(CodeableConcept(coding_system, coding_code, g.value(codeable_concept, RDF.type, any=False)))
return rval
|
def codeable_concept_code(g: Graph, subject: Node, predicate: URIRef, system: Optional[str]=None) \
-> List[CodeableConcept]
|
Return a list of CodeableConcept entries for the supplied subject and predicate in graph g
:param g: graph containing the data
:param subject: subject
:param predicate: predicate
:param system: coding system. If present, only concepts in this system will be returned
:return: system, code and optional URI of matching concept(s)
| 2.562691
| 2.596152
| 0.987111
|
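Hedged usage sketch tied to the EXAMPLE block in the comments above: pull the SNOMED CT marital-status coding off a Patient subject. The file name and subject URI are placeholders, and codeable_concept_code is assumed to be importable from this module.

```python
# Sketch: extract codings for one coding system from a parsed FHIR RDF graph.
from rdflib import Graph, Namespace, URIRef

FHIR = Namespace("http://hl7.org/fhir/")
g = Graph().parse("patient-f201.ttl", format="turtle")   # placeholder file
patient = URIRef("http://hl7.org/fhir/Patient/f201")

for cc in codeable_concept_code(g, patient, FHIR['Patient.maritalStatus'],
                                system="http://snomed.info/sct"):
    print(cc)   # (system, code, optional concept URI)
```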
local_name = str(uri).replace(str(FHIR), '')
return local_name.rsplit('.', 1)[1] if '.' in local_name else local_name
|
def _to_str(uri: URIRef) -> str
|
Convert a FHIR style URI into a tag name to be used to retrieve data from a JSON representation
Example: http://hl7.org/fhir/Provenance.agent.whoReference --> whoReference
:param uri: URI to convert
:return: tag name
| 5.893602
| 5.295153
| 1.113018
|
rval = dict()
for parent in self._o.objects(self._subj, RDFS.subClassOf):
if isinstance(parent, URIRef) and not str(parent).startswith(str(W5)):
rval.update(**FHIRMetaVocEntry(self._o, parent).predicates())
for s in self._o.subjects(RDFS.domain, self._subj):
rval[self._to_str(s)] = s
return rval
|
def predicates(self) -> Dict[str, URIRef]
|
Return the tag names and corresponding URIs for all properties that can be associated with subject
:return: Map from tag name (JSON object identifier) to corresponding URI
| 6.083323
| 6.098521
| 0.997508
|
return self._o.value(pred, RDFS.range)
|
def predicate_type(self, pred: URIRef) -> URIRef
|
Return the type of pred
:param pred: predicate to map
:return: the rdfs:range of pred
| 23.071722
| 22.995872
| 1.003298
|
if not self.has_type(t):
raise TypeError("Unrecognized FHIR type: {}".format(t))
return True
|
def is_valid(self, t: URIRef) -> bool
|
Raise an exception if 't' is unrecognized
:param t: metadata URI
:return: True if t is recognized
| 9.928436
| 7.842943
| 1.265907
|
return FHIR.Primitive in self._o.objects(t, RDFS.subClassOf)
|
def is_primitive(self, t: URIRef) -> bool
|
Determine whether type "t" is a FHIR primitive type
:param t: type to test
:return: True if t is a FHIR primitive type
| 22.036512
| 17.42976
| 1.264304
|
if value_pred.startswith('value'):
vp_datatype = value_pred.replace('value', '')
if vp_datatype:
if self.has_type(FHIR[vp_datatype]):
return FHIR[vp_datatype]
else:
vp_datatype = vp_datatype[0].lower() + vp_datatype[1:]
if self.has_type(FHIR[vp_datatype]):
return FHIR[vp_datatype]
if self.is_valid(FHIR[value_pred]):
return FHIR[value_pred]
|
def value_predicate_to_type(self, value_pred: str) -> URIRef
|
Convert a predicate in the form of "fhir:[...].value[type]" to "fhir:type", down-casing the
first character if necessary
:param value_pred: Predicate associated with the value
:return: corresponding type or None if not found
| 2.819023
| 2.595524
| 1.086109
|
if not self.has_type(pred):
if '.value' in str(pred): # synthetic values (valueString, valueDate, ...)
return False
else:
raise TypeError("Unrecognized FHIR predicate: {}".format(pred))
return pred == FHIR.nodeRole or OWL.DatatypeProperty in set(self._o.objects(pred, RDF.type))
|
def is_atom(self, pred: URIRef) -> bool
|
Determine whether predicate is an 'atomic' type -- i.e. it doesn't use a FHIR value representation
:param pred: predicate to test
:return: True if pred is atomic
| 13.306065
| 10.210762
| 1.303141
|
for sco in self._o.objects(t, RDFS.subClassOf):
sco_type = self._o.value(sco, RDF.type)
sco_prop = self._o.value(sco, OWL.onProperty)
if sco_type == OWL.Restriction and sco_prop == FHIR.value:
# The older versions of fhir.ttl (incorrectly) referenced the datatype directly
restriction_type = self._o.value(sco, OWL.allValuesFrom)
if not restriction_type:
restriction_dt_entry = self._o.value(sco, OWL.someValuesFrom)
restriction_type = self._o.value(restriction_dt_entry, OWL.onDatatype)
return restriction_type
return None
|
def primitive_datatype(self, t: URIRef) -> Optional[URIRef]
|
Return the data type for primitive type t, if any
:param t: type
:return: corresponding data type
| 3.740779
| 3.796163
| 0.98541
|
vt = self.primitive_datatype(t)
if self.fhir_dates and vt == XSD.dateTime and v:
return XSD.gYear if len(v) == 4 else XSD.gYearMonth if len(v) == 7 \
else XSD.date if (len(v) == 10 or (len(v) > 10 and v[10] in '+-')) else XSD.dateTime
# For some reason the oid datatype is represented as a string as well
if self.fhir_oids and vt == XSD.anyURI:
vt = None
return None if vt == XSD.string else vt
|
def primitive_datatype_nostring(self, t: URIRef, v: Optional[str] = None) -> Optional[URIRef]
|
Return the data type for primitive type t, if any, defaulting string to no type
:param t: type
:param v: value - for munging dates if we're doing FHIR official output
:return: corresponding data type
| 4.424099
| 3.947698
| 1.120678
|
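The date-narrowing rule above can be stated on its own; the helper below is a hypothetical restatement that picks the XSD type purely from the lexical form of the value, matching the length checks in the method.

```python
# Sketch of the date-narrowing rule: choose the XSD type from the literal form.
from rdflib import XSD

def narrow_datetime(v: str):
    if len(v) == 4:
        return XSD.gYear        # "2012"
    if len(v) == 7:
        return XSD.gYearMonth   # "2012-05"
    if len(v) == 10 or (len(v) > 10 and v[10] in '+-'):
        return XSD.date         # "2012-05-17" (optionally with an offset)
    return XSD.dateTime         # full timestamp

for v in ("2012", "2012-05", "2012-05-17", "2012-05-17T10:00:00Z"):
    print(v, narrow_datetime(v))
```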
if self._cache_directory is not None:
if name in self._cache:
os.remove(os.path.join(self._cache_directory, self._cache[name].loc))
fname = os.path.join(self._cache_directory, str(uuid.uuid4()))
with open(fname, 'wb') as f:
pickle.dump(obj, f)
self._cache[name] = _PickleJar.CacheEntry(sig, fname)
self._update()
|
def add(self, name: str, sig: Tuple, obj: object) -> None
|
Add an object to the cache
:param name: name of the object to be pickled
:param sig: signature for object
:param obj: object to pickle
| 3.039158
| 2.759476
| 1.101353
|
if name not in self._cache:
return None
if self._cache[name].sig != sig:
del self._cache[name]
self._update()
return None
with open(self._cache[name].loc, 'rb') as f:
return pickle.load(f)
|
def get(self, name: str, sig: Tuple) -> Optional[object]
|
Return the object representing name if it is cached
:param name: name of object
:param sig: unique signature of object
:return: object if exists and signature matches
| 2.740979
| 3.036548
| 0.902663
|
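A minimal in-memory sketch of the add/get contract documented above: an entry is only returned while the stored signature still matches the one presented. The real class pickles objects to disk; the dictionary here is purely illustrative.

```python
# Sketch of the cache contract: a stale signature invalidates the entry.
cache = {}

def cache_add(name, sig, obj):
    cache[name] = (sig, obj)

def cache_get(name, sig):
    entry = cache.get(name)
    if entry is None or entry[0] != sig:
        cache.pop(name, None)       # drop stale entries
        return None
    return entry[1]

cache_add("fhir.ttl", ("mode", 1234, 111.0), {"parsed": True})
print(cache_get("fhir.ttl", ("mode", 1234, 111.0)))  # {'parsed': True}
print(cache_get("fhir.ttl", ("mode", 1234, 222.0)))  # None (signature changed)
```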
if self._cache_directory is not None:
# Safety - if there isn't a cache directory file, this probably isn't a valid cache
assert os.path.exists(self._cache_directory_index), "Attempt to clear a non-existent cache"
self._load() # Shouldn't have any impact but...
for e in self._cache.values():
if os.path.exists(e.loc):
os.remove(e.loc)
self._cache.clear()
self._update()
self._cache = {}
|
def clear(self) -> None
|
Clear all cache entries for directory and, if it is a 'pure' directory, remove the directory itself
| 6.059643
| 5.34553
| 1.133591
|
def check_for_continuation(data_: JsonObj) -> Optional[str]:
if do_continuations and 'link' in data_ and isinstance(data_.link, list):
for link_e in data_.link:
if 'relation' in link_e and link_e.relation == 'next':
return link_e.url
return None
if target_graph is None:
target_graph = Graph()
if metavoc is None:
metavoc = FHIRMetaVoc().g
elif isinstance(metavoc, FHIRMetaVoc):
metavoc = metavoc.g
page_fname = json_fname
while page_fname:
data = load(page_fname)
if 'resourceType' in data and data.resourceType != 'Bundle':
FHIRResource(metavoc, None, base_uri, data, target=target_graph, add_ontology_header=add_ontology_header,
replace_narrative_text=replace_narrative_text)
page_fname = check_for_continuation(data)
elif 'entry' in data and isinstance(data.entry, list) and 'resource' in data.entry[0]:
FHIRCollection(metavoc, None, base_uri, data, target=target_graph,
add_ontology_header=add_ontology_header if 'resourceType' in data else False,
replace_narrative_text=replace_narrative_text)
page_fname = check_for_continuation(data)
else:
page_fname = None
target_graph = None
return target_graph
|
def fhir_json_to_rdf(json_fname: str,
base_uri: str = "http://hl7.org/fhir/",
target_graph: Optional[Graph] = None,
add_ontology_header: bool = True,
do_continuations: bool = True,
replace_narrative_text: bool = False,
metavoc: Optional[Union[Graph, FHIRMetaVoc]] = None) -> Graph
|
Convert a FHIR JSON resource image to RDF
:param json_fname: Name or URI of the file to convert
:param base_uri: Base URI to use for relative references.
:param target_graph: If supplied, add RDF to this graph. If not, start with an empty graph.
:param add_ontology_header: True means add owl:Ontology declaration to output
:param do_continuations: True means follow continuation records on bundles and queries
:param replace_narrative_text: True means replace any narrative text longer than 120 characters with
'<div xmlns="http://www.w3.org/1999/xhtml">(removed)</div>'
:param metavoc: FHIR Metadata Vocabulary (fhir.ttl) graph
:return: resulting graph
| 2.212586
| 2.199627
| 1.005892
|
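Hedged usage sketch for the converter above: turn a single FHIR JSON resource into Turtle. The import path and file name are assumptions; only fhir_json_to_rdf itself appears in this table.

```python
# Sketch: convert one FHIR JSON resource and print it as Turtle.
from fhirtordf.loaders.fhirjsonloader import fhir_json_to_rdf  # path assumed

g = fhir_json_to_rdf("patient-example.json",
                     base_uri="http://hl7.org/fhir/",
                     replace_narrative_text=True)
if g is not None:                       # None -> not a FHIR resource/collection
    print(g.serialize(format="turtle").decode())
```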
return URIRef(str(s) + '.' + str(p).rsplit('/', 1)[1] + ("_{}".format(idx) if idx is not None else ''))
|
def subj_pred_idx_to_uri(s: URIRef, p: URIRef, idx: Optional[int] = None) -> URIRef
|
Convert FHIR subject, predicate and entry index into a URI. The resulting element can be substituted
for the name of the target BNODE
:param s: Subject URI (e.g. "fhir:Patient/f201", "fhir:Patient/f201.Patient.identifier_0", ...)
:param p: Predicate URI (e.g. "fhir:Patient.identifier", "fhir:Identifier.use")
:param idx: Relative position of BNODE if in a list
:return: URI that can replace the BNODE (e.g. "fhir:Patient/f201.Patient.identifier_0")
| 3.886556
| 4.394118
| 0.884491
|
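A worked instance of the construction described above, assuming subj_pred_idx_to_uri is in scope; the URIs match the examples given in the docstring.

```python
# Sketch: skolemized URI for the first fhir:Patient.identifier entry.
from rdflib import URIRef

s = URIRef("http://hl7.org/fhir/Patient/f201")
p = URIRef("http://hl7.org/fhir/Patient.identifier")
print(subj_pred_idx_to_uri(s, p, 0))
# -> http://hl7.org/fhir/Patient/f201.Patient.identifier_0
```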
for p, o in gin.predicate_objects(s):
if not isinstance(o, BNode):
gout.add((sk_s, p, o))
else:
sk_o = subj_pred_idx_to_uri(sk_s, p, gin.value(o, FHIR.index))
gout.add((sk_s, p, sk_o))
map_node(o, sk_o, gin, gout)
|
def map_node(s: Node, sk_s: URIRef, gin: Graph, gout: Graph) -> None
|
Transform the BNode whose subject is s into its equivalent, replacing s with its 'skolemized' equivalent
:param s: Actual subject
:param sk_s: Equivalent URI of subject in output graph
:param gin: Input graph
:param gout: Output graph
| 2.777828
| 2.725044
| 1.01937
|
gout = Graph()
# Emit any unreferenced subject BNodes (boxes)
anon_subjs = [s for s in gin.subjects() if isinstance(s, BNode) and not any(gin.subject_predicates(s))]
if anon_subjs:
idx = None if len(anon_subjs) == 1 else 0
for s in anon_subjs:
map_node(s, FHIR['treeRoot' + ('_{}'.format(idx) if idx is not None else '')], gin, gout)
if idx is not None:
idx += 1
# Cover all other non-bnode entries
for subj in set(s for s in gin.subjects() if isinstance(s, URIRef)):
map_node(subj, subj, gin, gout)
return gout
|
def skolemize(gin: Graph) -> Graph
|
Replace all of the blank nodes in graph gin with FHIR paths
:param gin: input graph
:return: output graph
| 5.007139
| 5.092001
| 0.983334
|
if target_graph is None:
target_graph = PrettyGraph()
for p, o in source_graph.predicate_objects(subj):
target_graph.add((subj, p, o))
if isinstance(o, BNode):
complete_definition(o, source_graph, target_graph)
return target_graph
|
def complete_definition(subj: Node,
source_graph: Graph,
target_graph: Optional[Graph]=None) -> PrettyGraph
|
Return the transitive closure of subject.
:param subj: URI or BNode for subject
:param source_graph: Graph containing the definition
:param target_graph: return graph (for recursion)
:return: target_graph
| 2.16321
| 1.862507
| 1.161451
|
return [l.decode('ascii') for l in sorted(g.serialize(format='nt').splitlines()) if l]
|
def dump_nt_sorted(g: Graph) -> List[str]
|
Dump graph g in sorted N-Triples format
:param g: graph to dump
:return: stringified representation of g
| 5.266098
| 5.924399
| 0.888883
|
def graph_for_subject(g: Graph, subj: Node) -> Graph:
subj_in_g = complete_definition(subj, g)
if ignore_type_arcs:
for ta_s, ta_o in subj_in_g.subject_objects(RDF.type):
if isinstance(ta_s, BNode) and isinstance(ta_o, URIRef):
subj_in_g.remove((ta_s, RDF.type, ta_o))
if ignore_owl_version:
subj_in_g.remove((subj, OWL.versionIRI, subj_in_g.value(subj, OWL.versionIRI)))
return subj_in_g
def primary_subjects(g: Graph) -> Set[Node]:
anon_subjs = set(anon_s for anon_s in g.subjects()
if isinstance(anon_s, BNode) and not any(g.subject_predicates(anon_s)))
return set(s_ for s_ in g.subjects() if isinstance(s_, URIRef)).union(anon_subjs)
rval = ""
# Step 1: Find any subjects in one graph that don't exist in the other
g1_subjs = primary_subjects(g1)
g2_subjs = primary_subjects(g2)
for s in g1_subjs - g2_subjs:
rval += "\n===== Subjects in Graph 1 but not Graph 2: "
rval += PrettyGraph.strip_prefixes(complete_definition(s, g1))
for s in g2_subjs - g1_subjs:
rval += "\n===== Subjects in Graph 2 but not Graph 1: "
rval += PrettyGraph.strip_prefixes(complete_definition(s, g2))
# Step 2: Iterate over all of the remaining subjects comparing their contents
for s in g1_subjs.intersection(g2_subjs):
s_in_g1 = graph_for_subject(g1, s)
s_in_g2 = graph_for_subject(g2, s)
in_both, in_first, in_second = graph_diff(skolemize(s_in_g1), skolemize(s_in_g2))
if compare_filter:
compare_filter(in_both, in_first, in_second)
if len(list(in_first)) or len(list(in_second)):
rval += "\n\nSubject {} DIFFERENCE: ".format(s) + '=' * 30
if len(in_first):
rval += "\n\t----> First: \n" + '\n'.join(dump_nt_sorted(in_first))
if len(in_second):
rval += "\n\t----> Second: \n" + '\n'.join(dump_nt_sorted(in_second))
rval += '-' * 40
return rval
|
def rdf_compare(g1: Graph, g2: Graph, ignore_owl_version: bool=False, ignore_type_arcs: bool = False,
compare_filter: Optional[Callable[[Graph, Graph, Graph], None]]=None) -> str
|
Compare graph g1 and g2
:param g1: first graph
:param g2: second graph
:param ignore_owl_version: True means ignore owl:versionIRI differences
:param ignore_type_arcs: True means ignore RDF type arcs on BNodes
:param compare_filter: Final adjustment for graph difference. Used, for example, to deal with FHIR decimal problems.
:return: List of differences as printable lines or blank if everything matches
| 2.392908
| 2.386261
| 1.002785
|
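Hedged usage sketch: compare two serializations of the same resource. The file names are placeholders and rdf_compare is assumed to be importable from this module; an empty return string means the graphs matched.

```python
# Sketch: report differences between two RDF renderings of one resource.
from rdflib import Graph

g1 = Graph().parse("patient-f201.ttl", format="turtle")            # placeholder
g2 = Graph().parse("patient-f201-generated.ttl", format="turtle")  # placeholder

diff = rdf_compare(g1, g2, ignore_owl_version=True)
print(diff if diff else "graphs match")
```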
g = fhir_json_to_rdf(infile, opts.uribase, opts.graph, add_ontology_header=not opts.noontology,
do_continuations=not opts.nocontinuation, replace_narrative_text=bool(opts.nonarrative),
metavoc=opts.fhir_metavoc)
# If we aren't carrying graph in opts, we're doing a file by file transformation
if g:
if not opts.graph:
serialize_graph(g, outfile, opts)
return True
else:
print("{} : Not a FHIR collection or resource".format(infile))
return False
|
def proc_file(infile: str, outfile: str, opts: Namespace) -> bool
|
Process infile.
:param infile: input file to be processed
:param outfile: target output file.
:param opts: options namespace carrying the conversion settings
:return: True if the conversion succeeded
| 10.490333
| 10.63848
| 0.986074
|
# If it looks like we're processing a URL as an input file, skip the suffix check
if '://' in ifn:
return True
if not ifn.endswith('.json'):
return False
if indir and (indir.startswith("_") or "/_" in indir or any(dn in indir for dn in opts.skipdirs)):
return False
if opts.skipfns and any(sfn in ifn for sfn in opts.skipfns):
return False
infile = os.path.join(indir, ifn)
if not opts.infile and opts.maxsize and os.path.getsize(infile) > (opts.maxsize * 1000):
return False
return True
|
def file_filter(ifn: str, indir: str, opts: Namespace) -> bool
|
Determine whether to process ifn. We don't process:
1) Anything in a directory having a path element that begins with "_"
2) Really, really big files
3) Temporary lists of known errors
:param ifn: input file name
:param indir: input directory
:param opts: argparse options
:return: True if to be processed, false if to be skipped
| 4.141908
| 3.928823
| 1.054237
|
dlp = dirlistproc.DirectoryListProcessor(argv,
description="Convert FHIR JSON into RDF",
infile_suffix=".json",
outfile_suffix=".ttl",
addargs=addargs,
noexit=not default_exit)
if not dlp.successful_parse:
return False
# Version
if dlp.opts.version:
print("FHIR to RDF Conversion Tool -- Version {}".format(__version__))
# We either have to have an input file or an input directory
if not dlp.opts.infile and not dlp.opts.indir:
if not dlp.opts.version:
dlp.parser.error("Either an input file or an input directory must be supplied")
return dlp.opts.version
# Create the output directory if needed
if dlp.opts.outdir and not os.path.exists(dlp.opts.outdir):
os.makedirs(dlp.opts.outdir)
# If we are going to a single output file or stdout, gather all the input
dlp.opts.graph = Graph() if (not dlp.opts.outfile and not dlp.opts.outdir) or\
(dlp.opts.outfile and len(dlp.opts.outfile) == 1) else None
dlp.opts.fhir_metavoc = load_fhir_ontology(dlp.opts)
# If it looks like we're processing a URL as an input file, skip the suffix check
if dlp.opts.infile and len(dlp.opts.infile) == 1 and not dlp.opts.indir and "://" in dlp.opts.infile[0]:
dlp.infile_suffix = ""
dlp.outfile_suffix = '.' + suffix_for(dlp.opts.format)
nfiles, nsuccess = dlp.run(proc=proc_file, file_filter_2=file_filter)
if nfiles:
if dlp.opts.graph:
serialize_graph(dlp.opts.graph, dlp.opts.outfile[0] if dlp.opts.outfile else None, dlp.opts)
return nsuccess > 0
return False
|
def fhirtordf(argv: List[str], default_exit: bool = True) -> bool
|
Entry point for command line utility
| 4.031743
| 3.975495
| 1.014149
|
uri_str = str(uri)
m = FHIR_RESOURCE_RE.match(uri_str)
if m:
return FHIR_RESOURCE(URIRef(m.group(FHIR_RE_BASE)), FHIR[m.group(FHIR_RE_RESOURCE)], m.group(FHIR_RE_ID))
else:
# Not in the FHIR format - we can only do namespace and name
namespace, name = uri_str.rsplit('#', 1) if '#' in uri_str \
else uri_str.rsplit('/', 1) if '/' in uri_str else (None, uri_str)
return FHIR_RESOURCE(URIRef(namespace), None, name)
|
def parse_fhir_resource_uri(uri: Union[URIRef, str]) -> FHIR_RESOURCE
|
Use the FHIR Regular Expression for Resource URIs to determine the namespace and type
of a given URI. As an example, "http://hl7.org/fhir/Patient/p123" maps to the tuple
``('Patient', 'http://hl7.org/fhir')``
:param uri: URI to parse
:return: FHIR_RESOURCE (namespace, type, resource)
| 3.313136
| 3.211528
| 1.031638
|
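A worked call matching the docstring's example; parse_fhir_resource_uri is assumed to be in scope, and the printed tuple layout follows the (namespace, type, resource) order stated above.

```python
# Sketch: decompose a FHIR resource URI into namespace, type and id.
res = parse_fhir_resource_uri("http://hl7.org/fhir/Patient/p123")
print(res)   # roughly (URIRef('http://hl7.org/fhir/'), fhir:Patient, 'p123')
```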
from .setup_helpers import _module_state
if _module_state['registered_commands'] is None:
raise RuntimeError(
'astropy_helpers.setup_helpers.register_commands() must be '
'called before using '
'astropy_helpers.setup_helpers.get_dummy_distribution()')
# Pre-parse the Distutils command-line options and config files to see if
# the option is set.
dist = Distribution({'script_name': os.path.basename(sys.argv[0]),
'script_args': sys.argv[1:]})
dist.cmdclass.update(_module_state['registered_commands'])
with silence():
try:
dist.parse_config_files()
dist.parse_command_line()
except (DistutilsError, AttributeError, SystemExit):
# Let distutils handle DistutilsErrors itself. AttributeErrors can
# get raised for ./setup.py --help. SystemExit can be raised if a
# display option was used, for example.
pass
return dist
|
def get_dummy_distribution()
|
Returns a distutils Distribution object used to instrument the setup
environment before calling the actual setup() function.
| 5.499392
| 5.27138
| 1.043255
|
dist = get_dummy_distribution()
for cmd in commands:
cmd_opts = dist.command_options.get(cmd)
if cmd_opts is not None and option in cmd_opts:
return cmd_opts[option][1]
else:
return None
|
def get_distutils_option(option, commands)
|
Returns the value of the given distutils option.
Parameters
----------
option : str
The name of the option
commands : list of str
The list of commands on which this option is available
Returns
-------
val : str or None
the value of the given distutils option. If the option is not set,
returns None.
| 3.462501
| 3.849166
| 0.899546
|
dist = get_dummy_distribution()
cmdcls = dist.get_command_class(command)
if (hasattr(cmdcls, '_astropy_helpers_options') and
name in cmdcls._astropy_helpers_options):
return
attr = name.replace('-', '_')
if hasattr(cmdcls, attr):
raise RuntimeError(
'{0!r} already has a {1!r} class attribute, barring {2!r} from '
'being usable as a custom option name.'.format(cmdcls, attr, name))
for idx, cmd in enumerate(cmdcls.user_options):
if cmd[0] == name:
log.warn('Overriding existing {0!r} option '
'{1!r}'.format(command, name))
del cmdcls.user_options[idx]
if name in cmdcls.boolean_options:
cmdcls.boolean_options.remove(name)
break
cmdcls.user_options.append((name, None, doc))
if is_bool:
cmdcls.boolean_options.append(name)
# Distutils' command parsing requires that a command object have an
# attribute with the same name as the option (with '-' replaced with '_')
# in order for that option to be recognized as valid
setattr(cmdcls, attr, None)
# This caches the options added through add_command_option so that if it is
# run multiple times in the same interpreter repeated adds are ignored
# (this way we can still raise a RuntimeError if a custom option overrides
# a built-in option)
if not hasattr(cmdcls, '_astropy_helpers_options'):
cmdcls._astropy_helpers_options = set([name])
else:
cmdcls._astropy_helpers_options.add(name)
|
def add_command_option(command, name, doc, is_bool=False)
|
Add a custom option to a setup command.
Issues a warning if the option already exists on that command.
Parameters
----------
command : str
The name of the command as given on the command line
name : str
The name of the build option
doc : str
A short description of the option, for the `--help` message
is_bool : bool, optional
When `True`, the option is a boolean option and doesn't
require an associated value.
| 3.610359
| 3.61277
| 0.999333
|
short_display_opts = set('-' + o[1] for o in Distribution.display_options
if o[1])
long_display_opts = set('--' + o[0] for o in Distribution.display_options)
# Include -h and --help which are not explicitly listed in
# Distribution.display_options (as they are handled by optparse)
short_display_opts.add('-h')
long_display_opts.add('--help')
# This isn't the greatest approach to hardcode these commands.
# However, there doesn't seem to be a good way to determine
# whether build *will be* run as part of the command at this
# phase.
display_commands = set([
'clean', 'register', 'setopt', 'saveopts', 'egg_info',
'alias'])
return short_display_opts.union(long_display_opts.union(display_commands))
|
def get_distutils_display_options()
|
Returns a set of all the distutils display options in their long and
short forms. These are the setup.py arguments such as --name or --version
which print the project's metadata and then exit.
Returns
-------
opts : set
The long and short form display option arguments, including the - or --
| 5.561899
| 5.882874
| 0.945439
|
# We've split out the Sphinx part of astropy-helpers into sphinx-astropy
# but we want it to be auto-installed seamlessly for anyone using
# build_docs. We check if it's already installed, and if not, we install
# it to a local .eggs directory and add the eggs to the path (these
# have to each be added to the path, we can't add them by simply adding
# .eggs to the path)
sys_path_inserts = []
sphinx_astropy_version = None
try:
from sphinx_astropy import __version__ as sphinx_astropy_version # noqa
except ImportError:
from setuptools import Distribution
dist = Distribution()
# Numpydoc 0.9.0 requires sphinx 1.6+, we can limit the version here
# until we also make our minimum required version Sphinx 1.6
if SPHINX_LT_16:
dist.fetch_build_eggs('numpydoc<0.9')
# This egg build doesn't respect python_requires, not aware of
# pre-releases. We know that mpl 3.1+ requires Python 3.6+, so this
# ugly workaround takes care of it until there is a solution for
# https://github.com/astropy/astropy-helpers/issues/462
if LooseVersion(sys.version) < LooseVersion('3.6'):
dist.fetch_build_eggs('matplotlib<3.1')
eggs = dist.fetch_build_eggs('sphinx-astropy')
# Find out the version of sphinx-astropy if possible. For some old
# setuptools version, eggs will be None even if sphinx-astropy was
# successfully installed.
if eggs is not None:
for egg in eggs:
if egg.project_name == 'sphinx-astropy':
sphinx_astropy_version = egg.parsed_version.public
break
eggs_path = os.path.abspath('.eggs')
for egg in glob.glob(os.path.join(eggs_path, '*.egg')):
sys_path_inserts.append(egg)
return sphinx_astropy_version, sys_path_inserts
|
def ensure_sphinx_astropy_installed()
|
Make sure that sphinx-astropy is available, installing it temporarily if not.
This returns the available version of sphinx-astropy as well as any
paths that should be added to sys.path for sphinx-astropy to be available.
| 4.989729
| 4.946453
| 1.008749
|
plat_specifier = '.{0}-{1}'.format(cmd.plat_name, sys.version[0:3])
return os.path.join(cmd.build_base, 'lib' + plat_specifier)
|
def _get_platlib_dir(cmd)
|
Given a build command, return the name of the appropriate platform-specific
build subdirectory (e.g. build/lib.linux-x86_64-2.7)
| 3.969333
| 3.555895
| 1.116268
|
# We need to go through this nonsense in case setuptools
# downloaded and installed Numpy for us as part of the build or
# install, since Numpy may still think it's in "setup mode", when
# in fact we're ready to use it to build astropy now.
import builtins
if hasattr(builtins, '__NUMPY_SETUP__'):
del builtins.__NUMPY_SETUP__
import imp
import numpy
imp.reload(numpy)
try:
numpy_include = numpy.get_include()
except AttributeError:
numpy_include = numpy.get_numpy_include()
return numpy_include
|
def get_numpy_include_path()
|
Gets the path to the numpy headers.
| 6.635911
| 6.758351
| 0.981883
|
name = os.path.basename(os.path.abspath(filepath))
if isinstance(name, bytes):
is_dotted = name.startswith(b'.')
else:
is_dotted = name.startswith('.')
return is_dotted or _has_hidden_attribute(filepath)
|
def is_path_hidden(filepath)
|
Determines if a given file or directory is hidden.
Parameters
----------
filepath : str
The path to a file or directory
Returns
-------
hidden : bool
Returns `True` if the file is hidden
| 3.568002
| 3.814139
| 0.935467
|
for root, dirs, files in os.walk(
top, topdown=True, onerror=onerror,
followlinks=followlinks):
# These lists must be updated in-place so os.walk will skip
# hidden directories
dirs[:] = [d for d in dirs if not is_path_hidden(d)]
files[:] = [f for f in files if not is_path_hidden(f)]
yield root, dirs, files
|
def walk_skip_hidden(top, onerror=None, followlinks=False)
|
A wrapper for `os.walk` that skips hidden files and directories.
This function does not have the parameter `topdown` from
`os.walk`: the directories must always be recursed top-down when
using this function.
See also
--------
os.walk : For a description of the parameters
| 2.770944
| 3.25673
| 0.850836
|
assert isinstance(data, bytes)
if os.path.exists(filename):
with open(filename, 'rb') as fd:
original_data = fd.read()
else:
original_data = None
if original_data != data:
with open(filename, 'wb') as fd:
fd.write(data)
|
def write_if_different(filename, data)
|
Write `data` to `filename`, if the content of the file is different.
Parameters
----------
filename : str
The file name to be written to.
data : bytes
The data to be written to `filename`.
| 2.121141
| 2.038757
| 1.040409
|
# Specifying a traditional dot-separated fully qualified name here
# results in a number of "Parent module 'astropy' not found while
# handling absolute import" warnings. Using the same name, the
# namespaces of the modules get merged together. So, this
# generates an underscore-separated name which is more likely to
# be unique, and it doesn't really matter because the name isn't
# used directly here anyway.
mode = 'r'
if name is None:
basename = os.path.splitext(filename)[0]
name = '_'.join(os.path.relpath(basename).split(os.sep)[1:])
if not os.path.exists(filename):
raise ImportError('Could not import file {0}'.format(filename))
if import_machinery:
loader = import_machinery.SourceFileLoader(name, filename)
mod = loader.load_module()
else:
with open(filename, mode) as fd:
mod = imp.load_module(name, fd, filename, ('.py', mode, 1))
return mod
|
def import_file(filename, name=None)
|
Imports a module from a single file as if it doesn't belong to a
particular package.
The returned module will have the optional ``name`` if given, or else
a name generated from the filename.
| 4.799503
| 4.993205
| 0.961207
|
parts = name.split('.')
cursor = len(parts) - 1
module_name = parts[:cursor]
attr_name = parts[-1]
while cursor > 0:
try:
ret = __import__('.'.join(module_name), fromlist=[attr_name])
break
except ImportError:
if cursor == 0:
raise
cursor -= 1
module_name = parts[:cursor]
attr_name = parts[cursor]
ret = ''
for part in parts[cursor:]:
try:
ret = getattr(ret, part)
except AttributeError:
raise ImportError(name)
return ret
|
def resolve_name(name)
|
Resolve a name like ``module.object`` to an object and return it.
Raise `ImportError` if the module or name is not found.
| 2.372162
| 2.218184
| 1.069416
|
def decorator(func):
if not (extended_func.__doc__ is None or func.__doc__ is None):
func.__doc__ = '\n\n'.join([extended_func.__doc__.rstrip('\n'),
func.__doc__.lstrip('\n')])
return func
return decorator
|
def extends_doc(extended_func)
|
A function decorator for use when wrapping an existing function but adding
additional functionality. This copies the docstring from the original
function, and appends to it (along with a newline) the docstring of the
wrapper function.
Examples
--------
>>> def foo():
... '''Hello.'''
...
>>> @extends_doc(foo)
... def bar():
... '''Goodbye.'''
...
>>> print(bar.__doc__)
Hello.
Goodbye.
| 2.82989
| 3.508193
| 0.806652
|
return glob.glob(os.path.join(package, pattern), recursive=True)
|
def find_data_files(package, pattern)
|
Include files matching ``pattern`` inside ``package``.
Parameters
----------
package : str
The package inside which to look for data files
pattern : str
Pattern (glob-style) to match for the data files (e.g. ``*.dat``).
This supports the ``**`` recursive syntax. For example, ``**/*.fits``
matches all files ending with ``.fits`` recursively. Only one
instance of ``**`` can be included in the pattern.
| 4.495942
| 5.423084
| 0.829038
|
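A short usage sketch; the package name and pattern are placeholders, and the returned paths keep the package directory prefix because the helper globs relative to the current directory.

```python
# Sketch: gather FITS files under a (hypothetical) package's data directory.
files = find_data_files('mypackage', 'data/**/*.fits')
print(files)   # e.g. ['mypackage/data/a.fits', 'mypackage/data/sub/b.fits']
```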
parsed_version = pkg_resources.parse_version(version)
if hasattr(parsed_version, 'base_version'):
# New version parsing for setuptools >= 8.0
if parsed_version.base_version:
parts = [int(part)
for part in parsed_version.base_version.split('.')]
else:
parts = []
else:
parts = []
for part in parsed_version:
if part.startswith('*'):
# Ignore any .dev, a, b, rc, etc.
break
parts.append(int(part))
if len(parts) < 3:
parts += [0] * (3 - len(parts))
# In principle a version could have more parts (like 1.2.3.4) but we only
# support <major>.<minor>.<micro>
return tuple(parts[:3])
|
def _version_split(version)
|
Split a version string into major, minor, and bugfix numbers. If any of
those numbers are missing the default is zero. Any pre/post release
modifiers are ignored.
Examples
========
>>> _version_split('1.2.3')
(1, 2, 3)
>>> _version_split('1.2')
(1, 2, 0)
>>> _version_split('1.2rc1')
(1, 2, 0)
>>> _version_split('1')
(1, 0, 0)
>>> _version_split('')
(0, 0, 0)
| 3.718184
| 3.965306
| 0.937679
|
loader = pkgutil.get_loader(git_helpers)
source = loader.get_source(git_helpers.__name__) or ''
source_lines = source.splitlines()
if not source_lines:
log.warn('Cannot get source code for astropy_helpers.git_helpers; '
'git support disabled.')
return ''
idx = 0
for idx, line in enumerate(source_lines):
if line.startswith('# BEGIN'):
break
git_helpers_py = '\n'.join(source_lines[idx + 1:])
verstr = version
new_githash = git_helpers.get_git_devstr(sha=True, show_warning=False)
if new_githash:
githash = new_githash
return _FROZEN_VERSION_PY_WITH_GIT_HEADER.format(
git_helpers=git_helpers_py, packagename=packagename,
verstr=verstr, githash=githash)
|
def _generate_git_header(packagename, version, githash)
|
Generates a header to the version.py module that includes utilities for
probing the git repository for updates (to the current git hash, etc.)
These utilities should only be available in development versions, and not
in release builds.
If this fails for any reason an empty string is returned.
| 4.075638
| 4.04543
| 1.007467
|
version = import_file(os.path.join(packagename, 'version.py'), name='version')
if fromlist:
return tuple(getattr(version, member) for member in fromlist)
else:
return version
|
def get_pkg_version_module(packagename, fromlist=None)
|
Returns the package's .version module generated by
`astropy_helpers.version_helpers.generate_version_py`. Raises an
ImportError if the version module is not found.
If ``fromlist`` is an iterable, return a tuple of the members of the
version module corresponding to the member names given in ``fromlist``.
Raises an `AttributeError` if any of these module members are not found.
| 3.565072
| 3.530395
| 1.009822
|
# DEPRECATED: store the package name in a built-in variable so it's easy
# to get from other parts of the setup infrastructure. We should phase this
# out in packages that use it - the cookiecutter template should now be
# able to put the right package name where needed.
conf = read_configuration('setup.cfg')
builtins._ASTROPY_PACKAGE_NAME_ = conf['metadata']['name']
# Create a dictionary with setup command overrides. Note that this gets
# information about the package (name and version) from the setup.cfg file.
cmdclass = register_commands()
# Freeze build information in version.py. Note that this gets information
# about the package (name and version) from the setup.cfg file.
version = generate_version_py()
# Get configuration information from all of the various subpackages.
# See the docstring for setup_helpers.update_package_files for more
# details.
package_info = get_package_info()
package_info['cmdclass'] = cmdclass
package_info['version'] = version
# Override using any specified keyword arguments
package_info.update(kwargs)
setuptools_setup(**package_info)
|
def setup(**kwargs)
|
A wrapper around setuptools' setup() function that automatically sets up
custom commands, generates a version file, and customizes the setup process
via the ``setup_package.py`` files.
| 6.756776
| 6.629346
| 1.019222
|
try:
current_debug = get_pkg_version_module(packagename,
fromlist=['debug'])[0]
except (ImportError, AttributeError):
current_debug = None
# Only modify the debug flag if one of the build commands was explicitly
# run (i.e. not as a sub-command of something else)
dist = get_dummy_distribution()
if any(cmd in dist.commands for cmd in ['build', 'build_ext']):
debug = bool(get_distutils_build_option('debug'))
else:
debug = bool(current_debug)
if current_debug is not None and current_debug != debug:
build_ext_cmd = dist.get_command_class('build_ext')
build_ext_cmd._force_rebuild = True
return debug
|
def get_debug_option(packagename)
|
Determines if the build is in debug mode.
Returns
-------
debug : bool
True if the current build was started with the debug option, False
otherwise.
| 4.68747
| 4.77384
| 0.981908
|
if package is not None:
warnings.warn('The package argument to generate_version_py has '
'been deprecated and will be removed in future. Specify '
'the package name in setup.cfg instead', AstropyDeprecationWarning)
if version is not None:
warnings.warn('The version argument to generate_version_py has '
'been deprecated and will be removed in future. Specify '
'the version number in setup.cfg instead', AstropyDeprecationWarning)
if release is not None:
warnings.warn('The release argument to generate_version_py has '
'been deprecated and will be removed in future. We now '
'use the presence of the "dev" string in the version to '
'determine whether this is a release', AstropyDeprecationWarning)
# We use ConfigParser instead of read_configuration here because the latter
# only reads in keys recognized by setuptools, but we need to access
# package_name below.
conf = ConfigParser()
conf.read('setup.cfg')
if conf.has_option('metadata', 'name'):
package = conf.get('metadata', 'name')
elif conf.has_option('metadata', 'package_name'):
# The package-template used package_name instead of name for a while
warnings.warn('Specifying the package name using the "package_name" '
'option in setup.cfg is deprecated - use the "name" '
'option instead.', AstropyDeprecationWarning)
package = conf.get('metadata', 'package_name')
elif package is not None: # deprecated
pass
else:
sys.stderr.write('ERROR: Could not read package name from setup.cfg\n')
sys.exit(1)
if _module_state['registered_commands'] is not None:
return _module_state['registered_commands']
if _module_state['have_sphinx']:
try:
from .commands.build_sphinx import (AstropyBuildSphinx,
AstropyBuildDocs)
except ImportError:
AstropyBuildSphinx = AstropyBuildDocs = FakeBuildSphinx
else:
AstropyBuildSphinx = AstropyBuildDocs = FakeBuildSphinx
_module_state['registered_commands'] = registered_commands = {
'test': generate_test_command(package),
# Use distutils' sdist because it respects package_data.
# setuptools/distributes sdist requires duplication of information in
# MANIFEST.in
'sdist': DistutilsSdist,
'build_ext': AstropyHelpersBuildExt,
'build_sphinx': AstropyBuildSphinx,
'build_docs': AstropyBuildDocs
}
# Need to override the __name__ here so that the commandline options are
# presented as being related to the "build" command, for example; normally
# this wouldn't be necessary since commands also have a command_name
# attribute, but there is a bug in distutils' help display code that it
# uses __name__ instead of command_name. Yay distutils!
for name, cls in registered_commands.items():
cls.__name__ = name
# Add a few custom options; more of these can be added by specific packages
# later
for option in [
('use-system-libraries',
"Use system libraries whenever possible", True)]:
add_command_option('build', *option)
add_command_option('install', *option)
add_command_hooks(registered_commands, srcdir=srcdir)
return registered_commands
|
def register_commands(package=None, version=None, release=None, srcdir='.')
|
This function generates a dictionary containing customized commands that
can then be passed to the ``cmdclass`` argument in ``setup()``.
| 3.750112
| 3.773381
| 0.993833
|
hook_re = re.compile(r'^(pre|post)_(.+)_hook$')
# Distutils commands have a method of the same name, but it is not a
# *classmethod* (which probably didn't exist when distutils was first
# written)
def get_command_name(cmdcls):
if hasattr(cmdcls, 'command_name'):
return cmdcls.command_name
else:
return cmdcls.__name__
packages = find_packages(srcdir)
dist = get_dummy_distribution()
hooks = collections.defaultdict(dict)
for setuppkg in iter_setup_packages(srcdir, packages):
for name, obj in vars(setuppkg).items():
match = hook_re.match(name)
if not match:
continue
hook_type = match.group(1)
cmd_name = match.group(2)
if hook_type not in hooks[cmd_name]:
hooks[cmd_name][hook_type] = []
hooks[cmd_name][hook_type].append((setuppkg.__name__, obj))
for cmd_name, cmd_hooks in hooks.items():
commands[cmd_name] = generate_hooked_command(
cmd_name, dist.get_command_class(cmd_name), cmd_hooks)
|
def add_command_hooks(commands, srcdir='.')
|
Look through setup_package.py modules for functions with names like
``pre_<command_name>_hook`` and ``post_<command_name>_hook`` where
``<command_name>`` is the name of a ``setup.py`` command (e.g. build_ext).
If either hook is present this adds a wrapped version of that command to
the passed in ``commands`` `dict`. ``commands`` may be pre-populated with
other custom distutils command classes that should be wrapped if there are
hooks for them (e.g. `AstropyBuildPy`).
| 3.201168
| 2.900088
| 1.103818
|
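An illustrative setup_package.py showing the naming convention this function scans for; the package and hook bodies are hypothetical, and each hook receives the finalized command object.

```python
# Hypothetical setup_package.py: hooks discovered by add_command_hooks().
def pre_build_ext_hook(cmd):
    # runs before build_ext.run()
    print('about to build extensions into', cmd.build_lib)

def post_build_ext_hook(cmd):
    # runs after build_ext.run() completes
    print('finished building extensions')
```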
def run(self, orig_run=cmd_cls.run):
self.run_command_hooks('pre_hooks')
orig_run(self)
self.run_command_hooks('post_hooks')
return type(cmd_name, (cmd_cls, object),
{'run': run, 'run_command_hooks': run_command_hooks,
'pre_hooks': hooks.get('pre', []),
'post_hooks': hooks.get('post', [])})
|
def generate_hooked_command(cmd_name, cmd_cls, hooks)
|
Returns a generated subclass of ``cmd_cls`` that runs the pre- and
post-command hooks for that command before and after the ``cmd_cls.run``
method.
| 2.868161
| 2.513511
| 1.141097
|
hooks = getattr(cmd_obj, hook_kind, None)
if not hooks:
return
for modname, hook in hooks:
if isinstance(hook, str):
try:
hook_obj = resolve_name(hook)
except ImportError as exc:
raise DistutilsModuleError(
'cannot find hook {0}: {1}'.format(hook, exc))
else:
hook_obj = hook
if not callable(hook_obj):
raise DistutilsOptionError('hook {0!r} is not callable'.format(hook))
log.info('running {0} from {1} for {2} command'.format(
hook_kind.rstrip('s'), modname, cmd_obj.get_command_name()))
try:
hook_obj(cmd_obj)
except Exception:
log.error('{0} command hook {1} raised an exception:\n'.format(
hook_obj.__name__, cmd_obj.get_command_name()))
log.error(traceback.format_exc())
sys.exit(1)
|
def run_command_hooks(cmd_obj, hook_kind)
|
Run hooks registered for that command and phase.
*cmd_obj* is a finalized command object; *hook_kind* is either
'pre_hooks' or 'post_hooks'.
| 2.6032
| 2.656852
| 0.979806
|
info = get_package_info(srcdir)
extensions.extend(info['ext_modules'])
package_data.update(info['package_data'])
packagenames = list(set(packagenames + info['packages']))
package_dirs.update(info['package_dir'])
|
def update_package_files(srcdir, extensions, package_data, packagenames,
package_dirs)
|
This function is deprecated and maintained for backward compatibility
with affiliated packages. Affiliated packages should update their
setup.py to use `get_package_info` instead.
| 2.488942
| 2.277457
| 1.09286
|
for packagename in packages:
package_parts = packagename.split('.')
package_path = os.path.join(srcdir, *package_parts)
setup_package = os.path.relpath(
os.path.join(package_path, 'setup_package.py'))
if os.path.isfile(setup_package):
module = import_file(setup_package,
name=packagename + '.setup_package')
yield module
|
def iter_setup_packages(srcdir, packages)
|
A generator that finds and imports all of the ``setup_package.py``
modules in the source packages.
Returns
-------
modgen : generator
A generator that yields the imported ``setup_package.py`` modules.
| 2.671473
| 2.760737
| 0.967667
|
for dirpath, dirnames, filenames in walk_skip_hidden(package_dir):
for fn in filenames:
if fn.endswith('.pyx'):
fullfn = os.path.relpath(os.path.join(dirpath, fn))
# Package must match file name
extmod = '.'.join([package_name, fn[:-4]])
yield (extmod, fullfn)
break
|
def iter_pyx_files(package_dir, package_name)
|
A generator that yields Cython source files (ending in '.pyx') in the
source packages.
Returns
-------
pyxgen : generator
A generator that yields (extmod, fullfn) where `extmod` is the
full name of the module that the .pyx file would live in based
on the source directory structure, and `fullfn` is the path to
the .pyx file.
| 4.046406
| 3.262777
| 1.240173
|
# Vanilla setuptools and old versions of distribute include Cython files
# as .c files in the sources, not .pyx, so we cannot simply look for
# existing .pyx sources in the previous sources, but we should also check
# for .c files with the same remaining filename. So we look for .pyx and
# .c files, and we strip the extension.
prevsourcepaths = []
ext_modules = []
for ext in prevextensions:
for s in ext.sources:
if s.endswith(('.pyx', '.c', '.cpp')):
sourcepath = os.path.realpath(os.path.splitext(s)[0])
prevsourcepaths.append(sourcepath)
for package_name in packages:
package_parts = package_name.split('.')
package_path = os.path.join(srcdir, *package_parts)
for extmod, pyxfn in iter_pyx_files(package_path, package_name):
sourcepath = os.path.realpath(os.path.splitext(pyxfn)[0])
if sourcepath not in prevsourcepaths:
ext_modules.append(Extension(extmod, [pyxfn],
include_dirs=extincludedirs))
return ext_modules
|
def get_cython_extensions(srcdir, packages, prevextensions=tuple(),
extincludedirs=None)
|
Looks for Cython files and generates Extensions if needed.
Parameters
----------
srcdir : str
Path to the root of the source directory to search.
prevextensions : list of `~distutils.core.Extension` objects
The extensions that are already defined. Any .pyx files already here
will be ignored.
extincludedirs : list of str or None
Directories to include as the `include_dirs` argument to the generated
`~distutils.core.Extension` objects.
Returns
-------
exts : list of `~distutils.core.Extension` objects
The new extensions that are needed to compile all .pyx files (does not
include any already in `prevextensions`).
| 3.689478
| 3.830822
| 0.963103
|
flag_map = {'-I': 'include_dirs', '-L': 'library_dirs', '-l': 'libraries',
'-D': 'define_macros', '-U': 'undef_macros'}
command = "{0} --libs --cflags {1}".format(executable, ' '.join(packages)),
result = DistutilsExtensionArgs()
try:
pipe = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
output = pipe.communicate()[0].strip()
except subprocess.CalledProcessError as e:
lines = [
("{0} failed. This may cause the build to fail below."
.format(executable)),
" command: {0}".format(e.cmd),
" returncode: {0}".format(e.returncode),
" output: {0}".format(e.output)
]
log.warn('\n'.join(lines))
result['libraries'].extend(default_libraries)
else:
if pipe.returncode != 0:
lines = [
"pkg-config could not lookup up package(s) {0}.".format(
", ".join(packages)),
"This may cause the build to fail below."
]
log.warn('\n'.join(lines))
result['libraries'].extend(default_libraries)
else:
for token in output.split():
# It's not clear what encoding the output of
# pkg-config will come to us in. It will probably be
# some combination of pure ASCII (for the compiler
# flags) and the filesystem encoding (for any argument
# that includes directories or filenames), but this is
# just conjecture, as the pkg-config documentation
# doesn't seem to address it.
arg = token[:2].decode('ascii')
value = token[2:].decode(sys.getfilesystemencoding())
if arg in flag_map:
if arg == '-D':
value = tuple(value.split('=', 1))
result[flag_map[arg]].append(value)
else:
result['extra_compile_args'].append(value)
return result
|
def pkg_config(packages, default_libraries, executable='pkg-config')
|
Uses pkg-config to update a set of distutils Extension arguments
to include the flags necessary to link against the given packages.
If the pkg-config lookup fails, default_libraries is applied to
libraries.
Parameters
----------
packages : list of str
A list of pkg-config packages to look up.
default_libraries : list of str
A list of library names to use if the pkg-config lookup fails.
Returns
-------
config : dict
A dictionary containing keyword arguments to
`distutils.Extension`. These entries include:
- ``include_dirs``: A list of include directories
- ``library_dirs``: A list of library directories
- ``libraries``: A list of libraries
- ``define_macros``: A list of macro defines
- ``undef_macros``: A list of macros to undefine
- ``extra_compile_args``: A list of extra arguments to pass to
the compiler
| 3.28361
| 3.169997
| 1.03584
|
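A hedged sketch of feeding the returned keyword arguments into an Extension (the pkg-config package, module name and source file are illustrative):

    from distutils.core import Extension  # setuptools.Extension is a drop-in alternative

    cfg = pkg_config(['libpng'], default_libraries=['png'])
    ext = Extension('mypkg._png', sources=['mypkg/_png.c'], **cfg)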
for command in ['build', 'build_ext', 'install']:
add_command_option(command, str('use-system-' + library),
'Use the system {0} library'.format(library),
is_bool=True)
|
def add_external_library(library)
|
Add a build option for selecting the internal or system copy of a library.
Parameters
----------
library : str
The name of the library. If the library is `foo`, the build
option will be called `--use-system-foo`.
| 7.33002
| 6.104935
| 1.200671
|
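A hedged one-line usage sketch (the library name is illustrative):

    add_external_library('expat')  # registers --use-system-expat on build, build_ext and install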
if exclude:
warnings.warn(
"Use of the exclude parameter is no longer supported since it does "
"not work as expected. Use add_exclude_packages instead. Note that "
"it must be called prior to any other calls from setup helpers.",
AstropyDeprecationWarning)
# Calling add_exclude_packages after this point will have no effect
_module_state['excludes_too_late'] = True
if not invalidate_cache and _module_state['package_cache'] is not None:
return _module_state['package_cache']
packages = _find_packages(
where=where, exclude=list(_module_state['exclude_packages']))
_module_state['package_cache'] = packages
return packages
|
def find_packages(where='.', exclude=(), invalidate_cache=False)
|
This version of ``find_packages`` caches previous results to speed up
subsequent calls. Use ``invalidate_cache=True`` to ignore cached results
from previous ``find_packages`` calls, and repeat the package search.
| 5.08936
| 5.359656
| 0.949568
|
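A hedged sketch of the caching behaviour described above:

    pkgs = find_packages()                       # result is cached for later calls
    pkgs = find_packages(invalidate_cache=True)  # ignore the cache and search again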
# Only build with Cython if, of course, Cython is installed, we're in a
# development version (i.e. not release) or the Cython-generated source
# files haven't been created yet (cython_version == 'unknown'). The latter
# case can happen even when release is True if checking out a release tag
# from the repository
have_cython = False
try:
from Cython import __version__ as cython_version # noqa
have_cython = True
except ImportError:
pass
if have_cython and (not is_release or previous_cython_version == 'unknown'):
return cython_version
else:
return False
|
def should_build_with_cython(previous_cython_version, is_release)
|
Returns the installed Cython version if Cython should be used to build
extension modules from pyx files, i.e. Cython is available and either this
is not a release or the Cython-generated sources have not yet been created
(``previous_cython_version == 'unknown'``). Returns `False` otherwise.
| 4.924437
| 4.549611
| 1.082386
|
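A hedged usage sketch (the decision values are illustrative):

    cython_version = should_build_with_cython('unknown', is_release=True)
    if cython_version:
        print('building .pyx sources with Cython', cython_version)
    else:
        print('using pre-generated C/C++ sources')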
# Determine the compiler we'll be using
if self.compiler is None:
compiler = get_default_compiler()
else:
compiler = self.compiler
# Replace .pyx with C-equivalents, unless c files are missing
for jdx, src in enumerate(extension.sources):
base, ext = os.path.splitext(src)
pyxfn = base + '.pyx'
cfn = base + '.c'
cppfn = base + '.cpp'
if not os.path.isfile(pyxfn):
continue
if self._uses_cython:
extension.sources[jdx] = pyxfn
else:
if os.path.isfile(cfn):
extension.sources[jdx] = cfn
elif os.path.isfile(cppfn):
extension.sources[jdx] = cppfn
else:
msg = (
'Could not find C/C++ file {0}.(c/cpp) for Cython '
'file {1} when building extension {2}. Cython '
'must be installed to build from a git '
'checkout.'.format(base, pyxfn, extension.name))
raise IOError(errno.ENOENT, msg, cfn)
# Cython (at least as of 0.29.2) uses deprecated Numpy API features
# the use of which produces a few warnings when compiling.
# These additional flags should squelch those warnings.
# TODO: Feel free to remove this if/when a Cython update
# removes use of the deprecated Numpy API
if compiler == 'unix':
extension.extra_compile_args.extend([
'-Wp,-w', '-Wno-unused-function'])
|
def _check_cython_sources(self, extension)
|
Where relevant, make sure that the .c files associated with .pyx
modules are present (if building without Cython installed).
| 4.066736
| 3.984286
| 1.020694
|
if sys.platform.startswith('win'):
return None
# Simple input validation
if not var or not flag:
return None
flag_length = len(flag)
if not flag_length:
return None
# Look for var in os.environ then in get_config_var
if var in os.environ:
flags = os.environ[var]
else:
try:
flags = get_config_var(var)
except KeyError:
return None
# Extract flag from {var:value}
if flags:
for item in flags.split(delim):
if item.startswith(flag):
return item[flag_length:]
|
def _get_flag_value_from_var(flag, var, delim=' ')
|
Extract flags from an environment variable.
Parameters
----------
flag : str
The flag to extract, for example '-I' or '-L'
var : str
The environment variable to extract the flag from, e.g. CFLAGS or LDFLAGS.
delim : str, optional
The delimiter separating flags inside the environment variable
Examples
--------
Let's assume LDFLAGS is set to '-L/usr/local/include -customflag'. This
function will then return the following:
>>> _get_flag_value_from_var('-L', 'LDFLAGS')
'/usr/local/include'
Notes
-----
Environment variables are first checked in ``os.environ[var]``, then in
``distutils.sysconfig.get_config_var(var)``.
This function is not supported on Windows.
| 4.220159
| 3.355831
| 1.25756
|
compile_flags = []
link_flags = []
if get_compiler_option() == 'msvc':
compile_flags.append('-openmp')
else:
include_path = _get_flag_value_from_var('-I', 'CFLAGS')
if include_path:
compile_flags.append('-I' + include_path)
lib_path = _get_flag_value_from_var('-L', 'LDFLAGS')
if lib_path:
link_flags.append('-L' + lib_path)
link_flags.append('-Wl,-rpath,' + lib_path)
compile_flags.append('-fopenmp')
link_flags.append('-fopenmp')
return {'compiler_flags': compile_flags, 'linker_flags': link_flags}
|
def get_openmp_flags()
|
Utility for returning compiler and linker flags possibly needed for
OpenMP support.
Returns
-------
result : `{'compiler_flags':<flags>, 'linker_flags':<flags>}`
Notes
-----
The flags returned are not tested for validity, use
`check_openmp_support(openmp_flags=get_openmp_flags())` to do so.
| 2.227362
| 2.199712
| 1.01257
|
ccompiler = new_compiler()
customize_compiler(ccompiler)
if not openmp_flags:
# customize_compiler() extracts info from os.environ. If certain keys
# exist it uses these plus those from sysconfig.get_config_vars().
# If the key is missing in os.environ it is not extracted from
# sysconfig.get_config_var(). E.g. 'LDFLAGS' gets left out, preventing
# clang from finding libomp.dylib because -L<path> is not passed to
# linker. Call get_openmp_flags() to get flags missed by
# customize_compiler().
openmp_flags = get_openmp_flags()
compile_flags = openmp_flags.get('compiler_flags')
link_flags = openmp_flags.get('linker_flags')
# Pass -coverage flag to linker.
# https://github.com/astropy/astropy-helpers/pull/374
if '-coverage' in compile_flags and '-coverage' not in link_flags:
link_flags.append('-coverage')
tmp_dir = tempfile.mkdtemp()
start_dir = os.path.abspath('.')
try:
os.chdir(tmp_dir)
# Write test program
with open('test_openmp.c', 'w') as f:
f.write(CCODE)
os.mkdir('objects')
# Compile, test program
ccompiler.compile(['test_openmp.c'], output_dir='objects',
extra_postargs=compile_flags)
# Link test program
objects = glob.glob(os.path.join('objects', '*' + ccompiler.obj_extension))
ccompiler.link_executable(objects, 'test_openmp',
extra_postargs=link_flags)
# Run test program
output = subprocess.check_output('./test_openmp')
output = output.decode(sys.stdout.encoding or 'utf-8').splitlines()
if 'nthreads=' in output[0]:
nthreads = int(output[0].strip().split('=')[1])
if len(output) == nthreads:
is_openmp_supported = True
else:
log.warn("Unexpected number of lines from output of test OpenMP "
"program (output was {0})".format(output))
is_openmp_supported = False
else:
log.warn("Unexpected output from test OpenMP "
"program (output was {0})".format(output))
is_openmp_supported = False
except (CompileError, LinkError, subprocess.CalledProcessError):
is_openmp_supported = False
finally:
os.chdir(start_dir)
return is_openmp_supported
|
def check_openmp_support(openmp_flags=None)
|
Check whether OpenMP test code can be compiled and run.
Parameters
----------
openmp_flags : dict, optional
This should be a dictionary with keys ``compiler_flags`` and
``linker_flags`` giving the compilation and linking flags respectively.
These are passed as `extra_postargs` to `compile()` and
`link_executable()` respectively. If this is not set, the flags will
be automatically determined using environment variables.
Returns
-------
result : bool
`True` if the test passed, `False` otherwise.
| 3.155651
| 3.109008
| 1.015003
|
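A hedged sketch combining the two helpers, as suggested in the notes for get_openmp_flags:

    flags = get_openmp_flags()
    if check_openmp_support(openmp_flags=flags):
        print('OpenMP test program compiled, linked and ran successfully')
    else:
        print('falling back to a non-parallel build')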
log_threshold = log.set_threshold(log.FATAL)
ret = check_openmp_support()
log.set_threshold(log_threshold)
return ret
|
def is_openmp_supported()
|
Determine whether the build compiler has OpenMP support.
| 5.693113
| 5.550714
| 1.025654
|
if _ASTROPY_DISABLE_SETUP_WITH_OPENMP_:
log.info("OpenMP support has been explicitly disabled.")
return False
openmp_flags = get_openmp_flags()
using_openmp = check_openmp_support(openmp_flags=openmp_flags)
if using_openmp:
compile_flags = openmp_flags.get('compiler_flags')
link_flags = openmp_flags.get('linker_flags')
log.info("Compiling Cython/C/C++ extension with OpenMP support")
extension.extra_compile_args.extend(compile_flags)
extension.extra_link_args.extend(link_flags)
else:
log.warn("Cannot compile Cython/C/C++ extension with OpenMP, reverting "
"to non-parallel code")
return using_openmp
|
def add_openmp_flags_if_available(extension)
|
Add OpenMP compilation flags, if supported (if not, a warning will be
printed to the console and no flags will be added).
Returns `True` if the flags were added, `False` otherwise.
| 3.150286
| 3.10684
| 1.013984
|
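A hedged sketch of decorating an extension with OpenMP flags (the module and source names are illustrative):

    from distutils.core import Extension

    ext = Extension('mypkg._fast', sources=['mypkg/_fast.c'])
    if add_openmp_flags_if_available(ext):
        print('extension will be built with OpenMP support')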
if packagename.lower() == 'astropy':
packagetitle = 'Astropy'
else:
packagetitle = packagename
epoch = int(os.environ.get('SOURCE_DATE_EPOCH', time.time()))
timestamp = datetime.datetime.utcfromtimestamp(epoch)
if disable_openmp is not None:
import builtins
builtins._ASTROPY_DISABLE_SETUP_WITH_OPENMP_ = disable_openmp
if _ASTROPY_DISABLE_SETUP_WITH_OPENMP_:
log.info("OpenMP support has been explicitly disabled.")
openmp_support = False if _ASTROPY_DISABLE_SETUP_WITH_OPENMP_ else is_openmp_supported()
src = _IS_OPENMP_ENABLED_SRC.format(packagetitle=packagetitle,
timestamp=timestamp,
return_bool=openmp_support)
package_srcdir = os.path.join(srcdir, *packagename.split('.'))
is_openmp_enabled_py = os.path.join(package_srcdir, 'openmp_enabled.py')
with open(is_openmp_enabled_py, 'w') as f:
f.write(src)
|
def generate_openmp_enabled_py(packagename, srcdir='.', disable_openmp=None)
|
Generate ``package.openmp_enabled.is_openmp_enabled``, which can then be used
to determine, post build, whether the package was built with or without
OpenMP support.
| 2.99156
| 2.78132
| 1.07559
|
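A hedged usage sketch (the package name is illustrative):

    generate_openmp_enabled_py('mypkg', srcdir='.')
    # After the build, the generated module can be queried at runtime:
    #     from mypkg.openmp_enabled import is_openmp_enabled
    #     is_openmp_enabled()  # True or False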
if len(self.dataframe):
return str(self.dataframe.iloc[row, col])
return ''
|
def GetValue(self, row, col)
|
Find the matching value from pandas DataFrame,
return it.
| 6.054592
| 5.014929
| 1.207314
|
self.dataframe.iloc[row, col] = value
|
def SetValue(self, row, col, value)
|
Set value in the pandas DataFrame
| 8.806677
| 5.237105
| 1.681593
|
try:
self.dataframe.iloc[:, col] = value
except ValueError:
self.dataframe.loc[:, col] = value
|
def SetColumnValues(self, col, value)
|
Custom method to efficiently set all values
in a column.
Parameters
----------
col : str or int
name or index position of column
value : list-like
values to assign to all cells in the column
| 4.124986
| 4.076813
| 1.011816
|
if len(self.dataframe):
return self.dataframe.columns[col]
return ''
|
def GetColLabelValue(self, col)
|
Get col label from dataframe
| 7.57479
| 5.366812
| 1.411413
|
if len(self.dataframe):
col_name = str(self.dataframe.columns[col])
self.dataframe.rename(columns={col_name: str(value)}, inplace=True)
return None
|
def SetColLabelValue(self, col, value)
|
Set col label value in dataframe
| 4.15359
| 3.543851
| 1.172056
|
try:
if len(self.row_labels) < 5:
show_horizontal = wx.SHOW_SB_NEVER
else:
show_horizontal = wx.SHOW_SB_DEFAULT
self.ShowScrollbars(show_horizontal, wx.SHOW_SB_DEFAULT)
except AttributeError:
pass
|
def set_scrollbars(self)
|
Set to always have vertical scrollbar.
Have horizontal scrollbar unless grid has very few rows.
Older versions of wxPython will choke on this,
in which case nothing happens.
| 4.038778
| 3.186968
| 1.267279
|
# replace "None" values with ""
dataframe = dataframe.fillna("")
# remove any columns that shouldn't be shown
for col in hide_cols:
if col in dataframe.columns:
del dataframe[col]
# add more rows
self.AppendRows(len(dataframe))
columns = dataframe.columns
row_num = -1
# fill in all rows with appropriate values
for ind, row in dataframe.iterrows():
row_num += 1
for col_num, col in enumerate(columns):
value = row[col]
self.SetCellValue(row_num, col_num, str(value))
# set citation default value
if col == 'citations':
citation = row['citations']
if (citation is None) or (citation is np.nan):
self.SetCellValue(row_num, col_num, 'This study')
else:
if 'This study' not in citation:
if len(citation):
citation += ':'
citation += 'This study'
self.SetCellValue(row_num, col_num, citation)
self.row_labels.extend(dataframe.index)
|
def add_items(self, dataframe, hide_cols=())
|
Add items and/or update existing items in grid
| 3.15247
| 3.108701
| 1.014079
|
if not rows:
rows = list(range(self.GetNumberRows()))
cols = list(range(self.GetNumberCols()))
data = {}
for row in rows:
data[row] = {}
for col in cols:
col_name = self.GetColLabelValue(col)
if verbose:
print(col_name, ":", self.GetCellValue(row, col))
data[row][col_name] = self.GetCellValue(row, col)
return data
|
def save_items(self, rows=None, verbose=False)
|
Return a dictionary of row data for selected rows:
{1: {col1: val1, col2: val2}, ...}
If a list of row numbers isn't provided, get data for all.
| 2.070246
| 1.845438
| 1.121818
|
if not self.changes:
self.changes = {event.Row}
else:
self.changes.add(event.Row)
#self.changes = True
try:
editor = event.GetControl()
editor.Bind(wx.EVT_KEY_DOWN, self.onEditorKey)
except AttributeError:
# if it's a EVT_GRID_EDITOR_SHOWN, it doesn't have the GetControl method
pass
|
def on_edit_grid(self, event)
|
Adds the edited row to self.changes when the user edits the grid.
Provides down and up key functionality for exiting the editor.
| 4.614754
| 4.2147
| 1.094919
|
# find where the user has clicked
col_ind = self.GetGridCursorCol()
row_ind = self.GetGridCursorRow()
# read in clipboard text
text_df = pd.read_clipboard(header=None, sep='\t').fillna('')
# add extra rows if needed to accommodate clipboard text
row_length_diff = len(text_df) - (len(self.row_labels) - row_ind)
if row_length_diff > 0:
for n in range(row_length_diff):
self.add_row()
# ignore excess columns if present
col_length_diff = len(text_df.columns) - (len(self.col_labels) - col_ind)
if col_length_diff > 0:
text_df = text_df.iloc[:, :-col_length_diff].copy()
# go through copied text and parse it into the grid rows
for label, row_data in text_df.iterrows():
col_range = list(range(col_ind, col_ind + len(row_data)))
if len(row_data) > 1:
cols = list(zip(col_range, row_data.index))
for column in cols:
value = row_data[column[1]]
this_col = column[0]
self.SetCellValue(row_ind, this_col, str(value))
else:
value = row_data[0]
self.SetCellValue(row_ind, col_ind, str(value))
row_ind += 1
# could instead use wxPython clipboard here
# see old git history for that
self.size_grid()
event.Skip()
|
def do_paste(self, event)
|
Read clipboard into dataframe
Paste data into grid, adding extra rows if needed
and ignoring extra columns.
| 3.298962
| 3.059175
| 1.078383
|
if row_num in self.changes.copy():
self.changes.remove(row_num)
updated_rows = []
for changed_row in self.changes:
if changed_row == -1:
updated_rows.append(-1)
if changed_row > row_num:
updated_rows.append(changed_row - 1)
if changed_row < row_num:
updated_rows.append(changed_row)
self.changes = set(updated_rows)
|
def update_changes_after_row_delete(self, row_num)
|
Update self.changes so that row numbers for edited rows are still correct.
I.e., if row 4 was edited and then row 2 was deleted, row 4 becomes row 3.
This function updates self.changes to reflect that.
| 2.076848
| 2.040482
| 1.017822
|
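A standalone sketch of the renumbering rule, written without wxPython so it can be run directly (the helper name is hypothetical, not part of the class):

    def renumber_after_delete(changes, row_num):
        # Edited rows with a smaller index than the deleted row keep their
        # index, rows with a larger index shift up by one, and the deleted
        # row itself is dropped.
        changes = set(changes)
        changes.discard(row_num)
        return {r - 1 if r > row_num else r for r in changes}

    assert renumber_after_delete({2, 4}, 2) == {3}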
self.SetColLabelRenderer(col, MyColLabelRenderer('#1101e0'))
# SetCellRenderer doesn't work with table-based grid (HugeGrid class)
if not skip_cell:
self.SetCellRenderer(row, col, MyCustomRenderer(color))
|
def paint_invalid_cell(self, row, col, color='MEDIUM VIOLET RED',
skip_cell=False)
|
Color the cell at the given row and column.
| 12.033475
| 11.888107
| 1.012228
|
self.table.dataframe[label] = ''
self.AppendCols(1, updateLabels=False)
last_col = self.table.GetNumberCols() - 1
self.SetColLabelValue(last_col, label)
self.col_labels.append(label)
self.size_grid()
return last_col
|
def add_col(self, label)
|
Update table dataframe, and append a new column
Parameters
----------
label : str
Returns
-------
last_col : int
Index of the newly added column
| 4.750505
| 5.511971
| 0.861852
|
label_value = self.GetColLabelValue(col_num).strip('**').strip('^^')
self.col_labels.remove(label_value)
del self.table.dataframe[label_value]
result = self.DeleteCols(pos=col_num, numCols=1, updateLabels=True)
self.size_grid()
return result
|
def remove_col(self, col_num)
|
Update table dataframe and remove a column.
Resize grid to display correctly.
| 6.487873
| 5.779639
| 1.122539
|
fmt,plot='svg',0
if len(sys.argv) > 0:
if '-h' in sys.argv: # check if help is needed
print(main.__doc__)
sys.exit() # graceful quit
if '-fmt' in sys.argv:
ind=sys.argv.index('-fmt')
fmt=sys.argv[ind+1]
if '-sav' in sys.argv:plot=1
if '-f' in sys.argv:
ind=sys.argv.index('-f')
file=sys.argv[ind+1]
f=open(file,'r')
data=f.readlines()
else:
data=sys.stdin.readlines() # read in data from standard input
DIs,Pars=[],[]
for line in data: # read in the data from standard input
pars=[]
rec=line.split() # split each line on space to get records
DIs.append([float(rec[0]),float(rec[1])])
pars.append(float(rec[0]))
pars.append(float(rec[1]))
pars.append(float(rec[2]))
pars.append(float(rec[0]))
isign=abs(float(rec[1])) / float(rec[1])
pars.append(float(rec[1])-isign*90.) #Beta inc
pars.append(float(rec[2])) # gamma
pars.append(float(rec[0])+90.) # Beta dec
pars.append(0.) #Beta inc
Pars.append(pars)
#
EQ={'eq':1} # make plot dictionary
pmagplotlib.plot_init(EQ['eq'],5,5)
title='Equal area projection'
pmagplotlib.plot_eq(EQ['eq'],DIs,title)# plot directions
for k in range(len(Pars)):
pmagplotlib.plot_ell(EQ['eq'],Pars[k],'b',0,1) # plot ellipses
files={}
for key in list(EQ.keys()):
files[key]=key+'.'+fmt
titles={}
titles['eq']='Equal Area Plot'
if pmagplotlib.isServer:
black = '#000000'
purple = '#800080'
EQ = pmagplotlib.add_borders(EQ,titles,black,purple)
pmagplotlib.save_plots(EQ,files)
elif plot==0:
pmagplotlib.draw_figs(EQ)
ans=input(" S[a]ve to save plot, [q]uit, Return to continue: ")
if ans=="q": sys.exit()
if ans=="a":
pmagplotlib.save_plots(EQ,files)
else:
pmagplotlib.save_plots(EQ,files)
|
def main()
|
NAME
plotdi_a.py
DESCRIPTION
plots equal area projection from dec inc data and fisher mean, cone of confidence
INPUT FORMAT
takes dec, inc, alpha95 as first three columns in space delimited file
SYNTAX
plotdi_a.py [-i][-f FILE]
OPTIONS
-f FILE to read file name from command line
-fmt [png,jpg,eps,pdf,svg] set plot file format ['svg' is default]
-sav save plot and quit
| 3.247371
| 3.054229
| 1.063238
|
D,I=0.,90.
outfile=""
infile=""
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-f' in sys.argv:
ind=sys.argv.index('-f')
infile=sys.argv[ind+1]
data=numpy.loadtxt(infile)
else:
data=numpy.loadtxt(sys.stdin,dtype=float)  # numpy.float was removed in recent NumPy; builtin float is equivalent
if '-F' in sys.argv:
ind=sys.argv.index('-F')
outfile=sys.argv[ind+1]
out=open(outfile,'w')
if '-D' in sys.argv:
ind=sys.argv.index('-D')
D=float(sys.argv[ind+1])
if '-I' in sys.argv:
ind=sys.argv.index('-I')
I=float(sys.argv[ind+1])
if len(data.shape)>1: # 2-D array
N=data.shape[0]
DipDir,Dip=numpy.ones(N,dtype=float).transpose()*(D-180.),numpy.ones(N,dtype=float).transpose()*(90.-I)
data=data.transpose()
data=numpy.array([data[0],data[1],DipDir ,Dip]).transpose()
drot,irot=pmag.dotilt_V(data)
drot=(drot-180.)%360. #
for k in range(N):
if outfile=="":
print('%7.1f %7.1f ' % (drot[k],irot[k]))
else:
out.write('%7.1f %7.1f\n' % (drot[k],irot[k]))
else:
d,i=pmag.dotilt(data[0],data[1],(D-180.),90.-I)
if outfile=="":
print('%7.1f %7.1f ' % ((d-180.)%360.,i))
else:
out.write('%7.1f %7.1f\n' % ((d-180.)%360.,i))
|
def main()
|
NAME
di_rot.py
DESCRIPTION
rotates set of directions to new coordinate system
SYNTAX
di_rot.py [command line options]
OPTIONS
-h prints help message and quits
-f specify input file, default is standard input
-F specify output file, default is standard output
-D D specify Dec of new coordinate system, default is 0
-I I specify Inc of new coordinate system, default is 90
INPUT/OUTPUT
dec inc [space delimited]
| 2.418819
| 2.339293
| 1.033996
|

ofile=""
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-F' in sys.argv:
ind=sys.argv.index('-F')
ofile=sys.argv[ind+1]
out=open(ofile,'w')
if '-flt' in sys.argv:
ind=sys.argv.index('-flt')
flt=float(sys.argv[ind+1])
else:
print(main.__doc__)
sys.exit()
if '-f' in sys.argv:
ind=sys.argv.index('-f')
file=sys.argv[ind+1]
input=numpy.loadtxt(file)
else:
input=numpy.loadtxt(sys.stdin,dtype=float)  # numpy.float was removed in recent NumPy; builtin float is equivalent
# read in inclination data
for line in input:
dec=float(line[0])
inc=float(line[1])*numpy.pi/180.
tincnew=(old_div(1,flt))*numpy.tan(inc)
incnew=numpy.arctan(tincnew)*180./numpy.pi
if ofile=="":
print('%7.1f %7.1f'% (dec,incnew))
else:
out.write('%7.1f %7.1f'% (dec,incnew)+'\n')
|
def main()
|
NAME
unsquish.py
DESCRIPTION
takes dec/inc data and "unsquishes" with specified flattening factor, flt
using formula tan(If)=(1/flt)*tan(Io)
INPUT
declination inclination
OUTPUT
"unsquished" declincation inclination
SYNTAX
unsquish.py [command line options] [< filename]
OPTIONS
-h print help and quit
-f FILE, input file
-F FILE, output file
-flt FLT, flattening factor [required]
| 2.637123
| 2.307892
| 1.142654
|
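A hedged worked example of the formula tan(If) = (1/flt) * tan(Io) quoted above (the values are illustrative):

    import numpy

    flt, inc_obs = 0.4, 30.0
    inc_unsquished = numpy.degrees(numpy.arctan(numpy.tan(numpy.radians(inc_obs)) / flt))
    print(round(inc_unsquished, 1))  # ~55.3 degrees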
# does not exclude ptrm checks that are less than tmin
ptrm_checks_included_temps= []
for num, check in enumerate(ptrm_temps):
if check > tmax:
pass
elif ptrm_starting_temps[num] > tmax: # or ptrm_starting_temps[num] < tmin:
pass
else:
ptrm_checks_included_temps.append(check)
return len(ptrm_checks_included_temps), ptrm_checks_included_temps
|
def get_n_ptrm(tmin, tmax, ptrm_temps, ptrm_starting_temps)
|
input: tmin, tmax, ptrm_temps, ptrm_starting_temps
returns number of ptrm_checks included in best fit segment.
excludes checks if temp exceeds tmax OR if starting temp exceeds tmax.
output: n_ptrm, ptrm_checks_included_temperatures
| 3.078552
| 2.417177
| 1.273614
|
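A hedged worked example, assuming get_n_ptrm is in scope (the temperatures are illustrative):

    # The check at 550 exceeds tmax, and the check whose starting temperature
    # (520) exceeds tmax is also excluded; only the 200-degree check remains.
    n, temps = get_n_ptrm(tmin=100, tmax=500,
                          ptrm_temps=[200, 400, 550],
                          ptrm_starting_temps=[300, 520, 500])
    print(n, temps)  # 1 [200]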
if not ptrm_checks_included_temps:
return [], float('nan'), float('nan'), float('nan'), float('nan')
diffs = []
abs_diffs = []
x_Arai_compare = []
ptrm_compare = []
check_percents = []
ptrm_checks_all_temps = list(ptrm_checks_all_temps)
for check in ptrm_checks_included_temps: # goes through each included temperature step
ptrm_ind = ptrm_checks_all_temps.index(check) # indexes the number of the check
ptrm_check = ptrm_x[ptrm_ind] # x value at that temperature step
ptrm_compare.append(ptrm_check) #
arai_ind = t_Arai.index(check)
ptrm_orig = x_Arai[arai_ind]
x_Arai_compare.append(ptrm_orig)
diff = ptrm_orig - ptrm_check
diffs.append(diff)
abs_diffs.append(abs(diff))
if ptrm_orig == 0:
check_percents.append(0)
else:
check_percents.append((old_div(abs(diff), ptrm_orig)) * 100)
max_diff = max(abs_diffs)
check_percent = max(check_percents)
sum_diffs = abs(sum(diffs))
sum_abs_diffs = sum(abs_diffs)
return diffs, max_diff, sum_diffs, check_percent, sum_abs_diffs
|
def get_max_ptrm_check(ptrm_checks_included_temps, ptrm_checks_all_temps, ptrm_x, t_Arai, x_Arai)
|
input: ptrm_checks_included_temps, ptrm_checks_all_temps, ptrm_x, t_Arai, x_Arai.
sorts through included ptrm_checks and finds the largest ptrm check diff,
the sum of the total diffs,
and the percentage of the largest check / original measurement at that temperature step
output: max_diff, sum_diffs, check_percent, sum_abs_diffs.
| 2.25573
| 2.01227
| 1.120988
|
L = numpy.sqrt(delta_x_prime**2 + delta_y_prime**2)
DRAT = (old_div(max_ptrm_check, L)) * 100
return DRAT, L
|
def get_DRAT(delta_x_prime, delta_y_prime, max_ptrm_check)
|
Input: TRM length of best fit line (delta_x_prime),
NRM length of best fit line,
max_ptrm_check
Output: DRAT (maximum difference produced by a ptrm check, normalized by the best fit line length),
length of best fit line
| 3.518556
| 3.39182
| 1.037365
|
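A hedged worked example, assuming get_DRAT is in scope (the segment lengths are illustrative):

    drat, line_len = get_DRAT(delta_x_prime=3.0, delta_y_prime=4.0, max_ptrm_check=0.25)
    print(line_len, drat)  # 5.0 5.0  (i.e. 0.25 / 5.0 * 100)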
CDRAT = (old_div(sum_ptrm_checks, L)) * 100.
CDRAT_prime = (old_div(sum_abs_ptrm_checks, L)) * 100.
return CDRAT, CDRAT_prime
|
def get_CDRAT(L, sum_ptrm_checks, sum_abs_ptrm_checks)
|
input: best_fit line length, sum of ptrm check diffs,
sum of absolute value of ptrm check diffs
output: CDRAT (uses sum of diffs), CDRAT_prime (uses sum of absolute diffs)
| 2.117175
| 2.261714
| 0.936093
|