| code string | signature string | docstring string | loss_without_docstring float64 | loss_with_docstring float64 | factor float64 |
|---|---|---|---|---|---|
valid_axes = ("country", "state", "year", "month")
if any(map(lambda axis: axis not in valid_axes, fallback_aggaxes)):
raise ValueError(
"All elements of the fallback_aggaxes set must be one of the "
f"following: {valid_axes}"
)
for n in self.nodes(data=True):
for indicator in n[1]["indicators"].values():
indicator.mean, indicator.unit = get_indicator_value(
indicator,
country,
state,
year,
month,
unit,
fallback_aggaxes,
aggfunc,
)
indicator.stdev = 0.1 * abs(indicator.mean) | def parameterize(
self,
country: Optional[str] = "South Sudan",
state: Optional[str] = None,
year: Optional[int] = None,
month: Optional[int] = None,
unit: Optional[str] = None,
fallback_aggaxes: List[str] = ["year", "month"],
aggfunc: Callable = np.mean,
) | Parameterize the analysis graph.
Args:
country
state
year
month
unit
fallback_aggaxes:
An iterable of strings denoting the axes upon which to perform
fallback aggregation if the desired constraints cannot be met.
aggfunc: The function that will be called to perform the
aggregation if there are multiple matches. | 3.451286 | 3.423018 | 1.008258 |
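A minimal usage sketch for parameterize() above (hypothetical graph object G; argument values are illustrative):
import numpy as np
# G is assumed to be an AnalysisGraph whose nodes carry "indicators" data.
G.parameterize(
    country="South Sudan",
    year=2017,
    month=4,
    fallback_aggaxes=["year", "month"],
    aggfunc=np.mean,
)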
for n in nodes:
if self.has_node(n):
self.remove_node(n) | def delete_nodes(self, nodes: Iterable[str]) | Iterate over a set of nodes and remove the ones that are present in
the graph. | 4.165965 | 2.907353 | 1.432906 |
if self.has_node(node):
self.remove_node(node) | def delete_node(self, node: str) | Removes a node if it is in the graph. | 6.142513 | 3.202711 | 1.91791 |
if self.has_edge(source, target):
self.remove_edge(source, target) | def delete_edge(self, source: str, target: str) | Removes an edge if it is in the graph. | 3.951741 | 2.490349 | 1.586822 |
for edge in edges:
if self.has_edge(*edge):
self.remove_edge(*edge) | def delete_edges(self, edges: Iterable[Tuple[str, str]]) | Iterate over a set of edges and remove the ones that are present in
the graph. | 3.52781 | 2.720188 | 1.2969 |
# Remove redundant paths.
for node_pair in tqdm(list(permutations(self.nodes(), 2))):
paths = [
list(pairwise(path))
for path in nx.all_simple_paths(self, *node_pair, cutoff)
]
if len(paths) > 1:
for path in paths:
if len(path) == 1:
self.delete_edge(*path[0])
if any(self.degree(n) == 0 for n in path[0]):
self.add_edge(*path[0])
break | def prune(self, cutoff: int = 2) | Prunes the CAG by removing redundant paths. If there are multiple
(directed) paths between two nodes, this function removes all but the
longest paths. Subsequently, it restricts the graph to the largest
connected component.
Args:
cutoff: The maximum path length to consider for finding redundant
paths. Higher values of this parameter correspond to more
aggressive pruning. | 3.367407 | 2.974857 | 1.131956 |
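A small worked example of the pruning rule, assuming AnalysisGraph can be built like a networkx DiGraph (hypothetical node names):
G = AnalysisGraph()
G.add_edges_from([("rainfall", "crops"), ("crops", "food"), ("rainfall", "food")])
G.prune(cutoff=2)
# Two directed paths run from rainfall to food, so the single-edge
# (direct) path is deleted and the longer path is kept.
assert not G.has_edge("rainfall", "food")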
for p in self.predecessors(n1):
for st in self[p][n1]["InfluenceStatements"]:
if not same_polarity:
st.obj_delta["polarity"] = -st.obj_delta["polarity"]
st.obj.db_refs["UN"][0] = (n2, st.obj.db_refs["UN"][0][1])
if not self.has_edge(p, n2):
self.add_edge(p, n2)
self[p][n2]["InfluenceStatements"] = self[p][n1][
"InfluenceStatements"
]
else:
self[p][n2]["InfluenceStatements"] += self[p][n1][
"InfluenceStatements"
]
for s in self.successors(n1):
for st in self.edges[n1, s]["InfluenceStatements"]:
if not same_polarity:
st.subj_delta["polarity"] = -st.subj_delta["polarity"]
st.subj.db_refs["UN"][0] = (n2, st.subj.db_refs["UN"][0][1])
if not self.has_edge(n2, s):
self.add_edge(n2, s)
self[n2][s]["InfluenceStatements"] = self[n1][s][
"InfluenceStatements"
]
else:
self[n2][s]["InfluenceStatements"] += self[n1][s][
"InfluenceStatements"
]
self.remove_node(n1) | def merge_nodes(self, n1: str, n2: str, same_polarity: bool = True) | Merge node n1 into node n2, with the option to specify relative
polarity.
Args:
n1
n2
same_polarity | 1.905961 | 1.921351 | 0.99199 |
nodeset = {concept}
if reverse:
func = self.predecessors
else:
func = self.successors
for i in range(depth):
nodeset.update(
chain.from_iterable([list(func(n)) for n in nodeset])
)
return AnalysisGraph(self.subgraph(nodeset).copy()) | def get_subgraph_for_concept(
self, concept: str, depth: int = 1, reverse: bool = False
) | Returns a new subgraph of the analysis graph for a single concept.
Args:
concept: The concept that the subgraph will be centered around.
depth: The depth to which the depth-first search must be performed.
reverse: Sets the direction of causal influence flow to examine.
    Setting this to False (default) will follow successors to find
    downstream causal influences, and setting it to True will follow
    predecessors to find upstream causal influences.
Returns:
AnalysisGraph | 4.371614 | 4.405163 | 0.992384 |
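A usage sketch for get_subgraph_for_concept (hypothetical concept name):
# Subgraph of everything within two hops downstream of "conflict".
sub = G.get_subgraph_for_concept("conflict", depth=2, reverse=False)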
paths = nx.all_simple_paths(self, source, target, cutoff=cutoff)
return AnalysisGraph(self.subgraph(set(chain.from_iterable(paths)))) | def get_subgraph_for_concept_pair(
self, source: str, target: str, cutoff: Optional[int] = None
) | Get subgraph comprised of simple paths between the source and the
target.
Args:
source
target
cutoff | 5.099069 | 5.319389 | 0.958582 |
path_generator = (
nx.all_simple_paths(self, source, target, cutoff=cutoff)
for source, target in permutations(concepts, 2)
)
paths = chain.from_iterable(path_generator)
return AnalysisGraph(self.subgraph(set(chain.from_iterable(paths)))) | def get_subgraph_for_concept_pairs(
self, concepts: List[str], cutoff: Optional[int] = None
) | Get subgraph comprised of simple paths between all ordered pairs of
the given concepts.
Args:
concepts
cutoff | 3.410166 | 4.05757 | 0.840445 |
def _format(node, level=0):
if isinstance(node, ast.AST):
fields = [(a, _format(b, level)) for a, b in ast.iter_fields(node)]
if include_attributes and node._attributes:
fields.extend(
[
(a, _format(getattr(node, a), level))
for a in node._attributes
]
)
return "".join(
[
node.__class__.__name__,
"(",
", ".join(
("%s=%s" % field for field in fields)
if annotate_fields
else (b for a, b in fields)
),
")",
]
)
elif isinstance(node, list):
lines = ["["]
lines.extend(
(
indent * (level + 2) + _format(x, level + 2) + ","
for x in node
)
)
if len(lines) > 1:
lines.append(indent * (level + 1) + "]")
else:
lines[-1] += "]"
return "\n".join(lines)
return repr(node)
if not isinstance(node, ast.AST):
raise TypeError("expected AST, got %r" % node.__class__.__name__)
return _format(node) | def dump(node, annotate_fields=True, include_attributes=False, indent=" ") | Return a formatted dump of the tree in *node*. This is mainly useful for
debugging purposes. The returned string will show the names and the values
for fields. This makes the code impossible to evaluate, so if evaluation
is wanted *annotate_fields* must be set to False. Attributes such as line
numbers and column offsets are not dumped by default. If this is wanted,
*include_attributes* can be set to True. | 1.676937 | 1.656936 | 1.012071 |
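A quick usage sketch of this dump() variant (exact output varies with the Python AST version):
import ast
tree = ast.parse("x = 1 + 2")
print(dump(tree.body[0]))
# e.g. Assign(targets=[Name(id='x', ctx=Store())], value=BinOp(...), ...)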
lambdaStrings = ["import math\n\n"]
state = PGMState(lambdaStrings)
generator = GrFNGenerator()
generator.mode_mapper = mode_mapper_dict
pgm = generator.genPgm(asts, state, {}, "")[0]
if pgm.get("start"):
pgm["start"] = pgm["start"][0]
else:
pgm["start"] = generator.function_defs[-1]
pgm["source"] = [[get_path(file_name, "source")]]
# dateCreated stores the date on which the lambda and PGM files were
# created. It is stored in YYYYMMDD format.
pgm["dateCreated"] = f"{datetime.today().strftime('%Y%m%d')}"
with open(lambdaFile, "w") as f:
f.write("".join(lambdaStrings))
# View the PGM file that will be used to build a scope tree
if save_file:
json.dump(pgm, open(file_name[:file_name.rfind(".")] + ".json", "w"))
return pgm | def create_pgm_dict(
lambdaFile: str,
asts: List,
file_name: str,
mode_mapper_dict: dict,
save_file=False,
) -> Dict | Create a Python dict representing the PGM, with additional metadata for
JSON output. | 5.18783 | 5.027509 | 1.031889 |
filtered_sts = []
counters = {}
def update_counter(counter_name):
if counter_name in counters:
counters[counter_name] += 1
else:
counters[counter_name] = 1
for s in tqdm(sts):
update_counter("Original number of statements")
# Apply belief score threshold cutoff
if s.belief <= belief_score_cutoff:
continue
update_counter(f"Statements with belief score > {belief_score_cutoff}")
# Select statements with UN groundings
if s.subj.db_refs.get("UN") is None or s.obj.db_refs.get("UN") is None:
continue
update_counter("Statements with UN groundings")
# Apply grounding score cutoffs
if not all(
x[1] > grounding_score_cutoff
for x in (y.db_refs["UN"][0] for y in (s.subj, s.obj))
):
continue
update_counter(
f"Statements with subj and obj grounding scores > {grounding_score_cutoff}"
)
# Assign default polarities
if s.subj_delta["polarity"] is None:
s.subj_delta["polarity"] = 1
if s.obj_delta["polarity"] is None:
s.obj_delta["polarity"] = 1
filtered_sts.append(s)
for k, v in counters.items():
print(f"{k}: {v}")
return filtered_sts | def filter_and_process_statements(
sts,
grounding_score_cutoff: float = 0.8,
belief_score_cutoff: float = 0.85,
concepts_of_interest: List[str] = [],
) | Filter preassembled statements according to certain rules. | 2.623215 | 2.567667 | 1.021634 |
with open(input, "rb") as f:
G = pickle.load(f)
G.map_concepts_to_indicators(min_temporal_res="month")
G.set_indicator("UN/events/weather/precipitation", "Historical Average Total Daily Rainfall (Maize)", "DSSAT")
G.set_indicator("UN/events/human/agriculture/food_production",
"Historical Production (Maize)", "DSSAT")
G.set_indicator("UN/entities/human/food/food_security", "IPC Phase Classification", "FEWSNET")
G.set_indicator("UN/entities/food_availability", "Production, Meat indigenous, total", "FAO")
G.set_indicator("UN/entities/human/financial/economic/market", "Inflation Rate", "ieconomics.com")
G.set_indicator("UN/events/human/death", "Battle-related deaths", "WDI")
with open(output, "wb") as f:
pickle.dump(G, f) | def create_CAG_with_indicators(input, output, filename="CAG_with_indicators.pdf") | Create a CAG with mapped indicators | 7.740408 | 7.472154 | 1.035901 |
# Set input values
for i in self.inputs:
self.nodes[i]["value"] = inputs[i]
for func_set in self.function_sets:
for func_name in func_set:
lambda_fn = self.nodes[func_name]["lambda_fn"]
output_node = list(self.successors(func_name))[0]
signature = self.nodes[func_name]["func_inputs"]
input_values = [self.nodes[n]["value"] for n in signature]
res = lambda_fn(*input_values)
if torch_size is not None and len(signature) == 0:
self.nodes[output_node]["value"] = torch.tensor(
[res] * torch_size, dtype=torch.double
)
else:
self.nodes[output_node]["value"] = res
# Return the output
return self.nodes[self.output_node]["value"] | def run(
self,
inputs: Dict[str, Union[float, Iterable]],
torch_size: Optional[int] = None,
) -> Union[float, Iterable] | Executes the GrFN over a particular set of inputs and returns the
result.
Args:
inputs: Input set where keys are the names of input nodes in the
GrFN and each key points to a set of input values (or just one).
Returns:
A set of outputs from executing the GrFN, one for every set of
inputs. | 2.726019 | 2.899873 | 0.940048 |
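A minimal invocation sketch for run(); the node names are hypothetical and in practice come from the GrFN wiring (scope::name_index):
out = G.run({"main::x_0": 1.0, "main::y_0": 2.0})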
G = nx.DiGraph()
for (name, attrs) in self.nodes(data=True):
if attrs["type"] == "variable":
for pred_fn in self.predecessors(name):
if not any(
fn_type in pred_fn
for fn_type in ("condition", "decision")
):
for pred_var in self.predecessors(pred_fn):
G.add_node(
self.nodes[pred_var]["basename"],
**self.nodes[pred_var],
)
G.add_node(attrs["basename"], **attrs)
G.add_edge(
self.nodes[pred_var]["basename"],
attrs["basename"],
)
if attrs["is_loop_index"]:
G.add_edge(attrs["basename"], attrs["basename"])
return G | def to_CAG(self) | Export to a Causal Analysis Graph (CAG) PyGraphviz AGraph object.
The CAG shows the influence relationships between the variables and
elides the function nodes. | 3.37364 | 2.949736 | 1.143709 |
tab = " "
result = list()
for n in node_set:
repr = (
n
if self.nodes[n]["type"] == "variable"
else f"{n}{inspect.signature(self.nodes[n]['lambda_fn'])}"
)
result.append(f"{tab * depth}{repr}")
result.extend(
self.traverse_nodes(self.successors(n), depth=depth + 1)
)
return result | def traverse_nodes(self, node_set, depth=0) | Depth-first traversal of nodes that returns the name traversal as a list of strings.
Args:
node_set: Set of input nodes to begin traversal.
depth: Current traversal depth for child node viewing.
Returns:
A list of strings containing the tabbed traversal view. | 3.559726 | 3.691306 | 0.964354 |
with open(file, "r") as f:
data = json.load(f)
return cls.from_dict(data, lambdas) | def from_json_and_lambdas(cls, file: str, lambdas) | Builds a GrFN from a JSON object.
Args:
cls: The class variable for object creation.
file: Filename of a GrFN JSON file.
Returns:
type: A GroundedFunctionNetwork object. | 2.680429 | 3.092383 | 0.866784 |
functions = {d["name"]: d for d in data["functions"]}
G = nx.DiGraph()
scope_tree = nx.DiGraph()
def add_variable_node(
basename: str, parent: str, index: int, is_loop_index: bool = False
):
G.add_node(
f"{parent}::{basename}_{index}",
type="variable",
color="maroon",
parent=parent,
label=f"{basename}_{index}",
basename=basename,
is_loop_index=is_loop_index,
padding=15,
value=None,
)
def find_correct_scope(cur_scope, var_name):
if cur_scope.parent is None:
return cur_scope.name, -1
new_scope = cur_scope.parent
if var_name in new_scope.variables:
return new_scope.name, new_scope.variables[var_name]
else:
return find_correct_scope(new_scope, var_name)
def process_container(
scope, loop_index_variable: Optional[str] = None
):
for stmt in scope.body:
if "name" in stmt:
stmt_type = functions[stmt["name"]]["type"]
if stmt_type in ("assign", "condition", "decision"):
if stmt_type == "assign":
if "body" in functions[stmt["name"]]:
stmt_type = "literal"
output = stmt["output"]
scope.variables[output["variable"]] = output["index"]
add_variable_node(
output["variable"], scope.name, output["index"]
)
G.add_edge(
stmt["name"],
f"{scope.name}::{output['variable']}_{output['index']}",
)
ordered_inputs = list()
for input in stmt.get("input", []):
parent = scope.name
index = input["index"]
base_name = input["variable"]
if (
index == -1
and base_name != loop_index_variable
):
parent, index = find_correct_scope(
scope, base_name
)
add_variable_node(
base_name,
parent,
index,
base_name == loop_index_variable,
)
node_name = f"{parent}::{base_name}_{index}"
ordered_inputs.append(node_name)
G.add_edge(node_name, stmt["name"])
G.add_node(
stmt["name"],
type="function",
lambda_fn=getattr(lambdas, stmt["name"]),
func_inputs=ordered_inputs,
shape="rectangle",
parent=scope.name,
label=stmt_type[0].upper(),
padding=10,
)
elif stmt_type == "loop_plate":
index_variable = functions[stmt["name"]][
"index_variable"
]
scope_tree.add_node(stmt["name"], color="blue")
scope_tree.add_edge(scope.name, stmt["name"])
new_scope = ScopeNode(
functions[stmt["name"]], parent=scope
)
process_container(
new_scope, loop_index_variable=index_variable
)
else:
pass
elif "function" in stmt and stmt["function"] != "print":
scope_tree.add_node(stmt["function"], color="green")
scope_tree.add_edge(scope.name, stmt["function"])
new_scope = ScopeNode(
functions[stmt["function"]], parent=scope
)
process_container(new_scope)
root = data["start"]
scope_tree.add_node(root, color="green")
cur_scope = ScopeNode(functions[root])
process_container(cur_scope)
return cls(G, scope_tree) | def from_dict(cls, data: Dict, lambdas) | Builds a GrFN object from a set of extracted function data objects
and an associated file of lambda functions.
Args:
cls: The class variable for object creation.
data: A set of function data object that specify the wiring of a
GrFN object.
lambdas: [Module] A python module containing actual python
functions to be computed during GrFN execution.
Returns:
A GroundedFunctionNetwork object. | 2.546379 | 2.575062 | 0.988861 |
with open(python_file, "r") as f:
pySrc = f.read()
return cls.from_python_src(pySrc, lambdas_path, json_filename, stem) | def from_python_file(
cls, python_file, lambdas_path, json_filename: str, stem: str
) | Builds GrFN object from Python file. | 2.842518 | 2.543166 | 1.117708 |
asts = [ast.parse(pySrc)]
pgm_dict = genPGM.create_pgm_dict(
lambdas_path,
asts,
json_filename,
{"FileName": f"{stem}.py"}, # HACK
)
lambdas = importlib.__import__(stem + "_lambdas")
return cls.from_dict(pgm_dict, lambdas) | def from_python_src(
cls,
pySrc,
lambdas_path,
json_filename: str,
stem: str,
save_file: bool = False,
) | Builds GrFN object from Python source code. | 6.767515 | 6.373628 | 1.061799 |
stem = Path(fortran_file).stem
if tmpdir == "." and "/" in fortran_file:
tmpdir = Path(fortran_file).parent
preprocessed_fortran_file = f"{tmpdir}/{stem}_preprocessed.f"
lambdas_path = f"{tmpdir}/{stem}_lambdas.py"
json_filename = stem + ".json"
with open(fortran_file, "r") as f:
inputLines = f.readlines()
with open(preprocessed_fortran_file, "w") as f:
f.write(preprocessor.process(inputLines))
xml_string = sp.run(
[
"java",
"fortran.ofp.FrontEnd",
"--class",
"fortran.ofp.XMLPrinter",
"--verbosity",
"0",
preprocessed_fortran_file,
],
stdout=sp.PIPE,
).stdout
trees = [ET.fromstring(xml_string)]
comments = get_comments.get_comments(preprocessed_fortran_file)
os.remove(preprocessed_fortran_file)
xml_to_json_translator = translate.XMLToJSONTranslator()
outputDict = xml_to_json_translator.analyze(trees, comments)
pySrc = pyTranslate.create_python_source_list(outputDict)[0][0]
G = cls.from_python_src(pySrc, lambdas_path, json_filename, stem)
return G | def from_fortran_file(cls, fortran_file: str, tmpdir: str = ".") | Builds GrFN object from a Fortran program. | 3.794286 | 3.682528 | 1.030348 |
import tempfile
fp = tempfile.NamedTemporaryFile('w+t', delete=False, dir=dir)
fp.writelines(fortran_src)
fp.close()
G = cls.from_fortran_file(fp.name, dir)
os.remove(fp.name)
return G | def from_fortran_src(cls, fortran_src: str, dir: str = ".") | Create a GroundedFunctionNetwork instance from a string with raw
Fortran code.
Args:
fortran_src: A string with Fortran source code.
dir: (Optional) - the directory in which the temporary Fortran file
will be created (make sure you have write permission!) Defaults to
the current directory.
Returns:
A GroundedFunctionNetwork instance | 2.791124 | 2.632385 | 1.060302 |
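A usage sketch for from_fortran_src, assuming the OFP front end and the rest of the for2py pipeline are available on the path (program text is illustrative):
fortran_src = """
      program add
      integer :: x
      x = 2 + 3
      write (*,*) x
      end program add
"""
G = GroundedFunctionNetwork.from_fortran_src(fortran_src)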
for n in self.nodes():
if self.nodes[n]["type"] == "variable":
self.nodes[n]["value"] = None
elif self.nodes[n]["type"] == "function":
self.nodes[n]["func_visited"] = False | def clear(self) | Clear variable nodes for next computation. | 3.131371 | 2.701942 | 1.158933 |
if not isinstance(other, GroundedFunctionNetwork):
raise TypeError(
f"Expected GroundedFunctionNetwork, but got {type(other)}"
)
def shortname(var):
return var[var.find("::") + 2 : var.rfind("_")]
def shortname_vars(graph, shortname):
return [v for v in graph.nodes() if shortname in v]
this_var_nodes = [
shortname(n)
for (n, d) in self.nodes(data=True)
if d["type"] == "variable"
]
other_var_nodes = [
shortname(n)
for (n, d) in other.nodes(data=True)
if d["type"] == "variable"
]
shared_vars = set(this_var_nodes).intersection(set(other_var_nodes))
full_shared_vars = {
full_var
for shared_var in shared_vars
for full_var in shortname_vars(self, shared_var)
}
return ForwardInfluenceBlanket(self, full_shared_vars) | def to_FIB(self, other) | Creates a ForwardInfluenceBlanket object representing the
intersection of this model with the other input model.
Args:
other: The GroundedFunctionNetwork object to compare this model to.
Returns:
A ForwardInfluenceBlanket object to use for model comparison. | 2.658684 | 2.167815 | 1.226435 |
A = nx.nx_agraph.to_agraph(self)
A.graph_attr.update(
{"dpi": 227, "fontsize": 20, "fontname": "Menlo", "rankdir": "TB"}
)
A.node_attr.update({"fontname": "Menlo"})
def build_tree(cluster_name, root_graph):
subgraph_nodes = [
n[0]
for n in self.nodes(data=True)
if n[1]["parent"] == cluster_name
]
root_graph.add_nodes_from(subgraph_nodes)
subgraph = root_graph.add_subgraph(
subgraph_nodes,
name=f"cluster_{cluster_name}",
label=cluster_name,
style="bold, rounded",
)
for n in self.scope_tree.successors(cluster_name):
build_tree(n, subgraph)
root = [n for n, d in self.scope_tree.in_degree() if d == 0][0]
build_tree(root, A)
return A | def to_agraph(self) | Export to a PyGraphviz AGraph object. | 2.350634 | 2.327245 | 1.01005 |
CAG = self.to_CAG()
A = nx.nx_agraph.to_agraph(CAG)
A.graph_attr.update({"dpi": 227, "fontsize": 20, "fontname": "Menlo"})
A.node_attr.update(
{
"shape": "rectangle",
"color": "#650021",
"style": "rounded",
"fontname": "Gill Sans",
}
)
A.edge_attr.update({"color": "#650021", "arrowsize": 0.5})
return A | def to_CAG_agraph(self) | Returns a variable-only view of the GrFN in the form of an AGraph.
Returns:
type: A CAG constructed via variable influence in the GrFN object. | 2.39541 | 2.504429 | 0.95647 |
A = nx.nx_agraph.to_agraph(self.call_graph)
A.graph_attr.update({"dpi": 227, "fontsize": 20, "fontname": "Menlo"})
A.node_attr.update(
{"shape": "rectangle", "color": "#650021", "style": "rounded"}
)
A.edge_attr.update({"color": "#650021", "arrowsize": 0.5})
return A | def to_call_agraph(self) | Build a PyGraphviz AGraph object corresponding to a call graph of
functions. | 2.686612 | 2.502179 | 1.073709 |
# Abort run if covers does not match our expected cover set
if len(covers) != len(self.cover_nodes):
raise ValueError("Incorrect number of cover values.")
# Set the cover node values
for node_name, val in covers.items():
self.nodes[node_name]["value"] = val
return super().run(inputs, torch_size) | def run(
self,
inputs: Dict[str, Union[float, Iterable]],
covers: Dict[str, Union[float, Iterable]],
torch_size: Optional[int] = None,
) -> Union[float, Iterable] | Executes the FIB over a particular set of inputs and returns the
result.
Args:
inputs: Input set where keys are the names of input nodes in the
GrFN and each key points to a set of input values (or just one).
Returns:
A set of outputs from executing the GrFN, one for every set of
inputs. | 4.85941 | 5.160572 | 0.941642 |
args = self.inputs
Si = self.sobol_analysis(
num_samples,
{
"num_vars": len(args),
"names": args,
"bounds": [bounds[arg] for arg in args],
},
covers
)
S2 = Si["S2"]
(s2_max, v1, v2) = get_max_s2_sensitivity(S2)
x_var = args[v1]
y_var = args[v2]
search_space = [(x_var, bounds[x_var]), (y_var, bounds[y_var])]
preset_vals = {
arg: presets[arg]
for i, arg in enumerate(args)
if i != v1 and i != v2
}
X = np.linspace(*search_space[0][1], sizes[0])
Y = np.linspace(*search_space[1][1], sizes[1])
if use_torch:
Xm, Ym = torch.meshgrid(torch.tensor(X), torch.tensor(Y))
inputs = {n: torch.full_like(Xm, v) for n, v in presets.items()}
inputs.update({search_space[0][0]: Xm, search_space[1][0]: Ym})
Z = self.run(inputs, covers).numpy()
else:
Xm, Ym = np.meshgrid(X, Y)
Z = np.zeros((len(X), len(Y)))
for x, y in itertools.product(range(len(X)), range(len(Y))):
inputs = {n: v for n, v in presets.items()}
inputs.update({search_space[0][0]: X[x], search_space[1][0]: Y[y]})
Z[x][y] = self.run(inputs, covers)
return X, Y, Z, x_var, y_var | def S2_surface(self, sizes, bounds, presets, covers, use_torch=False,
num_samples = 10) | Calculates the sensitivity surface of a GrFN for the two variables with
the highest S2 index.
Args:
num_samples: Number of samples for sensitivity analysis.
sizes: Tuple of (number of x inputs, number of y inputs).
bounds: Set of bounds for GrFN inputs.
presets: Set of standard values for GrFN inputs.
Returns:
    A tuple (X, Y, Z, x_var, y_var), where X and Y are the vectors of
    evaluation values, Z is the numpy matrix of output evaluations, and
    x_var and y_var are the names of the two selected variables. | 2.440497 | 2.373929 | 1.028041 |
val = {"tag": root.tag, "args": []}
for node in root:
val["args"] += self.parseTree(node, state)
return [val] | def process_direct_map(self, root, state) -> List[Dict] | Handles tags that are mapped directly from xml to IR with no
additional processing other than recursive translation of any child
nodes. | 5.23695 | 5.560524 | 0.941809 |
if root.tag in self.AST_TAG_HANDLERS:
return self.AST_TAG_HANDLERS[root.tag](root, state)
elif root.tag in self.libRtns:
return self.process_libRtn(root, state)
else:
prog = []
for node in root:
prog += self.parseTree(node, state)
return prog | def parseTree(self, root, state: ParseState) -> List[Dict] | Parses the XML ast tree recursively to generate a JSON AST
which can be ingested by other scripts to generate Python
scripts.
Args:
root: The current root of the tree.
state: The current state of the tree defined by an object of the
ParseState class.
Returns:
ast: A JSON ast that defines the structure of the Fortran file. | 3.638767 | 3.80892 | 0.955328 |
for element in root.iter():
if element.tag == "function":
self.functionList.append(element.attrib["name"]) | def loadFunction(self, root) | Loads a list with all the functions in the Fortran File
Args:
root: The root of the XML ast tree.
Returns:
None
Does not return anything but populates a list (self.functionList) that
contains all the functions in the Fortran File. | 3.2985 | 3.077926 | 1.071663 |
outputDict = {}
ast = []
# Parse through the ast once to identify and grab all the functions
# present in the Fortran file.
for tree in trees:
self.loadFunction(tree)
# Parse through the ast tree a second time to convert the XML ast
# format to a format that can be used to generate Python statements.
for tree in trees:
ast += self.parseTree(tree, ParseState())
if self.entryPoint:
entry = {"program": self.entryPoint[0]}
else:
entry = {}
if self.functionList:
entry["function"] = self.functionList
if self.subroutineList:
entry["subroutine"] = self.subroutineList
# Load the functions list and Fortran ast to a single data structure
# which can be pickled and hence is portable across various scripts and
# usages.
outputDict["ast"] = ast
outputDict["functionList"] = self.functionList
outputDict["comments"] = comments
return outputDict | def analyze(
self, trees: List[ET.ElementTree], comments: OrderedDict
) -> Dict | Find the entry point for the Fortran file.
The entry point for a conventional Fortran file is always the PROGRAM
section. This 'if' statement checks for the presence of a PROGRAM
segment.
If not found, the entry point can be any of the functions or
subroutines in the file. So, all the functions and subroutines of the
program are listed and included as the possible entry point. | 7.186169 | 6.318536 | 1.137315 |
parser = ArgumentParser(
description="Dynamic Bayes Net Executable Model",
formatter_class=ArgumentDefaultsHelpFormatter,
)
def add_flag(short_arg: str, long_arg: str, help: str):
parser.add_argument(
"-" + short_arg, "--" + long_arg, help=help, action="store_true"
)
subparsers = parser.add_subparsers()
parser_execute = subparsers.add_parser("execute", help="Model execution")
parser_execute.set_defaults(func=execute)
# ==========================================================================
# Model execution options
# ==========================================================================
parser_execute.add_argument(
"--input_dressed_cag",
help="Path to the input dressed cag",
type=str,
default="delphi_model.pkl",
)
parser_execute.add_argument(
"--steps",
help="Number of time steps to take",
type=partial(positive_int, "steps"),
default=5,
)
parser_execute.add_argument(
"--samples",
help="Number of sequences to sample",
type=partial(positive_int, "samples"),
default=100,
)
parser_execute.add_argument(
"--output_sequences",
help="Output file containing sampled sequences",
type=str,
default="dbn_sampled_sequences.csv",
)
parser_execute.add_argument(
"--input_variables",
help="Path to the variables of the input dressed cag",
type=str,
default="bmi_config.txt",
)
args = parser.parse_args()
if len(sys.argv) == 1:
parser.print_help()
else:
args.func(args) | def main() | Run the CLI. | 2.680284 | 2.676215 | 1.001521 |
df = pd.read_csv("south_sudan_data_fao.csv")
gb = df.groupby("Element")
d = [
{
"events": [
{
k: [
{e: [process_variable_name(k, e)]}
for e in list(set(gb.get_group(k)["Item"].tolist()))
]
}
for k in gb.groups.keys()
]
}
]
yaml = YAML()
yaml.default_flow_style = False
with open("fao_variable_ontology.yml", "w") as f:
yaml.dump(d, f) | def construct_FAO_ontology() | Construct FAO variable ontology for use with Eidos. | 4.412187 | 4.002232 | 1.102432 |
return create_statement_inspection_table(
G[source][target]["InfluenceStatements"]
) | def inspect_edge(G: AnalysisGraph, source: str, target: str) | 'Drill down' into an edge in the analysis graph and inspect its
provenance. This function returns a statement inspection table.
Args:
G
source
target | 43.430504 | 31.38369 | 1.383856 |
return chain.from_iterable(
[
[repr(e.text) for e in s.evidence]
for s in G.edges[source, target]["InfluenceStatements"]
]
) | def _get_edge_sentences(
G: AnalysisGraph, source: str, target: str
) -> List[str] | Return the sentences that led to the construction of a specified edge.
Args:
G
source: The source of the edge.
target: The target of the edge. | 6.438461 | 7.178972 | 0.89685 |
if type_str == "container":
return NodeType.CONTAINER
elif type_str == "loop_plate":
return NodeType.LOOP
elif type_str == "assign":
return NodeType.ASSIGN
elif type_str == "condition":
return NodeType.CONDITION
elif type_str == "decision":
return NodeType.DECISION
else:
raise ValueError("Unrecognized type string: ", type_str) | def get_node_type(type_str) | Returns the NodeType given a name of a JSON function object. | 2.667073 | 2.533612 | 1.052676 |
out_format_list = []
for type_item in type_list:
item_format = default_output_format(type_item)
out_format_list.append(item_format)
return out_format_list | def list_output_formats(type_list) | This function takes a list of type names and returns a list of
format specifiers for list-directed output of values of those types. | 2.537666 | 2.760731 | 0.919201 |
data_type = []
for item in type_list:
match = re.match(r"(\d+)(.+)", item)
if not match:
reps = 1
if item[0] in "FfEegG":
data_type.append("REAL")
elif item[0] in "Ii":
data_type.append("INTEGER")
else:
reps = match.group(1)
fmt = match.group(2)
if "(" in fmt and "," in fmt:
fmt = fmt[1:-1].split(",")
elif "(" in fmt:
fmt = [fmt[1:-1]]
else:
fmt = [fmt]
for i in range(int(reps)):
for ft in fmt:
if ft[0] in "FfEegG":
data_type.append("REAL")
elif ft[0] in "Ii":
data_type.append("INTEGER")
return data_type | def list_data_type(type_list) | This function takes a list of format specifiers and returns a list of data
types represented by the format specifiers. | 2.500265 | 2.382027 | 1.049638 |
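A worked example of list_data_type, traced through the code above (pure 'X' skip fields contribute no data type):
list_data_type(["I5", "2X", "F4.1"])
# -> ["INTEGER", "REAL"]
list_data_type(["3(I2,F6.2)"])
# -> ["INTEGER", "REAL", "INTEGER", "REAL", "INTEGER", "REAL"]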
format_list = self._format_list
self._re_cvt = self.match_input_fmt(format_list)
regexp0_str = "".join([subs[0] for subs in self._re_cvt])
self._regexp_str = regexp0_str
self._re = re.compile(regexp0_str)
self._match_exps = [
subs[1] for subs in self._re_cvt if subs[1] is not None
]
self._divisors = [subs[2] for subs in self._re_cvt if subs[2] is not None]
self._in_cvt_fns = [
subs[3] for subs in self._re_cvt if subs[3] is not None
]
self._read_line_init = True | def init_read_line(self) | init_read_line() initializes fields relevant to input matching | 3.584789 | 3.390621 | 1.057266 |
format_list = self._format_list
output_info = self.gen_output_fmt(format_list)
self._output_fmt = "".join([sub[0] for sub in output_info])
self._out_gen_fmt = [sub[1] for sub in output_info if sub[1] is not None]
self._out_widths = [sub[2] for sub in output_info if sub[2] is not None]
self._write_line_init = True | def init_write_line(self) | init_write_line() initializes fields relevant to output generation | 3.539822 | 3.374861 | 1.04888 |
if not self._read_line_init:
self.init_read_line()
match = self._re.match(line)
assert match is not None, f"Format mismatch (line = {line})"
matched_values = []
for i in range(self._re.groups):
cvt_re = self._match_exps[i]
cvt_div = self._divisors[i]
cvt_fn = self._in_cvt_fns[i]
match_str = match.group(i + 1)
val = None  # fallback if the conversion below fails
match0 = re.match(cvt_re, match_str)
if match0 is not None:
if cvt_fn == "float":
if "." in match_str:
val = float(match_str)
else:
val = int(match_str) / cvt_div
elif cvt_fn == "int":
val = int(match_str)
else:
sys.stderr.write(
f"Unrecognized conversion function: {cvt_fn}\n"
)
else:
sys.stderr.write(
f"Format conversion failed: {match_str}\n"
)
matched_values.append(val)
return tuple(matched_values) | def read_line(self, line) | Match a line of input according to the format specified and return a
tuple of the resulting values | 2.906351 | 2.79686 | 1.039148 |
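A worked example of read_line, assuming the enclosing class was constructed from the format list ['I5', '2X', 'F4.1'] (fmt_obj is a hypothetical instance):
# Derived extraction regexp: "(.{5})..(.{4})"
fmt_obj.read_line("   42  3.14")
# -> (42, 3.14)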
if not self._write_line_init:
self.init_write_line()
if len(self._out_widths) > len(values):
raise For2PyError(f"ERROR: too few values for format {self._format_list}\n")
out_strs = []
for i in range(len(self._out_widths)):
out_fmt = self._out_gen_fmt[i]
out_width = self._out_widths[i]
out_val = out_fmt.format(values[i])
if len(out_val) > out_width: # value too big for field
out_val = "*" * out_width
out_strs.append(out_val)
out_str_exp = (
'"' + self._output_fmt + '".format' + str(tuple(out_strs))
)
out_str = eval(out_str_exp)
return out_str + "\n" | def write_line(self, values) | Process a list of values according to the format specified to generate
a line of output. | 3.936625 | 3.764475 | 1.04573 |
rexp_list = []
for fmt in fmt_list:
rexp_list.extend(self.match_input_fmt_1(fmt))
return rexp_list | def match_input_fmt(self, fmt_list) | Given a list of Fortran format specifiers, e.g., ['I5', '2X', 'F4.1'],
this function constructs a list of tuples for matching an input
string against those format specifiers. | 3.713009 | 3.517031 | 1.055722 |
# first, remove any surrounding space
fmt = fmt.strip()
# get any leading digits indicating repetition
match = re.match(r"(\d+)(.+)", fmt)
if match is None:
reps = 1
else:
reps = int(match.group(1))
fmt = match.group(2)
if fmt[0] == "(": # process parenthesized format list recursively
fmt = fmt[1:-1]
fmt_list = fmt.split(",")
rexp = self.match_input_fmt(fmt_list)
else:
if fmt[0] in "iI": # integer
sz = fmt[1:]
xtract_rexp = "(.{" + sz + "})" # r.e. for extraction
leading_sp = " *"
optional_sign = "-?"
rexp0 = "\d+"
rexp1 = leading_sp + optional_sign + rexp0 # r.e. for matching
divisor = 1
rexp = [(xtract_rexp, rexp1, divisor, "int")]
elif fmt[0] in "xX": # skip
xtract_rexp = "." # r.e. for extraction
rexp = [(xtract_rexp, None, None, None)]
elif fmt[0] in "fF": # floating point
idx0 = fmt.find(".")
sz = fmt[1:idx0]
divisor = 10 ** (int(fmt[idx0 + 1 :]))
xtract_rexp = "(.{" + sz + "})" # r.e. for extraction
leading_sp = " *"
optional_sign = "-?"
rexp0 = "\d+(\.\d+)?"
rexp1 = leading_sp + optional_sign + rexp0 # r.e. for matching
rexp = [(xtract_rexp, rexp1, divisor, "float")]
else:
raise For2PyError(
f"ERROR: Unrecognized format specifier {fmt}\n"
)
# replicate the regular expression by the repetition factor in the format
rexp *= reps
return rexp | def match_input_fmt_1(self, fmt) | Given a single format specifier, e.g., '2X', 'I5', etc., this function
constructs a list of tuples for matching against that specifier. Each
element of this list is a tuple
(xtract_re, cvt_re, divisor, cvt_fn)
where:
xtract_re is a regular expression that extracts an input field of
the requisite width;
cvt_re is a regular expression that matches the character sequence
extracted by xtract_re against the specified format;
divisor is the value to divide by in order to get the appropriate
number of decimal places if a decimal point is not given
in the input value (meaningful only for floats); and
cvt_fn is a string denoting the function to be used to convert the
matched string to a value. | 3.411802 | 3.164794 | 1.078049 |
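Worked examples of the tuples produced for single specifiers, derived from the code above (fmt_obj is a hypothetical instance of the enclosing class):
fmt_obj.match_input_fmt_1("F4.1")
# -> [("(.{4})", " *-?\d+(\.\d+)?", 10, "float")]
fmt_obj.match_input_fmt_1("2X")
# -> [(".", None, None, None), (".", None, None, None)]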
rexp_list = []
for fmt in fmt_list:
rexp_list.extend(self.gen_output_fmt_1(fmt))
return rexp_list | def gen_output_fmt(self, fmt_list) | Given a list of Fortran format specifiers, e.g., ['I5', '2X', 'F4.1'],
this function constructs a list of tuples for constructing an output
string based on those format specifiers. | 3.837218 | 3.757325 | 1.021263 |
# first, remove any surrounding space
fmt = fmt.strip()
# get any leading digits indicating repetition
match = re.match(r"(\d+)(.+)", fmt)
if match is None:
reps = 1
else:
reps = int(match.group(1))
fmt = match.group(2)
if fmt[0] == "(": # process parenthesized format list recursively
fmt = fmt[1:-1]
fmt_list = fmt.split(",")
rexp = self.gen_output_fmt(fmt_list)
else:
if fmt[0] in "iI": # integer
sz = fmt[1:]
gen_fmt = "{}"
cvt_fmt = "{:" + str(sz) + "d}"
rexp = [(gen_fmt, cvt_fmt, int(sz))]
elif fmt[0] in "xX":
gen_fmt = " "
rexp = [(gen_fmt, None, None)]
elif fmt[0] in "eEfFgG": # various floating point formats
idx0 = fmt.find(".")
sz = fmt[1:idx0]
suffix = fmt[idx0 + 1 :]
# The 'E' and 'G' formats can optionally specify the width of
# the exponent, e.g.: 'E15.3E2'. For now we ignore any such
# exponent width -- but if it's there, we need to extract
# the sequence of digits before it.
m = re.match(r"(\d+).*", suffix)
assert m is not None, f"Improper format? '{fmt}'"
prec = m.group(1)
gen_fmt = "{}"
cvt_fmt = "{:" + sz + "." + prec + fmt[0] + "}"
rexp = [(gen_fmt, cvt_fmt, int(sz))]
elif fmt[0] in "pP": # scaling factor
# For now we ignore scaling: there are lots of other things we
# need to spend time on. To fix later if necessary.
rest_of_fmt = fmt[1:]
rexp = self.gen_output_fmt_1(rest_of_fmt)
elif fmt[0] in "'\"": # character string
sz = len(fmt) - 2 # -2 for the quote at either end
gen_fmt = fmt[1:-1]
rexp = [(gen_fmt, None, None)]
elif fmt[0] == "/": # newlines
gen_fmt = "\\n" * len(fmt)
rexp = [(gen_fmt, None, None)]
else:
raise For2PyError(
f"ERROR: Unrecognized format specifier {fmt}\n"
)
# replicate the regular expression by the repetition factor in the format
rexp *= reps
return rexp | def gen_output_fmt_1(self, fmt) | Given a single format specifier, gen_output_fmt_1() constructs and returns
a list of tuples for matching against that specifier.
Each element of this list is a tuple
(gen_fmt, cvt_fmt, sz)
where:
gen_fmt is the Python format specifier for assembling this value into
the string constructed for output;
cvt_fmt is the Python format specifier for converting this value into
a string that will be assembled into the output string; and
sz is the width of this field. | 4.166907 | 3.896649 | 1.069357 |
adjective_response_dict = {}
all_θs = []
# Setting σ_X and σ_Y that are in Eq. 1.21 of the model document.
# This assumes that the real-valued variables representing the abstract
# concepts are on the order of 1.0.
# TODO Make this more general.
σ_X = σ_Y = 0.1
for stmt in e[2]["InfluenceStatements"]:
for ev in stmt.evidence:
# To account for discrepancy between Hume and Eidos extractions
if ev.annotations.get("subj_adjectives") is not None:
for subj_adjective in ev.annotations["subj_adjectives"]:
if (
subj_adjective in gb.groups
and subj_adjective not in adjective_response_dict
):
adjective_response_dict[subj_adjective] = get_respdevs(
gb.get_group(subj_adjective)
)
rs_subj = stmt.subj_delta[
"polarity"
] * adjective_response_dict.get(subj_adjective, rs)
for obj_adjective in ev.annotations["obj_adjectives"]:
if (
obj_adjective in gb.groups
and obj_adjective not in adjective_response_dict
):
adjective_response_dict[
obj_adjective
] = get_respdevs(gb.get_group(obj_adjective))
rs_obj = stmt.obj_delta[
"polarity"
] * adjective_response_dict.get(obj_adjective, rs)
xs1, ys1 = np.meshgrid(rs_subj, rs_obj, indexing="xy")
θs = np.arctan2(σ_Y * ys1.flatten(), xs1.flatten())
all_θs.append(θs)
# Prior
xs1, ys1 = np.meshgrid(
stmt.subj_delta["polarity"] * rs,
stmt.obj_delta["polarity"] * rs,
indexing="xy",
)
# TODO - make the setting of σ_X and σ_Y more automated
θs = np.arctan2(σ_Y * ys1.flatten(), σ_X * xs1.flatten())
# all_θs.append(θs)
# return gaussian_kde(np.concatenate(all_θs))
if len(all_θs) == 0:
all_θs.append(θs)
return gaussian_kde(all_θs)
else:
return gaussian_kde(np.concatenate(all_θs)) | def constructConditionalPDF(
gb, rs: np.ndarray, e: Tuple[str, str, Dict]
) -> gaussian_kde | Construct a conditional probability density function for a particular
AnalysisGraph edge. | 3.086696 | 3.089087 | 0.999226 |
xs = x.replace(r"\/", "|").split("/")
xs = [x.replace("|", "/") for x in xs]
if xs[0] == "FAO":
return " ".join(xs[2:]), xs[0]
else:
return xs[-1], xs[0] | def get_variable_and_source(x: str) | Process the variable name to make it more human-readable. | 5.223044 | 4.482983 | 1.165082 |
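Worked examples of the splitting rule (hypothetical indicator paths):
get_variable_and_source("FAO/Crops/Production/Maize")
# -> ("Production Maize", "FAO")
get_variable_and_source("WDI/Battle-related deaths")
# -> ("Battle-related deaths", "WDI")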
df = pd.read_sql_table("concept_to_indicator_mapping", con=engine)
gb = df.groupby("Concept")
_dict = {
k: [get_variable_and_source(x) for x in take(n, v["Indicator"].values)]
for k, v in gb
}
return _dict | def construct_concept_to_indicator_mapping(n: int = 1) -> Dict[str, List[str]] | Create a dictionary mapping high-level concepts to low-level indicators
Args:
n: Number of indicators to return
Returns:
Dictionary that maps concept names to lists of indicator names. | 5.540204 | 6.089864 | 0.909742 |
self.config = builder.configuration.mortality
self.population_view = builder.population.get_view(['alive'], query="alive == 'alive'")
self.randomness = builder.randomness.get_stream('mortality')
self.mortality_rate = builder.value.register_rate_producer('mortality_rate', source=self.base_mortality_rate)
builder.event.register_listener('time_step', self.determine_deaths) | def setup(self, builder: Builder) | Performs this component's simulation setup.
The ``setup`` method is automatically called by the simulation
framework. The framework passes in a ``builder`` object which
provides access to a variety of framework subsystems and metadata.
Parameters
----------
builder :
Access to simulation tools and subsystems. | 8.704159 | 8.746886 | 0.995115 |
return pd.Series(self.config.mortality_rate, index=index) | def base_mortality_rate(self, index: pd.Index) -> pd.Series | Computes the base mortality rate for every individual.
Parameters
----------
index :
A representation of the simulants to compute the base mortality
rate for.
Returns
-------
The base mortality rate for all simulants in the index. | 7.005514 | 9.466977 | 0.739995 |
effective_rate = self.mortality_rate(event.index)
effective_probability = 1 - np.exp(-effective_rate)
draw = self.randomness.get_draw(event.index)
affected_simulants = draw < effective_probability
self.population_view.update(pd.Series('dead', index=event.index[affected_simulants])) | def determine_deaths(self, event: Event) | Determines who dies each time step.
Parameters
----------
event :
An event object emitted by the simulation containing an index
representing the simulants affected by the event and timing
information. | 6.541428 | 5.900089 | 1.1087 |
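The draw comparison above uses the standard exponential rate-to-probability conversion; a quick numeric check:
import numpy as np
rate = 0.01                    # deaths per person per time step
p = 1 - np.exp(-rate)          # probability of dying within one step
# p ~= 0.00995: for small rates the probability is close to the rate.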
components = []
for c in component_list:
path, args_plus = c.split('(')
cleaned_args = _clean_args(args_plus[:-1].split(','), path)
components.append((path, cleaned_args))
return components | def _prep_components(component_list: Sequence[str]) -> List[Tuple[str, Tuple[str]]] | Transform component description strings into tuples of component paths and required arguments.
Parameters
----------
component_list :
The component descriptions to transform.
Returns
-------
List of component/argument tuples. | 3.925388 | 4.245823 | 0.924529 |
if isinstance(component_config, ConfigTree):
component_list = self.parse_component_config(component_config.to_dict())
else: # Components were specified in a list rather than a tree.
component_list = component_config
component_list = _prep_components(component_list)
return _import_and_instantiate_components(component_list) | def get_components(self, component_config: Union[ConfigTree, List]) -> List | Extracts component specifications from configuration information and returns initialized components.
Parameters
----------
component_config :
A hierarchical component specification blob. This configuration information needs to be parsable
into a full import path and a set of initialization arguments by the ``parse_component_config``
method.
Returns
-------
List
A list of initialized components. | 4.161067 | 3.953768 | 1.052431 |
return _parse_component_config(component_config) | def parse_component_config(self, component_config: Dict[str, Union[Dict, List]]) -> List[str] | Parses a hierarchical component specification into a list of standardized component definitions.
This default parser expects component configurations as a list of dicts. Each dict at the top level
corresponds to a different package and has a single key. This key may be just the name of the package
or a Python style import path to the module in which components live. The values of the top level dicts
are a list of dicts or strings. If dicts, the keys are another step along the import path. If strings,
the strings are representations of calls to the class constructor of components to be generated. This
pattern may be arbitrarily nested.
Parameters
----------
component_config :
A hierarchical component specification blob.
Returns
-------
List
A list of standardized component definitions. Component definition strings are specified as
``'absolute.import.path.ClassName("argument1", "argument2", ...)'``. | 7.506727 | 15.112566 | 0.496721 |
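A sketch of the input/output shape this docstring describes (hypothetical package and class names; the helper it delegates to is not shown in this row):
component_config = {
    "my_package.components": ['Mortality()', 'Disease("flu")']
}
# Expected flattened result:
# ['my_package.components.Mortality()',
#  'my_package.components.Disease("flu")']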
# We first split file into pieces
searchunks = self._split_file()
if searchunks:
# And then we parse pieces into meaningful data
usage = self._parse_file(searchunks)
if 'CPU' in usage:
return False
self._sarinfo = usage
del usage
return True
else:
return False | def load_file(self) | Loads SAR format logfile in ASCII format (sarXX).
:return: ``True`` if loading and parsing of file went fine, \
``False`` if it failed (at any point) | 14.161588 | 11.677051 | 1.212771 |
try:
test = self._sarinfo["CPU"]
del test
except KeyError:
file_parsed = self.load_file()
if file_parsed:
return self._sarinfo
else:
return False
except:
### DEBUG
traceback.print_exc()
return False
return self._sarinfo | def get_sar_info(self) | Returns parsed sar info
:return: ``Dictionary``-style list of SAR data | 7.60017 | 7.980131 | 0.952387 |
# Filename passed checks through __init__
if ((self.__filename and os.access(self.__filename, os.R_OK))
or data != ''):
fhandle = None
if data == '':
try:
fhandle = os.open(self.__filename, os.O_RDONLY)
except OSError:
print(("Couldn't open file %s" % self.__filename))
fhandle = None
if fhandle or data != '':
datalength = 0
# Dealing with mmap difference on Windows and Linux
if platform.system() == 'Windows':
dataprot = mmap.ACCESS_READ
else:
dataprot = mmap.PROT_READ
if data != '':
fhandle = -1
datalength = len(data)
if platform.system() == 'Windows':
dataprot = mmap.ACCESS_READ | mmap.ACCESS_WRITE
else:
dataprot = mmap.PROT_READ | mmap.PROT_WRITE
try:
if platform.system() == 'Windows':
sarmap = mmap.mmap(
fhandle, length=datalength, access=dataprot
)
else:
sarmap = mmap.mmap(
fhandle, length=datalength, prot=dataprot
)
if data != '':
sarmap.write(data)
sarmap.flush()
sarmap.seek(0, os.SEEK_SET)
except (TypeError, IndexError):
if data == '':
os.close(fhandle)
traceback.print_exc()
# sys.exit(-1)
return False
# Here we'll store chunks of SAR file, unparsed
searchunks = []
oldchunkpos = 0
dlpos = sarmap.find('\n\n', 0)
size = 0
if data == '':
# We can do mmap.size() only on read-only mmaps
size = sarmap.size()
else:
# Otherwise, if data was passed to us,
# we measure its length
size = len(data)
# oldchunkpos = dlpos
while dlpos > -1: # mmap.find() returns -1 on failure.
tempchunk = sarmap.read(dlpos - oldchunkpos)
searchunks.append(tempchunk.strip())
# We remember position, add 2 for 2 DD's
# (newspaces in production). We have to remember
# relative value
oldchunkpos += (dlpos - oldchunkpos) + 2
# We position to new place, to be behind \n\n
# we've looked for.
try:
sarmap.seek(2, os.SEEK_CUR)
except ValueError:
print(('Out of bounds (%s)!\n' % (sarmap.tell())))
# Now we repeat find.
dlpos = sarmap.find("\n\n")
# If it wasn't the end of file, we want last piece of it
if oldchunkpos < size:
tempchunk = sarmap[oldchunkpos:]
searchunks.append(tempchunk.strip())
sarmap.close()
if fhandle != -1:
os.close(fhandle)
if searchunks:
return searchunks
else:
return False
return False | def _split_file(self, data='') | Splits SAR output or SAR output file (in ASCII format) in order to
extract info we need for it, in the format we want.
:param data: Input data instead of file
:type data: str.
:return: ``List``-style of SAR file sections separated by
the type of info they contain (SAR file sections) without
parsing what is exactly what at this point | 4.815315 | 4.700634 | 1.024397 |
usage = {}
output = {}
# If sar_parts is a list
if type(sar_parts) is list:
restart_pattern = re.compile(PATTERN_RESTART)
for PATTERNSNAME in ALL_PATTERNS:
patterns = ALL_PATTERNS[PATTERNSNAME]
rgxpattern = re.compile(patterns['PATTERN'])
for part in sar_parts:
if rgxpattern.search(part):
if PATTERNSNAME in usage:
usage[PATTERNSNAME] += '\n' + part
else:
usage[PATTERNSNAME] = part
try:
first_line = part.split('\n')[0]
except IndexError:
first_line = part
self.__fields[PATTERNSNAME] = self.__find_column(patterns['FIELDS'], first_line)
# Try to match restart time
if restart_pattern.search(part):
pieces = part.split()
self.__restart_times.append(pieces[0])
del pieces
del sar_parts
# Now we have parts pulled out and combined, do further
# processing.
for PATTERNSNAME in ALL_PATTERNS:
patterns = ALL_PATTERNS[PATTERNSNAME]
output[PATTERNSNAME] = self.__split_info(usage[PATTERNSNAME], PATTERNSNAME, patterns)
del usage
return output
return output | def _parse_file(self, sar_parts) | Parses splitted file to get proper information from split parts.
:param sar_parts: Array of SAR file parts
:return: ``Dictionary``-style info (but still non-parsed) \
from SAR file, split into sections we want to check | 4.375406 | 4.441741 | 0.985066 |
part_parts = part_first_line.split()
### DEBUG
# print("Parts: %s" % (part_parts))
return_dict = {}
counter = 0
for piece in part_parts:
for colname in column_names:
pattern_re = re.compile(colname)
if pattern_re.search(piece):
return_dict[colname] = counter
break
counter += 1
# Verify the content of the return dictionary, fill the blanks
# with -1s :-)
for colver in column_names:
try:
tempval = return_dict[colver]
del tempval
except KeyError:
return_dict[colver] = None
return return_dict | def __find_column(self, column_names, part_first_line) | Finds the column for the column_name in sar type definition,
and returns its index.
:param column_names: Names of the column we look for (regex) put in
the list
:param part_first_line: First line of the SAR part
:return: ``Dictionary`` of names => position, None for not present | 4.376092 | 4.2696 | 1.024942 |
pattern = patterns['PATTERN']
if pattern == '':
return False
return_dict = {}
pattern_re = re.compile(pattern)
for part_line in info_part.split('\n'):
if part_line.strip() != '' and not pattern_re.search(part_line):
# Take care of AM/PM timestamps in SAR file
is_24hr = True
is_AM = False
if part_line[9:11] == 'AM':
is_24hr = False
is_AM = True
elif part_line[9:11] == 'PM':
is_24hr = False
is_AM = False
if is_24hr is False:
part_line = ('%s_%s XX %s' % (part_line[:8], part_line[9:11], part_line[12:]))
# Line is not empty, nor is it a header.
# let's hit the road Jack!
elems = part_line.split()
full_time = elems[0].strip()
if full_time != "Average:":
# Convert time to 24hr format if needed
if is_24hr is False:
full_time = full_time[:-3]
# 12 is a bitch in AM/PM notation
if full_time[:2] == '12':
if is_AM is True:
full_time = ('%s:%s' % ('00', full_time[3:]))
is_AM = not is_AM
if is_AM is False and full_time[0:2] != '00':
hours = int(full_time[:2]) + 12
hours = ('%02d' % (hours,))
full_time = ('%s:%s' % (hours, full_time[3:]))
try:
blah = return_dict[full_time]
del blah
except KeyError:
return_dict[full_time] = {}
fields = self.__fields[patternsname]
pairs = patterns["PAIRS"]
for sectionname in pairs.keys():
value = elems[fields[pairs[sectionname]]]
if sectionname == 'membuffer' or \
sectionname == 'memcache' or \
sectionname == 'memfree' or \
sectionname == 'memused' or \
sectionname == 'swapfree' or \
sectionname == 'swapused':
value = int(value)
else:
value = float(value)
if patternsname == 'CPU':
cpuid = elems[(1 if is_24hr is True else 2)]
try:
blah = return_dict[full_time][cpuid]
del blah
except KeyError:
return_dict[full_time][cpuid] = {}
return_dict[full_time][cpuid][sectionname] = \
value
else:
return_dict[full_time][sectionname] = value
return return_dict | def __split_info(self, info_part, patternsname, patterns) | Splits info from SAR parts into logical stuff :-)
:param info_part: Part of SAR output we want to split into usable data
:param patternsname: ???
:param patterns: ???
:return: ``List``-style info from SAR files, now finally \
completely parsed into meaningful data for further processing | 3.319307 | 3.269555 | 1.015217 |
if os.access(self.__filename, os.R_OK):
# Read first line of the file
try:
sar_file = open(self.__filename, "r")
except OSError:
### DEBUG
traceback.print_exc()
return False
except:
### DEBUG
traceback.print_exc()
return False
firstline = sar_file.readline()
info = firstline.split()
sar_file.close()
try:
self.__file_date = info[3]
except IndexError:
self.__file_date = ''
return False
except:
### DEBUG
traceback.print_exc()
return False
return True
return False | def __get_filedate(self) | Parses (extracts) date of SAR data, from the SAR output file itself.
:return: ISO-style (YYYY-MM-DD) date from SAR file | 3.062851 | 2.984595 | 1.02622 |
if len(transition_set) == 0 or index.empty:
return
outputs, decisions = transition_set.choose_new_state(index)
groups = _groupby_new_state(index, outputs, decisions)
if groups:
for output, affected_index in sorted(groups, key=lambda x: str(x[0])):
if output == 'null_transition':
pass
elif isinstance(output, Transient):
if not isinstance(output, State):
raise ValueError('Invalid transition output: {}'.format(output))
output.transition_effect(affected_index, event_time, population_view)
output.next_state(affected_index, event_time, population_view)
elif isinstance(output, State):
output.transition_effect(affected_index, event_time, population_view)
else:
raise ValueError('Invalid transition output: {}'.format(output)) | def _next_state(index, event_time, transition_set, population_view) | Moves a population between different states using information from a `TransitionSet`.
Parameters
----------
index : iterable of ints
An iterable of integer labels for the simulants.
event_time : pandas.Timestamp
When this transition is occurring.
transition_set : TransitionSet
A set of potential transitions available to the simulants.
population_view : vivarium.framework.population.PopulationView
A view of the internal state of the simulation. | 3.614018 | 3.512548 | 1.028888 |
output_map = {o: i for i, o in enumerate(outputs)}
groups = pd.Series(index).groupby([output_map[d] for d in decisions])
results = [(outputs[i], pd.Index(sub_group.values)) for i, sub_group in groups]
selected_outputs = [o for o, _ in results]
for output in outputs:
if output not in selected_outputs:
results.append((output, pd.Index([])))
return results | def _groupby_new_state(index, outputs, decisions) | Groups the simulants in the index by their new output state.
Parameters
----------
index : iterable of ints
An iterable of integer labels for the simulants.
outputs : iterable
A list of possible output states.
decisions : `pandas.Series`
A series containing the name of the next state for each simulant in the index.
Returns
-------
iterable of 2-tuples
The first item in each tuple is the name of an output state and the second item
is a `pandas.Index` representing the simulants to transition into that state. | 3.090818 | 3.136615 | 0.985399 |
return _next_state(index, event_time, self.transition_set, population_view) | def next_state(self, index, event_time, population_view) | Moves a population between different states using information this state's `transition_set`.
Parameters
----------
index : iterable of ints
An iterable of integer labels for the simulants.
event_time : pandas.Timestamp
When this transition is occurring.
population_view : vivarium.framework.population.PopulationView
A view of the internal state of the simulation. | 6.300382 | 5.41988 | 1.162458 |
population_view.update(pd.Series(self.state_id, index=index))
self._transition_side_effect(index, event_time) | def transition_effect(self, index, event_time, population_view) | Updates the simulation state and triggers any side-effects associated with entering this state.
Parameters
----------
index : iterable of ints
An iterable of integer labels for the simulants.
event_time : pandas.Timestamp
The time at which this transition occurs.
population_view : `vivarium.framework.population.PopulationView`
A view of the internal state of the simulation. | 8.48208 | 7.10053 | 1.19457 |
t = Transition(self, output, probability_func=probability_func, triggered=triggered)
self.transition_set.append(t)
return t | def add_transition(self, output,
probability_func=lambda index: np.ones(len(index), dtype=float),
triggered=Trigger.NOT_TRIGGERED) | Builds a transition from this state to the given state.
output : State
The end state after the transition. | 2.851377 | 4.146368 | 0.687681 |
builder.components.add_components(self.transitions)
self.random = builder.randomness.get_stream(self.key) | def setup(self, builder) | Performs this component's simulation setup and returns sub-components.
Parameters
----------
builder : `engine.Builder`
Interface to several simulation tools including access to common random
number generation, in particular.
Returns
-------
iterable
This component's sub-components. | 17.610348 | 16.883606 | 1.043044 |
outputs, probabilities = zip(*[(transition.output_state, np.array(transition.probability(index)))
for transition in self.transitions])
probabilities = np.transpose(probabilities)
outputs, probabilities = self._normalize_probabilities(outputs, probabilities)
return outputs, self.random.choice(index, outputs, probabilities) | def choose_new_state(self, index) | Chooses a new state for each simulant in the index.
Parameters
----------
index : iterable of ints
An iterable of integer labels for the simulants.
Returns
-------
outputs : list
The possible end states of this set of transitions.
decisions: `pandas.Series`
A series containing the name of the next state for each simulant in the index. | 5.825729 | 5.372245 | 1.084412 |
outputs = list(outputs)
total = np.sum(probabilities, axis=1)
if self.allow_null_transition or not np.any(total):
if np.any(total > 1+1e-08): # Accommodate rounding errors
raise ValueError(
"Null transition requested with un-normalized probability weights: {}".format(probabilities))
total[total > 1] = 1 # Correct allowed rounding errors.
probabilities = np.concatenate([probabilities, (1-total)[:, np.newaxis]], axis=1)
outputs.append('null_transition')
return outputs, probabilities/(np.sum(probabilities, axis=1)[:, np.newaxis]) | def _normalize_probabilities(self, outputs, probabilities) | Normalize probabilities to sum to 1 and add a null transition if desired.
Parameters
----------
outputs : iterable
List of possible end states corresponding to this container's transitions.
probabilities : iterable of iterables
A set of probability weights whose columns correspond to the end states in `outputs`
and whose rows correspond to each simulant undergoing the transition.
Returns
-------
outputs: list
The original output list expanded to include a null transition (a transition back
to the starting state) if requested.
probabilities : ndarray
The original probabilities rescaled to sum to 1 and potentially expanded to
include a null transition weight. | 4.894217 | 4.393651 | 1.11393 |
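A standalone worked sketch (illustrative numbers) of the normalization arithmetic: weights that sum to less than 1 have the remainder assigned to a null transition, then each row is rescaled to sum to 1:

```python
import numpy as np

# Two simulants, two candidate states with weights summing to < 1.
probabilities = np.array([[0.2, 0.3],
                          [0.5, 0.25]])
total = np.sum(probabilities, axis=1)             # [0.5, 0.75]
null_weight = (1 - total)[:, np.newaxis]          # remainder goes to 'null_transition'
expanded = np.concatenate([probabilities, null_weight], axis=1)
normalized = expanded / np.sum(expanded, axis=1)[:, np.newaxis]
print(normalized)  # rows now sum to 1: [[0.2, 0.3, 0.5], [0.5, 0.25, 0.25]]
```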
builder.components.add_components(self.states)
self.population_view = builder.population.get_view([self.state_column]) | def setup(self, builder) | Performs this component's simulation setup and returns sub-components.
Parameters
----------
builder : `engine.Builder`
Interface to several simulation tools including access to common random
number generation, in particular.
Returns
-------
iterable
This component's sub-components. | 15.068765 | 14.768433 | 1.020336 |
for state, affected in self._get_state_pops(index):
if not affected.empty:
state.next_state(affected.index, event_time, self.population_view.subview([self.state_column])) | def transition(self, index, event_time) | Finds the population in each state and moves them to the next state.
Parameters
----------
index : iterable of ints
An iterable of integer labels for the simulants.
event_time : pandas.Timestamp
The time at which this transition occurs. | 11.722946 | 9.224278 | 1.270879 |
from graphviz import Digraph
dot = Digraph(format='png')
for state in self.states:
if isinstance(state, TransientState):
dot.node(state.state_id, style='dashed')
else:
dot.node(state.state_id)
for transition in state.transition_set:
dot.edge(state.state_id, transition.output.state_id, transition.label())
return dot | def to_dot(self) | Produces a ball and stick graph of this state machine.
Returns
-------
`graphviz.Digraph`
A ball and stick visualization of this state machine. | 2.723697 | 2.807074 | 0.970297 |
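A standalone sketch of what `to_dot` builds (hypothetical node names; assumes the `graphviz` package and Graphviz binaries are installed):

```python
from graphviz import Digraph

dot = Digraph(format='png')
dot.node('healthy')
dot.node('infected', style='dashed')  # as a TransientState would be drawn
dot.edge('healthy', 'infected', 'exposure')
dot.render('state_machine', cleanup=True)  # writes state_machine.png
```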
import pprint

from sar import parser
sysstat_dir = '/var/log/sa'
single_file = ('%s/%s' % (sysstat_dir, 'sar21'))
# Single SAR file parsing
insar = parser.Parser(single_file)
print(("SAR file date: %s" % (insar.get_filedate())))
print("Content:\n")
pprint.pprint(insar.get_sar_info())
print(("-" * 78))
# Id you want to test his, please run something like
# $ cat /var/log/sa/sar* > sarcombined
# to create a "combined" SAR file. Then uncomment the following:
'''
from sar import multiparser
multi_file = ('./%s' % ('sarcombined'))
inmulti = multiparser.Multiparser(multi_file)
inmulti.load_file()
print("Content:\n")
pprint.pprint(inmulti.get_sar_info())
''' | def main() | from sar import multiparser
multi_file = ('./%s' % ('sarcombined'))
inmulti = multiparser.Multiparser(multi_file)
inmulti.load_file()
print("Content:\n")
pprint.pprint(inmulti.get_sar_info()) | 7.413745 | 4.184007 | 1.771925 |
self._managers = _setup_components(builder, self._managers, configuration)
self._components = _setup_components(builder, self._components, configuration) | def setup_components(self, builder, configuration) | Apply component-level configuration defaults to the global config and run setup methods on the components,
registering and setting up any child components generated in the process.
Parameters
----------
builder:
Interface to several simulation tools.
configuration:
Simulation configuration parameters. | 4.096301 | 4.928253 | 0.831187 |
filter_in = filter_in or {}
filter_out = filter_out or {}
catalog = read_catalog_obj(catalog)
if filter_in or filter_out:
filtered_datasets = [
dataset for dataset in catalog["dataset"] if
_filter_dictionary(dataset, filter_in.get(
"dataset"), filter_out.get("dataset"))
]
else:
filtered_datasets = catalog["dataset"]
# apply special filters
if only_time_series:
filtered_datasets = [
dataset for dataset in filtered_datasets
if dataset_has_time_series(dataset)]
if meta_field:
return [dataset[meta_field] for dataset in filtered_datasets
if meta_field in dataset]
if exclude_meta_fields:
meta_filtered_datasets = []
for dataset in filtered_datasets:
dataset_meta_filtered = dataset.copy()
for excluded_meta_field in exclude_meta_fields:
dataset_meta_filtered.pop(excluded_meta_field, None)
meta_filtered_datasets.append(dataset_meta_filtered)
return meta_filtered_datasets
else:
return filtered_datasets | def get_datasets(catalog, filter_in=None, filter_out=None, meta_field=None,
exclude_meta_fields=None, only_time_series=False) | Returns a list of the catalog's datasets, or of one of their metadata fields.
Args:
    catalog (dict, str or DataJson): External/internal representation of
        a catalog. An _external_ representation is a local path or a
        remote URL to a file with a catalog's metadata, in JSON or
        XLSX format. The _internal_ representation of a catalog is a
        dictionary. Examples: http://datos.gob.ar/data.json,
        http://www.ign.gob.ar/descargas/geodatos/catalog.xlsx,
        "/energia/catalog.xlsx".
    filter_in (dict): Returns the datasets whose attributes match the
        ones passed in this dictionary. Example::
            {
                "dataset": {
                    "publisher": {"name": "Ministerio de Ambiente"}
                }
            }
        Only the datasets of that publisher_name will be returned.
    filter_out (dict): Returns the datasets whose attributes do not
        match the ones passed in this dictionary. Example::
            {
                "dataset": {
                    "publisher": {"name": "Ministerio de Ambiente"}
                }
            }
        Only the datasets that are not from that publisher_name will
        be returned.
    meta_field (str): Name of a Dataset metadata field. Instead of
        returning the complete "Dataset" objects, returns a list of the
        values that field takes in the catalog.
    exclude_meta_fields (list): Dataset metadata fields to exclude from
        the returned Dataset objects.
    only_time_series (bool): If true, only returns datasets that have at
        least one time series distribution. | 2.198589 | 2.181374 | 1.007892 |
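A usage sketch of the filtering API (hypothetical publisher; module path assumed):

```python
from pydatajson.search import get_datasets  # module path assumed

titles = get_datasets(
    "http://datos.gob.ar/data.json",
    filter_in={"dataset": {"publisher": {"name": "Ministerio de Ambiente"}}},
    meta_field="title",  # return just the titles instead of full objects
)
print(titles)
```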
filter_in = filter_in or {}
filter_out = filter_out or {}
catalog = read_catalog_obj(catalog)
distributions = []
for dataset in get_datasets(catalog, filter_in, filter_out):
for distribution in dataset.get("distribution", []):
# attach the dataset id
distribution["dataset_identifier"] = dataset["identifier"]
distributions.append(distribution)
filtered_distributions = [
distribution for distribution in distributions if
_filter_dictionary(distribution, filter_in.get("distribution"),
filter_out.get("distribution"))
]
# apply special filters
if only_time_series:
filtered_distributions = [distribution for distribution in
filtered_distributions if
distribution_has_time_index(distribution)]
if meta_field:
return [distribution[meta_field]
for distribution in filtered_distributions
if meta_field in distribution]
if exclude_meta_fields:
meta_filtered_distributions = []
for distribution in filtered_distributions:
distribution_meta_filtered = distribution.copy()
for excluded_meta_field in exclude_meta_fields:
distribution_meta_filtered.pop(excluded_meta_field, None)
meta_filtered_distributions.append(distribution_meta_filtered)
return meta_filtered_distributions
else:
return filtered_distributions | def get_distributions(catalog, filter_in=None, filter_out=None,
meta_field=None, exclude_meta_fields=None,
only_time_series=False) | Returns a list of the catalog's distributions, or of one of their metadata fields.
Args:
    catalog (dict, str or DataJson): External/internal representation of
        a catalog. An _external_ representation is a local path or a
        remote URL to a file with a catalog's metadata, in JSON or
        XLSX format. The _internal_ representation of a catalog is a
        dictionary. Examples: http://datos.gob.ar/data.json,
        http://www.ign.gob.ar/descargas/geodatos/catalog.xlsx,
        "/energia/catalog.xlsx".
    filter_in (dict): Returns the distributions whose attributes
        match the ones passed in this dictionary. Example::
            {
                "dataset": {
                    "publisher": {"name": "Ministerio de Ambiente"}
                }
            }
        Only the distributions belonging to a dataset of that
        publisher_name will be returned.
    filter_out (dict): Returns the distributions whose attributes do
        not match the ones passed in this dictionary. Example::
            {
                "dataset": {
                    "publisher": {"name": "Ministerio de Ambiente"}
                }
            }
        Only the distributions that do not belong to a dataset of
        that publisher_name will be returned.
    meta_field (str): Name of a Distribution metadata field. Instead of
        returning the complete Distribution objects, returns a list of
        the values that field takes in the catalog.
    exclude_meta_fields (list): Distribution metadata fields to exclude
        from the returned Distribution objects.
    only_time_series (bool): If true, only returns distributions
        that are time series distributions. | 2.413435 | 2.432263 | 0.992259 |
filter_in = filter_in or {}
filter_out = filter_out or {}
catalog = read_catalog_obj(catalog)
# shortcuts for common filters
if distribution_identifier:
if "distribution" not in filter_in:
filter_in["distribution"] = {}
filter_in["distribution"]["identifier"] = distribution_identifier
fields = []
for distribution in get_distributions(catalog, filter_in, filter_out,
only_time_series=only_time_series):
distribution_fields = distribution.get("field", [])
if isinstance(distribution_fields, list):
for field in distribution_fields:
if not only_time_series or field_is_time_series(field,
distribution):
# attach the dataset id
field["dataset_identifier"] = distribution[
"dataset_identifier"]
# attach the distribution id
field["distribution_identifier"] = distribution.get(
"identifier")
fields.append(field)
filtered_fields = [field for field in fields if
_filter_dictionary(field, filter_in.get("field"),
filter_out.get("field"))]
if meta_field:
return [field[meta_field] for field in filtered_fields
if meta_field in field]
else:
return filtered_fields | def get_fields(catalog, filter_in=None, filter_out=None, meta_field=None,
only_time_series=False, distribution_identifier=None) | Returns a list of the catalog's fields, or of one of their metadata fields.
Args:
    catalog (dict, str or DataJson): External/internal representation of
        a catalog. An _external_ representation is a local path or a
        remote URL to a file with a catalog's metadata, in JSON or
        XLSX format. The _internal_ representation of a catalog is a
        dictionary. Examples: http://datos.gob.ar/data.json,
        http://www.ign.gob.ar/descargas/geodatos/catalog.xlsx,
        "/energia/catalog.xlsx".
    filter_in (dict): Returns the fields whose attributes
        match the ones passed in this dictionary. Example::
            {
                "dataset": {
                    "publisher": {"name": "Ministerio de Ambiente"}
                }
            }
        Only the fields belonging to a dataset of that
        publisher_name will be returned.
    filter_out (dict): Returns the fields whose attributes do not
        match the ones passed in this dictionary. Example::
            {
                "dataset": {
                    "publisher": {"name": "Ministerio de Ambiente"}
                }
            }
        Only the fields that do not belong to a dataset of that
        publisher_name will be returned.
    meta_field (str): Name of a Field metadata field. Instead of
        returning the complete "Field" objects, returns a list of the
        values that field takes in the catalog.
    exclude_meta_fields (list): Field metadata fields to exclude
        from the returned Field objects.
    only_time_series (bool): If true, only returns fields that
        are time series. | 2.687143 | 2.620255 | 1.025527 |
msg = "Se requiere un 'identifier' o 'title' para buscar el dataset."
assert identifier or title, msg
catalog = read_catalog_obj(catalog)
# optimized lookup by identifier
if identifier:
try:
return _get_dataset_by_identifier(catalog, identifier)
except BaseException:
try:
catalog._build_index()
return _get_dataset_by_identifier(catalog, identifier)
except BaseException:
filtered_datasets = get_datasets(
catalog, {"dataset": {"identifier": identifier}})
elif title: # TODO: is this required?
filtered_datasets = get_datasets(
catalog, {"dataset": {"title": title}})
if len(filtered_datasets) > 1:
if identifier:
raise ce.DatasetIdRepetitionError(
identifier, filtered_datasets)
elif title:
# TODO: Improve exceptions module!
raise ce.DatasetTitleRepetitionError(title, filtered_datasets)
elif len(filtered_datasets) == 0:
return None
else:
return filtered_datasets[0] | def get_dataset(catalog, identifier=None, title=None) | Returns a Dataset from the catalog. | 3.652771 | 3.412969 | 1.070262 |
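A usage sketch (hypothetical identifier; module path assumed):

```python
from pydatajson.search import get_dataset  # module path assumed

dataset = get_dataset(
    "http://datos.gob.ar/data.json",
    identifier="99db6631-d1c9-470b-a73e-c62daa32c420",  # hypothetical id
)
print(dataset["title"] if dataset else "not found")
```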
msg = "Se requiere un 'identifier' o 'title' para buscar el distribution."
assert identifier or title, msg
catalog = read_catalog_obj(catalog)
# 1. SEARCH for the distributions in the catalog
# take the distribution that has the unique id
# optimized lookup by identifier
if identifier:
try:
return _get_distribution_by_identifier(catalog, identifier)
except BaseException:
try:
catalog._build_index()
return _get_distribution_by_identifier(catalog, identifier)
except BaseException:
filtered_distributions = get_distributions(
catalog, {"distribution": {"identifier": identifier}})
# take the distribution whose title is unique within a dataset
elif title and dataset_identifier:
filtered_distributions = get_distributions(
catalog, {
"dataset": {"identifier": dataset_identifier},
"distribution": {"title": title}
}
)
# take the distributions that have that title (there may be more than one)
elif title:
filtered_distributions = get_distributions(
catalog, {"distribution": {"title": title}})
# 2. CHECK that the number of distributions is consistent
if len(filtered_distributions) > 1:
if identifier:
raise ce.DistributionIdRepetitionError(
identifier, filtered_distributions)
elif title and dataset_identifier:
# a distribution's title cannot be repeated within a dataset
raise ce.DistributionTitleRepetitionError(
title, filtered_distributions)
elif title:
# a distribution's title may be repeated across the catalog
return filtered_distributions
elif len(filtered_distributions) == 0:
return None
else:
return filtered_distributions[0] | def get_distribution(catalog, identifier=None, title=None,
dataset_identifier=None) | Returns a Distribution from the catalog. | 3.258279 | 3.10299 | 1.050045 |
msg = "Se requiere un 'id' o 'title' para buscar el field."
assert identifier or title, msg
# 1. SEARCH for the fields in the catalog
# optimized lookup by identifier
if identifier:
try:
return _get_field_by_identifier(catalog, identifier)
except BaseException:
try:
catalog._build_index()
return _get_field_by_identifier(catalog, identifier)
except BaseException:
filtered_fields = get_fields(
catalog, {"field": {"id": identifier}})
elif title and distribution_identifier:
filtered_fields = get_fields(
catalog, {
"distribution": {"identifier": distribution_identifier},
"field": {"title": title}
}
)
elif title:
filtered_fields = get_fields(
catalog, {"field": {"title": title}})
# 2. CHECK that the number of fields is consistent
if len(filtered_fields) > 1:
if identifier:
raise ce.FieldIdRepetitionError(
identifier, filtered_fields)
elif title and distribution_identifier:
# a field's title cannot be repeated within a distribution
raise ce.FieldTitleRepetitionError(
title, filtered_fields)
elif title:
# a field's title may be repeated
return filtered_fields
elif len(filtered_fields) == 0:
return None
else:
return filtered_fields[0] | def get_field(catalog, identifier=None, title=None,
distribution_identifier=None) | Returns a Field from the catalog. | 3.129733 | 2.948909 | 1.061319 |
exclude_meta_fields = exclude_meta_fields or []
catalog_dict_copy = catalog.copy()
del catalog_dict_copy["dataset"]
for excluded_meta_field in exclude_meta_fields:
catalog_dict_copy.pop(excluded_meta_field, None)
return catalog_dict_copy | def get_catalog_metadata(catalog, exclude_meta_fields=None) | Returns only the catalog-level metadata. | 2.431689 | 2.249717 | 1.080886 |
# NOTE: the original triple-quoted template was stripped during extraction;
# this is a plausible reconstruction, not the verbatim original.
text_template = """
# {title}

{description}

{distributions}
"""
if "distribution" in dataset:
distributions = "".join(
map(distribution_to_markdown, dataset["distribution"]))
else:
distributions = ""
text = text_template.format(
title=dataset["title"],
description=dataset.get("description", ""),
distributions=distributions
)
return text | def dataset_to_markdown(dataset) | Generates markdown text from a `dataset`'s metadata.
Args:
    dataset (dict): Dictionary with a `dataset`'s metadata.
Returns:
    str: Text describing a `dataset`. | 3.951792 | 4.041536 | 0.977795 |
# NOTE: the original triple-quoted template was stripped during extraction;
# this is a plausible reconstruction, not the verbatim original.
text_template = """
### {title}

{description}

{fields}
"""
if "field" in distribution:
fields = "- " + \
"\n- ".join(map(field_to_markdown, distribution["field"]))
else:
fields = ""
text = text_template.format(
title=distribution["title"],
description=distribution.get("description", ""),
fields=fields
)
return text | def distribution_to_markdown(distribution) | Generates markdown text from a
`distribution`'s metadata.
Args:
    distribution (dict): Dictionary with a
        `distribution`'s metadata.
Returns:
    str: Text describing a `distribution`. | 3.694002 | 3.938336 | 0.93796 |
if "title" in field:
field_title = "**{}**".format(field["title"])
else:
raise Exception("Es necesario un `title` para describir un campo.")
field_type = " ({})".format(field["type"]) if "type" in field else ""
field_desc = ": {}".format(
field["description"]) if "description" in field else ""
text_template = "{title}{type}{description}"
text = text_template.format(title=field_title, type=field_type,
description=field_desc)
return text | def field_to_markdown(field) | Generates markdown text from a `field`'s metadata.
Args:
    field (dict): Dictionary with a `field`'s metadata.
Returns:
    str: Text describing a `field`. | 2.978445 | 2.72649 | 1.09241 |
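A quick usage sketch (hypothetical field metadata; import path assumed):

```python
from pydatajson.documentation import field_to_markdown  # module path assumed

field = {"title": "fecha", "type": "date", "description": "measurement date"}
print(field_to_markdown(field))  # -> **fecha** (date): measurement date
```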
from ipywidgets import IntProgress, HTML, VBox
from IPython.display import display
is_iterator = False
if size is None:
try:
size = len(sequence)
except TypeError:
is_iterator = True
if size is not None:
if every is None:
if size <= 200:
every = 1
else:
every = int(size / 200) # every 0.5%
else:
assert every is not None, 'sequence is iterator, set every'
if is_iterator:
progress = IntProgress(min=0, max=1, value=1)
progress.bar_style = 'info'
else:
progress = IntProgress(min=0, max=size, value=0)
label = HTML()
box = VBox(children=[label, progress])
display(box)
index = 0
try:
for index, record in enumerate(sequence, 1):
if index == 1 or index % every == 0:
if is_iterator:
label.value = '{name}: {index} / ?'.format(
name=name,
index=index
)
else:
progress.value = index
label.value = u'{name}: {index} / {size}'.format(
name=name,
index=index,
size=size
)
yield record
except Exception:
progress.bar_style = 'danger'
raise
else:
progress.bar_style = 'success'
progress.value = index
label.value = "{name}: {index}".format(
name=name,
index=str(index or '?')
) | def log_progress(sequence, every=None, size=None, name='Items') | Taken from https://github.com/alexanderkuk/log-progress | 1.902398 | 1.863631 | 1.020802 |
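A usage sketch, assuming the function above is in scope (the widgets need a Jupyter notebook frontend with ipywidgets installed):

```python
# Wrap any sequence; the bar redraws every `every` items.
for record in log_progress(range(1000), every=10, name='Rows'):
    _ = record ** 2  # stand-in for per-record work
```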
try:
datajson_file = sys.argv[1]
dj_instance = DataJson()
bool_res = dj_instance.is_valid_catalog(datajson_file)
full_res = dj_instance.validate_catalog(datajson_file)
pretty_full_res = json.dumps(
full_res, indent=4, separators=(",", ": "))
logger.info(bool_res)
logger.info(pretty_full_res)
except IndexError as errmsg:
# message reconstructed: the original template string was stripped during extraction
format_str = "Usage error ({}): pass a path or URL to a data.json file as the first argument"
logger.error(format_str.format(errmsg)) | def main() | Allows the module to be run from the command line.
Validates a path or URL to a data.json file, returning True/False for
validity and then the full validation result.
Example:
    python pydatajson.py http://181.209.63.71/data.json
    python pydatajson.py ~/github/pydatajson/tests/samples/full_data.json | 4.328019 | 3.759573 | 1.151199 |
datasets_index = {}
distributions_index = {}
fields_index = {}
# walk every dataset
for dataset_index, dataset in enumerate(self.datasets):
if "identifier" in dataset:
datasets_index[dataset["identifier"]] = {
"dataset_index": dataset_index
}
# walk the dataset's distributions
for distribution_index, distribution in enumerate(
dataset.get("distribution", [])):
if "identifier" in distribution:
distributions_index[distribution["identifier"]] = {
"distribution_index": distribution_index,
"dataset_identifier": dataset["identifier"]
}
# walk the distribution's fields
for field_index, field in enumerate(
distribution.get("field", [])):
if "id" in field:
fields_index[field["id"]] = {
"field_index":
field_index,
"dataset_identifier":
dataset["identifier"],
"distribution_identifier":
distribution["identifier"]
}
setattr(self, "_distributions_index", distributions_index)
setattr(self, "_datasets_index", datasets_index)
setattr(self, "_fields_index", fields_index) | def _build_index(self) | Itera todos los datasets, distribucioens y fields indexandolos. | 2.049918 | 1.758194 | 1.165922 |
catalog = catalog or self
return validation.is_valid_catalog(catalog, validator=self.validator) | def is_valid_catalog(self, catalog=None) | Validates that a `data.json` file complies with the defined schema.
Checks that the data.json has all the required fields and that both
the required and the optional fields follow the structure defined in
the schema.
Args:
    catalog (str or dict): Catalog (dict, JSON or XLSX) to validate.
        If not passed, validates this catalog.
Returns:
    bool: True if the data.json complies with the schema, otherwise False. | 6.502751 | 16.277836 | 0.399485 |
new_response = response.copy()
# The status of the catalog as a whole will be ERROR
new_response["status"] = "ERROR"
# Adapt the information from the received ValidationError to the
# needs of the DataJson validator
error_info = {
# Error Code 1 for "required field missing"
# Error Code 2 for "error in field type or format"
"error_code": 1 if error.validator == "required" else 2,
"message": error.message,
"validator": error.validator,
"validator_value": error.validator_value,
"path": list(error.path),
# The validated instance is irrelevant if the error is of type 1
"instance": (None if error.validator == "required" else
error.instance)
}
# Identify at which level of the hierarchy the error occurred.
if len(error.path) >= 2 and error.path[0] == "dataset":
# The error is at the level of a particular dataset or below
position = new_response["error"]["dataset"][error.path[1]]
else:
# The error is at the catalog level
position = new_response["error"]["catalog"]
position["status"] = "ERROR"
position["errors"].append(error_info)
return new_response | def _update_validation_response(error, response) | Updates the default response according to a validation
error. | 5.039967 | 4.825168 | 1.044516 |
catalog = catalog or self
return validation.validate_catalog(
catalog, only_errors, fmt, export_path, validator=self.validator) | def validate_catalog(self, catalog=None, only_errors=False, fmt="dict",
export_path=None) | Analyzes a data.json, recording the errors it finds.
Checks that the data.json has all the required fields and that both
the required and the optional fields follow the structure defined in
the schema.
Args:
    catalog (str or dict): Catalog (dict, JSON or XLSX) to validate.
        If not passed, validates this catalog.
    only_errors (bool): If True, only the errors are reported.
    fmt (str): Indicates the desired format of the report.
        "dict" is the most verbose report, respecting the
        structure of the data.json.
        "list" returns a dict with lists of errors formatted for
        building tables.
    export_path (str): Path where the generated report is exported (in
        XLSX or CSV format). If specified, the method returns
        nothing, regardless of any argument passed in `fmt`.
Returns:
    dict: Summary dictionary of the errors found::
        {
            "status": "OK",  # result of the global validation
            "error": {
                "catalog": {
                    "status": "OK",
                    "errors": [],
                    "title": "Catalog Title"},
                "dataset": [
                    {
                        "status": "OK",
                        "errors": [],
                        "title": "Dataset 1 Title"
                    },
                    {
                        "status": "ERROR",
                        "errors": [error1_info, error2_info, ...],
                        "title": "Dataset 2 Title"
                    }
                ]
            }
        }
    Where errorN_info is a dict with the information of the N-th
    error found, with the following keys: "path", "instance",
    "message", "validator", "validator_value", "error_code". | 4.563738 | 9.520445 | 0.479362 |
publisher_name = helpers.traverse_dict(dataset, ["publisher", "name"])
languages = cls._stringify_list(dataset.get("language"))
super_themes = cls._stringify_list(dataset.get("superTheme"))
themes = cls._stringify_list(dataset.get("theme"))
def _stringify_distribution(distribution):
title = distribution.get("title")
url = distribution.get("downloadURL")
return "\"{}\": {}".format(title, url)
distributions = [d for d in dataset["distribution"]
if isinstance(d, dict)]
# build the list of distributions
distributions_list = None
if isinstance(distributions, list):
distributions_strings = [
_stringify_distribution(d) for d in distributions
]
distributions_list = "\n\n".join(distributions_strings)
# build the list of formats
distributions_formats = json.dumps(
helpers.count_distribution_formats_dataset(dataset))
fields = OrderedDict()
fields["dataset_identifier"] = dataset.get("identifier")
fields["dataset_title"] = dataset.get("title")
fields["dataset_accrualPeriodicity"] = dataset.get(
"accrualPeriodicity")
fields["dataset_description"] = dataset.get("description")
fields["dataset_publisher_name"] = publisher_name
fields["dataset_superTheme"] = super_themes
fields["dataset_theme"] = themes
fields["dataset_landingPage"] = dataset.get("landingPage")
fields["dataset_landingPage_generated"] = cls._generate_landingPage(
catalog_homepage, dataset.get("identifier")
)
fields["dataset_issued"] = dataset.get("issued")
fields["dataset_modified"] = dataset.get("modified")
fields["distributions_formats"] = distributions_formats
fields["distributions_list"] = distributions_list
fields["dataset_license"] = dataset.get("license")
fields["dataset_language"] = languages
fields["dataset_spatial"] = dataset.get("spatial")
fields["dataset_temporal"] = dataset.get("temporal")
return fields | def _dataset_report_helper(cls, dataset, catalog_homepage=None) | Takes a dict with a dataset's metadata and returns a dict with
the values that dataset_report() uses to report on it.
Args:
    dataset (dict): Dictionary with a dataset's metadata.
Returns:
    dict: Dictionary with the dataset-level fields that
        dataset_report() requires. | 2.486974 | 2.530607 | 0.982758 |
fields = OrderedDict()
fields["catalog_metadata_url"] = url
fields["catalog_federation_id"] = catalog_id
fields["catalog_federation_org"] = catalog_org
fields["catalog_title"] = catalog.get("title")
fields["catalog_description"] = catalog.get("description")
fields["valid_catalog_metadata"] = (
1 if catalog_validation["status"] == "OK" else 0)
return fields | def _catalog_report_helper(catalog, catalog_validation, url, catalog_id,
catalog_org) | Takes a dict with a catalog's metadata and returns a dict with
the values that catalog_report() uses to report on it.
Args:
    catalog (dict): Dictionary with a catalog's metadata.
    catalog_validation (dict): Result, at the catalog level only, of
        the full validation of `catalog`.
Returns:
    dict: Dictionary with the catalog-level fields that
        catalog_report() requires. | 2.965676 | 3.203364 | 0.925801 |
# run a brief QA analysis on the dataset
good_qa, notes = self._dataset_qa(dataset)
dataset_report = OrderedDict(catalog_fields)
dataset_report["valid_dataset_metadata"] = (
1 if dataset_validation["status"] == "OK" else 0)
dataset_report["dataset_index"] = dataset_index
if isinstance(harvest, list):
dataset_report["harvest"] = 1 if dataset["title"] in harvest else 0
elif harvest == 'all':
dataset_report["harvest"] = 1
elif harvest == 'none':
dataset_report["harvest"] = 0
elif harvest == 'valid':
dataset_report["harvest"] = (
int(dataset_report["valid_dataset_metadata"]))
elif harvest == 'good':
valid_metadata = int(dataset_report["valid_dataset_metadata"]) == 1
dataset_report["harvest"] = 1 if valid_metadata and good_qa else 0
elif harvest == 'report':
if not report:
    # message reconstructed; the original string was stripped during extraction
    raise ValueError("harvest='report' requires a `report` argument")
datasets_to_harvest = self._extract_datasets_to_harvest(report)
dataset_report["harvest"] = (
1 if (dataset_report["catalog_metadata_url"],
dataset.get("title")) in datasets_to_harvest
else 0)
else:
    # message reconstructed; the original string was stripped during extraction
    raise ValueError(
        "{} is not a valid value for `harvest`".format(harvest))
dataset_report.update(
self._dataset_report_helper(
dataset, catalog_homepage=catalog_homepage)
)
dataset_report["notas"] = "\n\n".join(notes)
return dataset_report.copy() | def _dataset_report(
self, dataset, dataset_validation, dataset_index,
catalog_fields, harvest='none', report=None, catalog_homepage=None
) | Generates one row of the `catalog_report`, corresponding to one of
the datasets that make up the analyzed catalog. | 3.101971 | 3.047539 | 1.017861 |
# 1. VALIDATIONS
# check that there is at least one recognized data format
has_data_format = helpers.dataset_has_data_distributions(dataset)
# check that certain fields meet minimum lengths
has_title = "title" in dataset
has_description = "description" in dataset
if has_title:
has_min_title = len(dataset["title"]) >= MIN_DATASET_TITLE
else:
has_min_title = False
if has_description:
has_min_desc = len(
dataset["description"]) >= MIN_DATASET_DESCRIPTION
else:
has_min_desc = False
# 2. HARVEST EVALUATION: decides whether or not to harvest the dataset
harvest = (has_title and has_description and
has_data_format and has_min_title and has_min_desc)
# 3. NOTES: generate validation notes
notes = []
if not has_data_format:
notes.append("No tiene distribuciones con datos.")
if not has_title:
notes.append("Dataset sin titulo {}".format(dataset))
else:
if not has_min_title:
notes.append("Titulo tiene menos de {} caracteres".format(
MIN_DATASET_TITLE))
if not has_description:
notes.append("Dataset sin descripcion {}".format(dataset))
else:
if not has_min_desc:
notes.append("Descripcion tiene menos de {} caracteres".format(
MIN_DATASET_DESCRIPTION))
return harvest, notes | def _dataset_qa(self, dataset) | Checks whether the dataset meets a minimum quality bar for harvesting. | 3.269149 | 2.975495 | 1.098691 |
url = catalog if isinstance(catalog, string_types) else None
catalog = readers.read_catalog(catalog)
validation = self.validate_catalog(catalog)
catalog_validation = validation["error"]["catalog"]
datasets_validations = validation["error"]["dataset"]
catalog_fields = self._catalog_report_helper(
catalog, catalog_validation, url, catalog_id, catalog_org
)
if "dataset" in catalog and isinstance(catalog["dataset"], list):
datasets = [d if isinstance(d, dict) else {} for d in
catalog["dataset"]]
else:
datasets = []
catalog_report = [
self._dataset_report(
dataset, datasets_validations[index], index,
catalog_fields, harvest, report=report,
catalog_homepage=catalog_homepage
)
for index, dataset in enumerate(datasets)
]
return catalog_report | def catalog_report(self, catalog, harvest='none', report=None,
catalog_id=None, catalog_homepage=None,
catalog_org=None) | Generates a report on the datasets of a single catalog.
Args:
    catalog (dict, str or unicode): External (path/URL) or
        internal (dict) representation of a catalog.
    harvest (str): Harvest criterion ('all', 'none',
        'valid', 'report' or 'good').
Returns:
    list: List of dictionaries, one per dataset
        present in `catalog`. | 3.661453 | 3.573249 | 1.024684 |
# If a single catalog is passed, wrap it in a list
if isinstance(catalogs, string_types + (dict,)):
catalogs = [catalogs]
if harvest == 'report':
if not report:
    # message reconstructed; the original string was stripped during extraction
    raise ValueError("harvest='report' requires a `report` argument")
datasets_report = readers.read_table(report)
elif harvest in ['valid', 'none', 'all']:
# catalogs must be provided for these criteria
assert isinstance(catalogs, string_types + (dict, list))
datasets_report = self.generate_datasets_report(catalogs, harvest)
else:
    # message reconstructed; the original string was stripped during extraction
    raise ValueError(
        "{} is not a valid value for `harvest`".format(harvest))
# the report fields kept for the config file
config_keys = [
"catalog_federation_id", "catalog_federation_org",
"dataset_identifier"
]
# some of these field names change for the config file
config_translator = {
"catalog_federation_id": "catalog_id",
"catalog_federation_org": "dataset_organization"
}
translated_keys = [config_translator.get(k, k) for k in config_keys]
harvester_config = [
OrderedDict(
# Keep only the fields the harvester needs
[(config_translator.get(k, k), v)
for (k, v) in dataset.items() if k in config_keys]
)
# For those datasets flagged with 'harvest'==1
for dataset in datasets_report if bool(int(dataset["harvest"]))
]
# check that the configuration file has all the fields
required_keys = set(translated_keys)
for row in harvester_config:
row_keys = set(row.keys())
msg = "Hay una fila con claves {} y debe tener claves {}".format(
row_keys, required_keys)
assert row_keys == required_keys, msg
if export_path:
writers.write_table(harvester_config, export_path)
else:
return harvester_config | def generate_harvester_config(self, catalogs=None, harvest='valid',
report=None, export_path=None) | Generates a harvester configuration file from a
report, or from a set of catalogs and a harvest criterion
(`harvest`).
Args:
    catalogs (str, dict or list): One (str or dict) or several (list
        of strs and/or dicts) catalogs.
    harvest (str): Criterion to determine which datasets to include
        in the generated configuration file ('all', 'none',
        'valid', 'report' or 'good').
    report (list or str): Report table generated by
        generate_datasets_report() as a list of dictionaries or a file
        in XLSX or CSV format. Only used when `harvest=='report'`,
        in which case `catalogs` is ignored.
    export_path (str): Path where the generated report is exported
        (in XLSX or CSV format). If specified, the method returns
        nothing.
Returns:
    list of dicts: One dictionary of configuration variables
        per dataset to harvest. | 4.217426 | 3.81368 | 1.105868 |
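A usage sketch (hypothetical catalog URL) tying the harvest criteria together:

```python
from pydatajson import DataJson

dj = DataJson()
config = dj.generate_harvester_config(
    catalogs=["http://datos.gob.ar/data.json"],  # hypothetical catalog list
    harvest='valid',                             # only datasets with valid metadata
)
# Each row carries catalog_id, dataset_organization and dataset_identifier.
print(config[:2])
```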