Dataset schema:
_id: string (2 to 7 characters)
title: string (1 to 88 characters)
partition: string (3 classes)
text: string (75 to 19.8k characters)
language: string (1 class)
meta_information: dict
q15500
line_starts_subpgm
train
def line_starts_subpgm(line: str) -> Tuple[bool, Optional[str]]: """ Indicates whether a line in the program is the first line of a subprogram definition. Args: line Returns: (True, f_name) if line begins a definition for subprogram f_name; (False, None) if line does not begin a subprogram definition. """ match = RE_SUB_START.match(line) if match is not None: f_name = match.group(1) return (True, f_name) match = RE_FN_START.match(line) if match is not None: f_name = match.group(1) return (True, f_name) return (False, None)
python
{ "resource": "" }
q15501
program_unit_name
train
def program_unit_name(line: str) -> str: """Given a line that starts a program unit, i.e., a program, module, subprogram, or function, this function returns the name associated with that program unit.""" match = RE_PGM_UNIT_START.match(line) assert match is not None return match.group(2)
python
{ "resource": "" }
q15502
prepend
train
def prepend(x: T, xs: Iterable[T]) -> Iterator[T]: """ Prepend a value to an iterable. Parameters ---------- x An element of type T. xs An iterable of elements of type T. Returns ------- Iterator An iterator that yields *x* followed by elements of *xs*. Examples -------- >>> from delphi.utils.fp import prepend >>> list(prepend(1, [2, 3])) [1, 2, 3] """ return chain([x], xs)
python
{ "resource": "" }
q15503
append
train
def append(x: T, xs: Iterable[T]) -> Iterator[T]: """ Append a value to an iterable. Parameters ---------- x An element of type T. xs An iterable of elements of type T. Returns ------- Iterator An iterator that yields elements of *xs*, then yields *x*. Examples -------- >>> from delphi.utils.fp import append >>> list(append(1, [2, 3])) [2, 3, 1] """ return chain(xs, [x])
python
{ "resource": "" }
q15504
flatten
train
def flatten(xs: Union[List, Tuple]) -> List: """ Flatten a nested list or tuple. """ return ( sum(map(flatten, xs), []) if (isinstance(xs, list) or isinstance(xs, tuple)) else [xs] )
python
{ "resource": "" }
q15505
iterate
train
def iterate(f: Callable[[T], T], x: T) -> Iterator[T]: """ Makes infinite iterator that returns the result of successive applications of a function to an element .. math:: iterate(f, x) = [x, f(x), f(f(x)), f(f(f(x))), ...] Examples -------- >>> from delphi.utils.fp import iterate, take >>> list(take(5, iterate(lambda x: x*2, 1))) [1, 2, 4, 8, 16] """ return scanl(lambda x, _: f(x), x, repeat(None))
python
{ "resource": "" }
q15506
ptake
train
def ptake(n: int, xs: Iterable[T]) -> Iterable[T]: """ take with a tqdm progress bar. """ return tqdm(take(n, xs), total=n)
python
{ "resource": "" }
q15507
ltake
train
def ltake(n: int, xs: Iterable[T]) -> List[T]: """ A non-lazy version of take. """ return list(take(n, xs))
python
{ "resource": "" }
q15508
compose
train
def compose(*fs: Any) -> Callable: """ Compose functions from left to right. e.g. compose(f, g)(x) = f(g(x)) """ return foldl1(lambda f, g: lambda *x: f(g(*x)), fs)
python
{ "resource": "" }
q15509
rcompose
train
def rcompose(*fs: Any) -> Callable: """ Compose functions from right to left. e.g. rcompose(f, g)(x) = g(f(x)) """ return foldl1(lambda f, g: lambda *x: g(f(*x)), fs)
python
{ "resource": "" }
q15510
flatMap
train
def flatMap(f: Callable, xs: Iterable) -> List: """ Map a function onto an iterable and flatten the result. """ return flatten(lmap(f, xs))
python
{ "resource": "" }
q15511
process_climis_crop_production_data
train
def process_climis_crop_production_data(data_dir: str): """ Process CliMIS crop production data """ climis_crop_production_csvs = glob( f"{data_dir}/Climis South Sudan Crop Production Data/" "Crops_EstimatedProductionConsumptionBalance*.csv" ) state_county_df = pd.read_csv( f"{data_dir}/ipc_data.csv", skipinitialspace=True ) combined_records = [] for f in climis_crop_production_csvs: year = int(f.split("/")[-1].split("_")[2].split(".")[0]) df = pd.read_csv(f).dropna() for i, r in df.iterrows(): record = { "Year": year, "Month": None, "Source": "CliMIS", "Country": "South Sudan", } region = r["State/County"].strip() if region.lower() in state_county_df["State"].str.lower().values: record["State"] = region record["County"] = None else: potential_states = state_county_df.loc[ state_county_df["County"] == region ]["State"] record["State"] = ( potential_states.iloc[0] if len(potential_states) != 0 else None ) record["County"] = region for field in r.index: if field != "State/County": if "Net Cereal production" in field: record["Variable"] = "Net Cereal Production" record["Value"] = r[field] if field.split()[-1].startswith("("): record["Unit"] = field.split()[-1][1:-1].lower() else: record["Unit"] = None combined_records.append(record) df = pd.DataFrame(combined_records) return df
python
{ "resource": "" }
q15512
AnalysisGraph.assign_uuids_to_nodes_and_edges
train
def assign_uuids_to_nodes_and_edges(self): """ Assign uuids to nodes and edges. """ for node in self.nodes(data=True): node[1]["id"] = str(uuid4()) for edge in self.edges(data=True): edge[2]["id"] = str(uuid4())
python
{ "resource": "" }
q15513
AnalysisGraph.from_statements_file
train
def from_statements_file(cls, file: str): """ Construct an AnalysisGraph object from a pickle file containing a list of INDRA statements. """ with open(file, "rb") as f: sts = pickle.load(f) return cls.from_statements(sts)
python
{ "resource": "" }
q15514
AnalysisGraph.from_text
train
def from_text(cls, text: str): """ Construct an AnalysisGraph object from text, using Eidos to perform machine reading. """ eidosProcessor = process_text(text) return cls.from_statements(eidosProcessor.statements)
python
{ "resource": "" }
q15515
AnalysisGraph.from_uncharted_json_file
train
def from_uncharted_json_file(cls, file): """ Construct an AnalysisGraph object from a file containing INDRA statements serialized and exported by Uncharted's CauseMos webapp. """ with open(file, "r") as f: _dict = json.load(f) return cls.from_uncharted_json_serialized_dict(_dict)
python
{ "resource": "" }
q15516
AnalysisGraph.from_uncharted_json_serialized_dict
train
def from_uncharted_json_serialized_dict( cls, _dict, minimum_evidence_pieces_required: int = 1 ): """ Construct an AnalysisGraph object from a dict of INDRA statements exported by Uncharted's CauseMos webapp. """ sts = _dict["statements"] G = nx.DiGraph() for s in sts: if len(s["evidence"]) >= minimum_evidence_pieces_required: subj, obj = s["subj"], s["obj"] if ( subj["db_refs"]["concept"] is not None and obj["db_refs"]["concept"] is not None ): subj_name, obj_name = [ "/".join(s[x]["db_refs"]["concept"].split("/")[:]) for x in ["subj", "obj"] ] G.add_edge(subj_name, obj_name) subj_delta = s["subj_delta"] obj_delta = s["obj_delta"] for delta in (subj_delta, obj_delta): # TODO : Ensure that all the statements provided by # Uncharted have unambiguous polarities. if delta["polarity"] is None: delta["polarity"] = 1 influence_stmt = Influence( Concept(subj_name, db_refs=subj["db_refs"]), Concept(obj_name, db_refs=obj["db_refs"]), subj_delta=s["subj_delta"], obj_delta=s["obj_delta"], evidence=[ INDRAEvidence( source_api=ev["source_api"], annotations=ev["annotations"], text=ev["text"], epistemics=ev.get("epistemics"), ) for ev in s["evidence"] ], ) influence_sts = G.edges[subj_name, obj_name].get( "InfluenceStatements", [] ) influence_sts.append(influence_stmt) G.edges[subj_name, obj_name][ "InfluenceStatements" ] = influence_sts for concept, indicator in _dict[ "concept_to_indicator_mapping" ].items(): if indicator is not None: indicator_source, indicator_name = ( indicator["name"].split("/")[0], "/".join(indicator["name"].split("/")[1:]), ) if concept in G: if G.nodes[concept].get("indicators") is None: G.nodes[concept]["indicators"] = {} G.nodes[concept]["indicators"][indicator_name] = Indicator( indicator_name, indicator_source ) self = cls(G) self.assign_uuids_to_nodes_and_edges() return self
python
{ "resource": "" }
q15517
AnalysisGraph.sample_observed_state
train
def sample_observed_state(self, s: pd.Series) -> Dict: """ Sample observed state vector. This is the implementation of the emission function. Args: s: Latent state vector. Returns: Observed state vector. """ return { n[0]: { i.name: np.random.normal(s[n[0]] * i.mean, i.stdev) for i in n[1]["indicators"].values() } for n in self.nodes(data=True) }
python
{ "resource": "" }
q15518
AnalysisGraph.sample_from_likelihood
train
def sample_from_likelihood(self, n_timesteps=10): """ Sample a collection of observed state sequences from the likelihood model given a collection of transition matrices. Args: n_timesteps: The number of timesteps for the sequences. """ self.latent_state_sequences = lmap( lambda A: ltake( n_timesteps, iterate( lambda s: pd.Series(A @ s.values, index=s.index), self.s0 ), ), self.transition_matrix_collection, ) self.observed_state_sequences = [ [self.sample_observed_state(s) for s in latent_state_sequence] for latent_state_sequence in self.latent_state_sequences ]
python
{ "resource": "" }
q15519
AnalysisGraph.get_timeseries_values_for_indicators
train
def get_timeseries_values_for_indicators( self, resolution: str = "month", months: Iterable[int] = range(6, 9) ): """ Attach timeseries to indicators, for performing Bayesian inference. """ if resolution == "month": funcs = [ partial(get_indicator_value, month=month) for month in months ] else: raise NotImplementedError( "Currently, only the 'month' resolution is supported." ) for n in self.nodes(data=True): for indicator in n[1]["indicators"].values(): indicator.timeseries = [ func(indicator, year="2017")[0] for func in funcs ] if len(set(indicator.timeseries)) == 1: indicator.timeseries = None
python
{ "resource": "" }
q15520
AnalysisGraph.sample_from_posterior
train
def sample_from_posterior(self, A: pd.DataFrame) -> None: """ Run Bayesian inference - sample from the posterior distribution.""" self.sample_from_proposal(A) self.set_latent_state_sequence(A) self.update_log_prior(A) self.update_log_likelihood() candidate_log_joint_probability = self.log_prior + self.log_likelihood delta_log_joint_probability = ( candidate_log_joint_probability - self.log_joint_probability ) acceptance_probability = min(1, np.exp(delta_log_joint_probability)) if acceptance_probability > np.random.rand(): self.update_log_joint_probability() else: A[f"∂({self.source})/∂t"][self.target] = self.original_value self.set_latent_state_sequence(A) self.update_log_likelihood() self.update_log_prior(A) self.update_log_joint_probability()
python
{ "resource": "" }
q15521
AnalysisGraph.create_bmi_config_file
train
def create_bmi_config_file(self, filename: str = "bmi_config.txt") -> None: """ Create a BMI config file to initialize the model. Args: filename: The filename with which the config file should be saved. """ s0 = self.construct_default_initial_state() s0.to_csv(filename, index_label="variable")
python
{ "resource": "" }
q15522
AnalysisGraph.export_node
train
def export_node(self, n) -> Dict[str, Union[str, List[str]]]: """ Return dict suitable for exporting to JSON. Args: n: A dict representing the data in a networkx AnalysisGraph node. Returns: The node dict with additional fields for name, units, dtype, and arguments. """ node_dict = { "name": n[0], "units": _get_units(n[0]), "dtype": _get_dtype(n[0]), "arguments": list(self.predecessors(n[0])), } if not n[1].get("indicators") is None: for indicator in n[1]["indicators"].values(): if "dataset" in indicator.__dict__: del indicator.__dict__["dataset"] node_dict["indicators"] = [ _process_datetime(indicator.__dict__) for indicator in n[1]["indicators"].values() ] else: node_dict["indicators"] = None return node_dict
python
{ "resource": "" }
q15523
AnalysisGraph.map_concepts_to_indicators
train
def map_concepts_to_indicators( self, n: int = 1, min_temporal_res: Optional[str] = None ): """ Map each concept node in the AnalysisGraph instance to one or more tangible quantities, known as 'indicators'. Args: n: Number of matches to keep min_temporal_res: Minimum temporal resolution that the indicators must have data for. """ for node in self.nodes(data=True): query_parts = [ "select Indicator from concept_to_indicator_mapping", f"where `Concept` like '{node[0]}'", ] # TODO May need to delve into SQL/database stuff a bit more deeply # for this. Foreign keys perhaps? query = " ".join(query_parts) results = engine.execute(query) if min_temporal_res is not None: if min_temporal_res not in ["month"]: raise ValueError("min_temporal_res must be 'month'") vars_with_required_temporal_resolution = [ r[0] for r in engine.execute( "select distinct `Variable` from indicator where " f"`{min_temporal_res.capitalize()}` is not null" ) ] results = [ r for r in results if r[0] in vars_with_required_temporal_resolution ] node[1]["indicators"] = { x: Indicator(x, "MITRE12") for x in [r[0] for r in take(n, results)] }
python
{ "resource": "" }
q15524
AnalysisGraph.parameterize
train
def parameterize( self, country: Optional[str] = "South Sudan", state: Optional[str] = None, year: Optional[int] = None, month: Optional[int] = None, unit: Optional[str] = None, fallback_aggaxes: List[str] = ["year", "month"], aggfunc: Callable = np.mean, ): """ Parameterize the analysis graph. Args: country year month fallback_aggaxes: An iterable of strings denoting the axes upon which to perform fallback aggregation if the desired constraints cannot be met. aggfunc: The function that will be called to perform the aggregation if there are multiple matches. """ valid_axes = ("country", "state", "year", "month") if any(map(lambda axis: axis not in valid_axes, fallback_aggaxes)): raise ValueError( "All elements of the fallback_aggaxes set must be one of the " f"following: {valid_axes}" ) for n in self.nodes(data=True): for indicator in n[1]["indicators"].values(): indicator.mean, indicator.unit = get_indicator_value( indicator, country, state, year, month, unit, fallback_aggaxes, aggfunc, ) indicator.stdev = 0.1 * abs(indicator.mean)
python
{ "resource": "" }
q15525
AnalysisGraph.delete_nodes
train
def delete_nodes(self, nodes: Iterable[str]): """ Iterate over a set of nodes and remove the ones that are present in the graph. """ for n in nodes: if self.has_node(n): self.remove_node(n)
python
{ "resource": "" }
q15526
AnalysisGraph.delete_node
train
def delete_node(self, node: str): """ Removes a node if it is in the graph. """ if self.has_node(node): self.remove_node(node)
python
{ "resource": "" }
q15527
AnalysisGraph.delete_edge
train
def delete_edge(self, source: str, target: str): """ Removes an edge if it is in the graph. """ if self.has_edge(source, target): self.remove_edge(source, target)
python
{ "resource": "" }
q15528
AnalysisGraph.delete_edges
train
def delete_edges(self, edges: Iterable[Tuple[str, str]]): """ Iterate over a set of edges and remove the ones that are present in the graph. """ for edge in edges: if self.has_edge(*edge): self.remove_edge(*edge)
python
{ "resource": "" }
q15529
AnalysisGraph.merge_nodes
train
def merge_nodes(self, n1: str, n2: str, same_polarity: bool = True): """ Merge node n1 into node n2, with the option to specify relative polarity. Args: n1 n2 same_polarity """ for p in self.predecessors(n1): for st in self[p][n1]["InfluenceStatements"]: if not same_polarity: st.obj_delta["polarity"] = -st.obj_delta["polarity"] st.obj.db_refs["UN"][0] = (n2, st.obj.db_refs["UN"][0][1]) if not self.has_edge(p, n2): self.add_edge(p, n2) self[p][n2]["InfluenceStatements"] = self[p][n1][ "InfluenceStatements" ] else: self[p][n2]["InfluenceStatements"] += self[p][n1][ "InfluenceStatements" ] for s in self.successors(n1): for st in self.edges[n1, s]["InfluenceStatements"]: if not same_polarity: st.subj_delta["polarity"] = -st.subj_delta["polarity"] st.subj.db_refs["UN"][0] = (n2, st.subj.db_refs["UN"][0][1]) if not self.has_edge(n2, s): self.add_edge(n2, s) self[n2][s]["InfluenceStatements"] = self[n1][s][ "InfluenceStatements" ] else: self[n2][s]["InfluenceStatements"] += self[n1][s][ "InfluenceStatements" ] self.remove_node(n1)
python
{ "resource": "" }
q15530
AnalysisGraph.get_subgraph_for_concept
train
def get_subgraph_for_concept( self, concept: str, depth: int = 1, reverse: bool = False ): """ Returns a new subgraph of the analysis graph for a single concept. Args: concept: The concept that the subgraph will be centered around. depth: The depth to which the depth-first search must be performed. reverse: Sets the direction of causal influence flow to examine. Setting this to False (default) will search for upstream causal influences, and setting it to True will search for downstream causal influences. Returns: AnalysisGraph """ nodeset = {concept} if reverse: func = self.predecessors else: func = self.successors for i in range(depth): nodeset.update( chain.from_iterable([list(func(n)) for n in nodeset]) ) return AnalysisGraph(self.subgraph(nodeset).copy())
python
{ "resource": "" }
q15531
create_pgm_dict
train
def create_pgm_dict( lambdaFile: str, asts: List, file_name: str, mode_mapper_dict: dict, save_file=False, ) -> Dict: """ Create a Python dict representing the PGM, with additional metadata for JSON output. """ lambdaStrings = ["import math\n\n"] state = PGMState(lambdaStrings) generator = GrFNGenerator() generator.mode_mapper = mode_mapper_dict pgm = generator.genPgm(asts, state, {}, "")[0] if pgm.get("start"): pgm["start"] = pgm["start"][0] else: pgm["start"] = generator.function_defs[-1] pgm["source"] = [[get_path(file_name, "source")]] # dateCreated stores the date on which the lambda and PGM files were created. # It is stored in YYYYMMDD format pgm["dateCreated"] = f"{datetime.today().strftime('%Y%m%d')}" with open(lambdaFile, "w") as f: f.write("".join(lambdaStrings)) # View the PGM file that will be used to build a scope tree if save_file: json.dump(pgm, open(file_name[:file_name.rfind(".")] + ".json", "w")) return pgm
python
{ "resource": "" }
q15532
filter_and_process_statements
train
def filter_and_process_statements( sts, grounding_score_cutoff: float = 0.8, belief_score_cutoff: float = 0.85, concepts_of_interest: List[str] = [], ): """ Filter preassembled statements according to certain rules. """ filtered_sts = [] counters = {} def update_counter(counter_name): if counter_name in counters: counters[counter_name] += 1 else: counters[counter_name] = 1 for s in tqdm(sts): update_counter("Original number of statements") # Apply belief score threshold cutoff if not s.belief > belief_score_cutoff: continue update_counter(f"Statements with belief score > {belief_score_cutoff}") # Select statements with UN groundings if s.subj.db_refs.get("UN") is None or s.obj.db_refs.get("UN") is None: continue update_counter("Statements with UN groundings") # Apply grounding score cutoffs if not all( x[1] > grounding_score_cutoff for x in (y.db_refs["UN"][0] for y in (s.subj, s.obj)) ): continue update_counter( f"Statements with subj and obj grounding scores > {grounding_score_cutoff}" ) # Assign default polarities if s.subj_delta["polarity"] is None: s.subj_delta["polarity"] = 1 if s.obj_delta["polarity"] is None: s.obj_delta["polarity"] = 1 filtered_sts.append(s) for k, v in counters.items(): print(f"{k}: {v}") return filtered_sts
python
{ "resource": "" }
q15533
create_CAG_with_indicators
train
def create_CAG_with_indicators(input, output, filename="CAG_with_indicators.pdf"): """ Create a CAG with mapped indicators """ with open(input, "rb") as f: G = pickle.load(f) G.map_concepts_to_indicators(min_temporal_res="month") G.set_indicator("UN/events/weather/precipitation", "Historical Average Total Daily Rainfall (Maize)", "DSSAT") G.set_indicator("UN/events/human/agriculture/food_production", "Historical Production (Maize)", "DSSAT") G.set_indicator("UN/entities/human/food/food_security", "IPC Phase Classification", "FEWSNET") G.set_indicator("UN/entities/food_availability", "Production, Meat indigenous, total", "FAO") G.set_indicator("UN/entities/human/financial/economic/market", "Inflation Rate", "ieconomics.com") G.set_indicator("UN/events/human/death", "Battle-related deaths", "WDI") with open(output, "wb") as f: pickle.dump(G, f)
python
{ "resource": "" }
q15534
ComputationalGraph.run
train
def run( self, inputs: Dict[str, Union[float, Iterable]], torch_size: Optional[int] = None, ) -> Union[float, Iterable]: """Executes the GrFN over a particular set of inputs and returns the result. Args: inputs: Input set where keys are the names of input nodes in the GrFN and each key points to a set of input values (or just one). Returns: A set of outputs from executing the GrFN, one for every set of inputs. """ # Set input values for i in self.inputs: self.nodes[i]["value"] = inputs[i] for func_set in self.function_sets: for func_name in func_set: lambda_fn = self.nodes[func_name]["lambda_fn"] output_node = list(self.successors(func_name))[0] signature = self.nodes[func_name]["func_inputs"] input_values = [self.nodes[n]["value"] for n in signature] res = lambda_fn(*input_values) if torch_size is not None and len(signature) == 0: self.nodes[output_node]["value"] = torch.tensor( [res] * torch_size, dtype=torch.double ) else: self.nodes[output_node]["value"] = res # Return the output return self.nodes[self.output_node]["value"]
python
{ "resource": "" }
q15535
GroundedFunctionNetwork.traverse_nodes
train
def traverse_nodes(self, node_set, depth=0): """BFS traversal of nodes that returns name traversal as large string. Args: node_set: Set of input nodes to begin traversal. depth: Current traversal depth for child node viewing. Returns: type: String containing tabbed traversal view. """ tab = " " result = list() for n in node_set: repr = ( n if self.nodes[n]["type"] == "variable" else f"{n}{inspect.signature(self.nodes[n]['lambda_fn'])}" ) result.append(f"{tab * depth}{repr}") result.extend( self.traverse_nodes(self.successors(n), depth=depth + 1) ) return result
python
{ "resource": "" }
q15536
GroundedFunctionNetwork.from_json_and_lambdas
train
def from_json_and_lambdas(cls, file: str, lambdas): """Builds a GrFN from a JSON object. Args: cls: The class variable for object creation. file: Filename of a GrFN JSON file. Returns: type: A GroundedFunctionNetwork object. """ with open(file, "r") as f: data = json.load(f) return cls.from_dict(data, lambdas)
python
{ "resource": "" }
q15537
GroundedFunctionNetwork.from_python_file
train
def from_python_file( cls, python_file, lambdas_path, json_filename: str, stem: str ): """Builds GrFN object from Python file.""" with open(python_file, "r") as f: pySrc = f.read() return cls.from_python_src(pySrc, lambdas_path, json_filename, stem)
python
{ "resource": "" }
q15538
GroundedFunctionNetwork.from_python_src
train
def from_python_src( cls, pySrc, lambdas_path, json_filename: str, stem: str, save_file: bool = False, ): """Builds GrFN object from Python source code.""" asts = [ast.parse(pySrc)] pgm_dict = genPGM.create_pgm_dict( lambdas_path, asts, json_filename, {"FileName": f"{stem}.py"}, # HACK ) lambdas = importlib.__import__(stem + "_lambdas") return cls.from_dict(pgm_dict, lambdas)
python
{ "resource": "" }
q15539
GroundedFunctionNetwork.from_fortran_file
train
def from_fortran_file(cls, fortran_file: str, tmpdir: str = "."): """Builds GrFN object from a Fortran program.""" stem = Path(fortran_file).stem if tmpdir == "." and "/" in fortran_file: tmpdir = Path(fortran_file).parent preprocessed_fortran_file = f"{tmpdir}/{stem}_preprocessed.f" lambdas_path = f"{tmpdir}/{stem}_lambdas.py" json_filename = stem + ".json" with open(fortran_file, "r") as f: inputLines = f.readlines() with open(preprocessed_fortran_file, "w") as f: f.write(preprocessor.process(inputLines)) xml_string = sp.run( [ "java", "fortran.ofp.FrontEnd", "--class", "fortran.ofp.XMLPrinter", "--verbosity", "0", preprocessed_fortran_file, ], stdout=sp.PIPE, ).stdout trees = [ET.fromstring(xml_string)] comments = get_comments.get_comments(preprocessed_fortran_file) os.remove(preprocessed_fortran_file) xml_to_json_translator = translate.XMLToJSONTranslator() outputDict = xml_to_json_translator.analyze(trees, comments) pySrc = pyTranslate.create_python_source_list(outputDict)[0][0] G = cls.from_python_src(pySrc, lambdas_path, json_filename, stem) return G
python
{ "resource": "" }
q15540
GroundedFunctionNetwork.from_fortran_src
train
def from_fortran_src(cls, fortran_src: str, dir: str = "."): """ Create a GroundedFunctionNetwork instance from a string with raw Fortran code. Args: fortran_src: A string with Fortran source code. dir: (Optional) - the directory in which the temporary Fortran file will be created (make sure you have write permission!) Defaults to the current directory. Returns: A GroundedFunctionNetwork instance """ import tempfile fp = tempfile.NamedTemporaryFile('w+t', delete=False, dir=dir) fp.writelines(fortran_src) fp.close() G = cls.from_fortran_file(fp.name, dir) os.remove(fp.name) return G
python
{ "resource": "" }
q15541
GroundedFunctionNetwork.clear
train
def clear(self): """Clear variable nodes for next computation.""" for n in self.nodes(): if self.nodes[n]["type"] == "variable": self.nodes[n]["value"] = None elif self.nodes[n]["type"] == "function": self.nodes[n]["func_visited"] = False
python
{ "resource": "" }
q15542
GroundedFunctionNetwork.to_FIB
train
def to_FIB(self, other): """ Creates a ForwardInfluenceBlanket object representing the intersection of this model with the other input model. Args: other: The GroundedFunctionNetwork object to compare this model to. Returns: A ForwardInfluenceBlanket object to use for model comparison. """ if not isinstance(other, GroundedFunctionNetwork): raise TypeError( f"Expected GroundedFunctionNetwork, but got {type(other)}" ) def shortname(var): return var[var.find("::") + 2 : var.rfind("_")] def shortname_vars(graph, shortname): return [v for v in graph.nodes() if shortname in v] this_var_nodes = [ shortname(n) for (n, d) in self.nodes(data=True) if d["type"] == "variable" ] other_var_nodes = [ shortname(n) for (n, d) in other.nodes(data=True) if d["type"] == "variable" ] shared_vars = set(this_var_nodes).intersection(set(other_var_nodes)) full_shared_vars = { full_var for shared_var in shared_vars for full_var in shortname_vars(self, shared_var) } return ForwardInfluenceBlanket(self, full_shared_vars)
python
{ "resource": "" }
q15543
GroundedFunctionNetwork.to_agraph
train
def to_agraph(self): """ Export to a PyGraphviz AGraph object. """ A = nx.nx_agraph.to_agraph(self) A.graph_attr.update( {"dpi": 227, "fontsize": 20, "fontname": "Menlo", "rankdir": "TB"} ) A.node_attr.update({"fontname": "Menlo"}) def build_tree(cluster_name, root_graph): subgraph_nodes = [ n[0] for n in self.nodes(data=True) if n[1]["parent"] == cluster_name ] root_graph.add_nodes_from(subgraph_nodes) subgraph = root_graph.add_subgraph( subgraph_nodes, name=f"cluster_{cluster_name}", label=cluster_name, style="bold, rounded", ) for n in self.scope_tree.successors(cluster_name): build_tree(n, subgraph) root = [n for n, d in self.scope_tree.in_degree() if d == 0][0] build_tree(root, A) return A
python
{ "resource": "" }
q15544
GroundedFunctionNetwork.to_CAG_agraph
train
def to_CAG_agraph(self): """Returns a variable-only view of the GrFN in the form of an AGraph. Returns: type: A CAG constructed via variable influence in the GrFN object. """ CAG = self.to_CAG() A = nx.nx_agraph.to_agraph(CAG) A.graph_attr.update({"dpi": 227, "fontsize": 20, "fontname": "Menlo"}) A.node_attr.update( { "shape": "rectangle", "color": "#650021", "style": "rounded", "fontname": "Gill Sans", } ) A.edge_attr.update({"color": "#650021", "arrowsize": 0.5}) return A
python
{ "resource": "" }
q15545
GroundedFunctionNetwork.to_call_agraph
train
def to_call_agraph(self): """ Build a PyGraphviz AGraph object corresponding to a call graph of functions. """ A = nx.nx_agraph.to_agraph(self.call_graph) A.graph_attr.update({"dpi": 227, "fontsize": 20, "fontname": "Menlo"}) A.node_attr.update( {"shape": "rectangle", "color": "#650021", "style": "rounded"} ) A.edge_attr.update({"color": "#650021", "arrowsize": 0.5}) return A
python
{ "resource": "" }
q15546
ForwardInfluenceBlanket.S2_surface
train
def S2_surface(self, sizes, bounds, presets, covers, use_torch=False, num_samples = 10): """Calculates the sensitivity surface of a GrFN for the two variables with the highest S2 index. Args: num_samples: Number of samples for sensitivity analysis. sizes: Tuple of (number of x inputs, number of y inputs). bounds: Set of bounds for GrFN inputs. presets: Set of standard values for GrFN inputs. Returns: Tuple: Tuple: The names of the two variables that were selected Tuple: The X, Y vectors of eval values Z: The numpy matrix of output evaluations """ args = self.inputs Si = self.sobol_analysis( num_samples, { "num_vars": len(args), "names": args, "bounds": [bounds[arg] for arg in args], }, covers ) S2 = Si["S2"] (s2_max, v1, v2) = get_max_s2_sensitivity(S2) x_var = args[v1] y_var = args[v2] search_space = [(x_var, bounds[x_var]), (y_var, bounds[y_var])] preset_vals = { arg: presets[arg] for i, arg in enumerate(args) if i != v1 and i != v2 } X = np.linspace(*search_space[0][1], sizes[0]) Y = np.linspace(*search_space[1][1], sizes[1]) if use_torch: Xm, Ym = torch.meshgrid(torch.tensor(X), torch.tensor(Y)) inputs = {n: torch.full_like(Xm, v) for n, v in presets.items()} inputs.update({search_space[0][0]: Xm, search_space[1][0]: Ym}) Z = self.run(inputs, covers).numpy() else: Xm, Ym = np.meshgrid(X, Y) Z = np.zeros((len(X), len(Y))) for x, y in itertools.product(range(len(X)), range(len(Y))): inputs = {n: v for n, v in presets.items()} inputs.update({search_space[0][0]: x, search_space[1][0]: y}) Z[x][y] = self.run(inputs, covers) return X, Y, Z, x_var, y_var
python
{ "resource": "" }
q15547
XMLToJSONTranslator.process_direct_map
train
def process_direct_map(self, root, state) -> List[Dict]: """Handles tags that are mapped directly from xml to IR with no additional processing other than recursive translation of any child nodes.""" val = {"tag": root.tag, "args": []} for node in root: val["args"] += self.parseTree(node, state) return [val]
python
{ "resource": "" }
q15548
XMLToJSONTranslator.parseTree
train
def parseTree(self, root, state: ParseState) -> List[Dict]: """ Parses the XML ast tree recursively to generate a JSON AST which can be ingested by other scripts to generate Python scripts. Args: root: The current root of the tree. state: The current state of the tree defined by an object of the ParseState class. Returns: ast: A JSON ast that defines the structure of the Fortran file. """ if root.tag in self.AST_TAG_HANDLERS: return self.AST_TAG_HANDLERS[root.tag](root, state) elif root.tag in self.libRtns: return self.process_libRtn(root, state) else: prog = [] for node in root: prog += self.parseTree(node, state) return prog
python
{ "resource": "" }
q15549
XMLToJSONTranslator.loadFunction
train
def loadFunction(self, root): """ Loads a list with all the functions in the Fortran File Args: root: The root of the XML ast tree. Returns: None Does not return anything but populates a list (self.functionList) that contains all the functions in the Fortran File. """ for element in root.iter(): if element.tag == "function": self.functionList.append(element.attrib["name"])
python
{ "resource": "" }
q15550
XMLToJSONTranslator.analyze
train
def analyze( self, trees: List[ET.ElementTree], comments: OrderedDict ) -> Dict: outputDict = {} ast = [] # Parse through the ast once to identify and grab all the functions # present in the Fortran file. for tree in trees: self.loadFunction(tree) # Parse through the ast tree a second time to convert the XML ast # format to a format that can be used to generate Python statements. for tree in trees: ast += self.parseTree(tree, ParseState()) """ Find the entry point for the Fortran file. The entry point for a conventional Fortran file is always the PROGRAM section. This 'if' statement checks for the presence of a PROGRAM segment. If not found, the entry point can be any of the functions or subroutines in the file. So, all the functions and subroutines of the program are listed and included as the possible entry point. """ if self.entryPoint: entry = {"program": self.entryPoint[0]} else: entry = {} if self.functionList: entry["function"] = self.functionList if self.subroutineList: entry["subroutine"] = self.subroutineList # Load the functions list and Fortran ast to a single data structure # which can be pickled and hence is portable across various scripts and # usages. outputDict["ast"] = ast outputDict["functionList"] = self.functionList outputDict["comments"] = comments return outputDict
python
{ "resource": "" }
q15551
construct_FAO_ontology
train
def construct_FAO_ontology(): """ Construct FAO variable ontology for use with Eidos. """ df = pd.read_csv("south_sudan_data_fao.csv") gb = df.groupby("Element") d = [ { "events": [ { k: [ {e: [process_variable_name(k, e)]} for e in list(set(gb.get_group(k)["Item"].tolist())) ] } for k in gb.groups.keys() ] } ] yaml = YAML() yaml.default_flow_style = False with open("fao_variable_ontology.yml", "w") as f: yaml.dump(d, f)
python
{ "resource": "" }
q15552
inspect_edge
train
def inspect_edge(G: AnalysisGraph, source: str, target: str): """ 'Drill down' into an edge in the analysis graph and inspect its provenance. This function prints the provenance. Args: G source target """ return create_statement_inspection_table( G[source][target]["InfluenceStatements"] )
python
{ "resource": "" }
q15553
_get_edge_sentences
train
def _get_edge_sentences( G: AnalysisGraph, source: str, target: str ) -> List[str]: """ Return the sentences that led to the construction of a specified edge. Args: G source: The source of the edge. target: The target of the edge. """ return chain.from_iterable( [ [repr(e.text) for e in s.evidence] for s in G.edges[source, target]["InfluenceStatements"] ] )
python
{ "resource": "" }
q15554
get_node_type
train
def get_node_type(type_str): """Returns the NodeType given a name of a JSON function object.""" if type_str == "container": return NodeType.CONTAINER elif type_str == "loop_plate": return NodeType.LOOP elif type_str == "assign": return NodeType.ASSIGN elif type_str == "condition": return NodeType.CONDITION elif type_str == "decision": return NodeType.DECISION else: raise ValueError("Unrecognized type string: ", type_str)
python
{ "resource": "" }
q15555
list_output_formats
train
def list_output_formats(type_list): """This function takes a list of type names and returns a list of format specifiers for list-directed output of values of those types.""" out_format_list = [] for type_item in type_list: item_format = default_output_format(type_item) out_format_list.append(item_format) return out_format_list
python
{ "resource": "" }
q15556
list_data_type
train
def list_data_type(type_list): """This function takes a list of format specifiers and returns a list of data types represented by the format specifiers.""" data_type = [] for item in type_list: match = re.match(r"(\d+)(.+)", item) if not match: reps = 1 if item[0] in "FfEegG": data_type.append("REAL") elif item[0] in "Ii": data_type.append("INTEGER") else: reps = match.group(1) fmt = match.group(2) if "(" in fmt and "," in fmt: fmt = fmt[1:-1].split(",") elif "(" in fmt: fmt = [fmt[1:-1]] else: fmt = [fmt] for i in range(int(reps)): for ft in fmt: if ft[0] in "FfEegG": data_type.append("REAL") elif ft[0] in "Ii": data_type.append("INTEGER") return data_type
python
{ "resource": "" }
q15557
Format.read_line
train
def read_line(self, line): """ Match a line of input according to the format specified and return a tuple of the resulting values """ if not self._read_line_init: self.init_read_line() match = self._re.match(line) assert match is not None, f"Format mismatch (line = {line})" matched_values = [] for i in range(self._re.groups): cvt_re = self._match_exps[i] cvt_div = self._divisors[i] cvt_fn = self._in_cvt_fns[i] match_str = match.group(i + 1) match0 = re.match(cvt_re, match_str) if match0 is not None: if cvt_fn == "float": if "." in match_str: val = float(match_str) else: val = int(match_str) / cvt_div elif cvt_fn == "int": val = int(match_str) else: sys.stderr.write( f"Unrecognized conversion function: {cvt_fn}\n" ) else: sys.stderr.write( f"Format conversion failed: {match_str}\n" ) matched_values.append(val) return tuple(matched_values)
python
{ "resource": "" }
q15558
Format.write_line
train
def write_line(self, values): """ Process a list of values according to the format specified to generate a line of output. """ if not self._write_line_init: self.init_write_line() if len(self._out_widths) > len(values): raise For2PyError(f"ERROR: too few values for format {self._format_list}\n") out_strs = [] for i in range(len(self._out_widths)): out_fmt = self._out_gen_fmt[i] out_width = self._out_widths[i] out_val = out_fmt.format(values[i]) if len(out_val) > out_width: # value too big for field out_val = "*" * out_width out_strs.append(out_val) out_str_exp = ( '"' + self._output_fmt + '".format' + str(tuple(out_strs)) ) out_str = eval(out_str_exp) return out_str + "\n"
python
{ "resource": "" }
q15559
get_variable_and_source
train
def get_variable_and_source(x: str): """ Process the variable name to make it more human-readable. """ xs = x.replace("\\/", "|").split("/") xs = [x.replace("|", "/") for x in xs] if xs[0] == "FAO": return " ".join(xs[2:]), xs[0] else: return xs[-1], xs[0]
python
{ "resource": "" }
q15560
construct_concept_to_indicator_mapping
train
def construct_concept_to_indicator_mapping(n: int = 1) -> Dict[str, List[str]]: """ Create a dictionary mapping high-level concepts to low-level indicators Args: n: Number of indicators to return Returns: Dictionary that maps concept names to lists of indicator names. """ df = pd.read_sql_table("concept_to_indicator_mapping", con=engine) gb = df.groupby("Concept") _dict = { k: [get_variable_and_source(x) for x in take(n, v["Indicator"].values)] for k, v in gb } return _dict
python
{ "resource": "" }
q15561
Mortality.base_mortality_rate
train
def base_mortality_rate(self, index: pd.Index) -> pd.Series: """Computes the base mortality rate for every individual. Parameters ---------- index : A representation of the simulants to compute the base mortality rate for. Returns ------- The base mortality rate for all simulants in the index. """ return pd.Series(self.config.mortality_rate, index=index)
python
{ "resource": "" }
q15562
Mortality.determine_deaths
train
def determine_deaths(self, event: Event): """Determines who dies each time step. Parameters ---------- event : An event object emitted by the simulation containing an index representing the simulants affected by the event and timing information. """ effective_rate = self.mortality_rate(event.index) effective_probability = 1 - np.exp(-effective_rate) draw = self.randomness.get_draw(event.index) affected_simulants = draw < effective_probability self.population_view.update(pd.Series('dead', index=event.index[affected_simulants]))
python
{ "resource": "" }
q15563
_prep_components
train
def _prep_components(component_list: Sequence[str]) -> List[Tuple[str, Tuple[str]]]: """Transform component description strings into tuples of component paths and required arguments. Parameters ---------- component_list : The component descriptions to transform. Returns ------- List of component/argument tuples. """ components = [] for c in component_list: path, args_plus = c.split('(') cleaned_args = _clean_args(args_plus[:-1].split(','), path) components.append((path, cleaned_args)) return components
python
{ "resource": "" }
q15564
ComponentConfigurationParser.get_components
train
def get_components(self, component_config: Union[ConfigTree, List]) -> List: """Extracts component specifications from configuration information and returns initialized components. Parameters ---------- component_config : A hierarchical component specification blob. This configuration information needs to be parsable into a full import path and a set of initialization arguments by the ``parse_component_config`` method. Returns ------- List A list of initialized components. """ if isinstance(component_config, ConfigTree): component_list = self.parse_component_config(component_config.to_dict()) else: # Components were specified in a list rather than a tree. component_list = component_config component_list = _prep_components(component_list) return _import_and_instantiate_components(component_list)
python
{ "resource": "" }
q15565
ComponentConfigurationParser.parse_component_config
train
def parse_component_config(self, component_config: Dict[str, Union[Dict, List]]) -> List[str]: """Parses a hierarchical component specification into a list of standardized component definitions. This default parser expects component configurations as a list of dicts. Each dict at the top level corresponds to a different package and has a single key. This key may be just the name of the package or a Python style import path to the module in which components live. The values of the top level dicts are a list of dicts or strings. If dicts, the keys are another step along the import path. If strings, the strings are representations of calls to the class constructor of components to be generated. This pattern may be arbitrarily nested. Parameters ---------- component_config : A hierarchical component specification blob. Returns ------- List A list of standardized component definitions. Component definition strings are specified as ``'absolute.import.path.ClassName("argument1", "argument2", ...)'``. """ return _parse_component_config(component_config)
python
{ "resource": "" }
q15566
_next_state
train
def _next_state(index, event_time, transition_set, population_view): """Moves a population between different states using information from a `TransitionSet`. Parameters ---------- index : iterable of ints An iterable of integer labels for the simulants. event_time : pandas.Timestamp When this transition is occurring. transition_set : TransitionSet A set of potential transitions available to the simulants. population_view : vivarium.framework.population.PopulationView A view of the internal state of the simulation. """ if len(transition_set) == 0 or index.empty: return outputs, decisions = transition_set.choose_new_state(index) groups = _groupby_new_state(index, outputs, decisions) if groups: for output, affected_index in sorted(groups, key=lambda x: str(x[0])): if output == 'null_transition': pass elif isinstance(output, Transient): if not isinstance(output, State): raise ValueError('Invalid transition output: {}'.format(output)) output.transition_effect(affected_index, event_time, population_view) output.next_state(affected_index, event_time, population_view) elif isinstance(output, State): output.transition_effect(affected_index, event_time, population_view) else: raise ValueError('Invalid transition output: {}'.format(output))
python
{ "resource": "" }
q15567
_groupby_new_state
train
def _groupby_new_state(index, outputs, decisions): """Groups the simulants in the index by their new output state. Parameters ---------- index : iterable of ints An iterable of integer labels for the simulants. outputs : iterable A list of possible output states. decisions : `pandas.Series` A series containing the name of the next state for each simulant in the index. Returns ------- iterable of 2-tuples The first item in each tuple is the name of an output state and the second item is a `pandas.Index` representing the simulants to transition into that state. """ output_map = {o: i for i, o in enumerate(outputs)} groups = pd.Series(index).groupby([output_map[d] for d in decisions]) results = [(outputs[i], pd.Index(sub_group.values)) for i, sub_group in groups] selected_outputs = [o for o, _ in results] for output in outputs: if output not in selected_outputs: results.append((output, pd.Index([]))) return results
python
{ "resource": "" }
q15568
State.next_state
train
def next_state(self, index, event_time, population_view): """Moves a population between different states using information this state's `transition_set`. Parameters ---------- index : iterable of ints An iterable of integer labels for the simulants. event_time : pandas.Timestamp When this transition is occurring. population_view : vivarium.framework.population.PopulationView A view of the internal state of the simulation. """ return _next_state(index, event_time, self.transition_set, population_view)
python
{ "resource": "" }
q15569
State.transition_effect
train
def transition_effect(self, index, event_time, population_view): """Updates the simulation state and triggers any side-effects associated with entering this state. Parameters ---------- index : iterable of ints An iterable of integer labels for the simulants. event_time : pandas.Timestamp The time at which this transition occurs. population_view : `vivarium.framework.population.PopulationView` A view of the internal state of the simulation. """ population_view.update(pd.Series(self.state_id, index=index)) self._transition_side_effect(index, event_time)
python
{ "resource": "" }
q15570
State.add_transition
train
def add_transition(self, output, probability_func=lambda index: np.ones(len(index), dtype=float), triggered=Trigger.NOT_TRIGGERED): """Builds a transition from this state to the given state. output : State The end state after the transition. """ t = Transition(self, output, probability_func=probability_func, triggered=triggered) self.transition_set.append(t) return t
python
{ "resource": "" }
q15571
TransitionSet.choose_new_state
train
def choose_new_state(self, index): """Chooses a new state for each simulant in the index. Parameters ---------- index : iterable of ints An iterable of integer labels for the simulants. Returns ------- outputs : list The possible end states of this set of transitions. decisions: `pandas.Series` A series containing the name of the next state for each simulant in the index. """ outputs, probabilities = zip(*[(transition.output_state, np.array(transition.probability(index))) for transition in self.transitions]) probabilities = np.transpose(probabilities) outputs, probabilities = self._normalize_probabilities(outputs, probabilities) return outputs, self.random.choice(index, outputs, probabilities)
python
{ "resource": "" }
q15572
TransitionSet._normalize_probabilities
train
def _normalize_probabilities(self, outputs, probabilities): """Normalize probabilities to sum to 1 and add a null transition if desired. Parameters ---------- outputs : iterable List of possible end states corresponding to this containers transitions. probabilities : iterable of iterables A set of probability weights whose columns correspond to the end states in `outputs` and whose rows correspond to each simulant undergoing the transition. Returns ------- outputs: list The original output list expanded to include a null transition (a transition back to the starting state) if requested. probabilities : ndarray The original probabilities rescaled to sum to 1 and potentially expanded to include a null transition weight. """ outputs = list(outputs) total = np.sum(probabilities, axis=1) if self.allow_null_transition or not np.any(total): if np.any(total > 1+1e-08): # Accommodate rounding errors raise ValueError( "Null transition requested with un-normalized probability weights: {}".format(probabilities)) total[total > 1] = 1 # Correct allowed rounding errors. probabilities = np.concatenate([probabilities, (1-total)[:, np.newaxis]], axis=1) outputs.append('null_transition') return outputs, probabilities/(np.sum(probabilities, axis=1)[:, np.newaxis])
python
{ "resource": "" }
q15573
Machine.transition
train
def transition(self, index, event_time): """Finds the population in each state and moves them to the next state. Parameters ---------- index : iterable of ints An iterable of integer labels for the simulants. event_time : pandas.Timestamp The time at which this transition occurs. """ for state, affected in self._get_state_pops(index): if not affected.empty: state.next_state(affected.index, event_time, self.population_view.subview([self.state_column]))
python
{ "resource": "" }
q15574
Machine.to_dot
train
def to_dot(self): """Produces a ball and stick graph of this state machine. Returns ------- `graphviz.Digraph` A ball and stick visualization of this state machine. """ from graphviz import Digraph dot = Digraph(format='png') for state in self.states: if isinstance(state, TransientState): dot.node(state.state_id, style='dashed') else: dot.node(state.state_id) for transition in state.transition_set: dot.edge(state.state_id, transition.output.state_id, transition.label()) return dot
python
{ "resource": "" }
q15575
ComponentManager.setup_components
train
def setup_components(self, builder, configuration): """Apply component level configuration defaults to the global config and run setup methods on the components registering and setting up any child components generated in the process. Parameters ---------- builder: Interface to several simulation tools. configuration: Simulation configuration parameters. """ self._managers = _setup_components(builder, self._managers, configuration) self._components = _setup_components(builder, self._components, configuration)
python
{ "resource": "" }
q15576
field_to_markdown
train
def field_to_markdown(field): """Generates markdown text from the metadata of a `field`. Args: field (dict): Dictionary with the metadata of a `field`. Returns: str: Text describing a `field`. """ if "title" in field: field_title = "**{}**".format(field["title"]) else: raise Exception("A `title` is required to describe a field.") field_type = " ({})".format(field["type"]) if "type" in field else "" field_desc = ": {}".format( field["description"]) if "description" in field else "" text_template = "{title}{type}{description}" text = text_template.format(title=field_title, type=field_type, description=field_desc) return text
python
{ "resource": "" }
q15577
DataJson._build_index
train
def _build_index(self): """Iterates over all datasets, distributions, and fields, indexing them.""" datasets_index = {} distributions_index = {} fields_index = {} # iterate over all the datasets for dataset_index, dataset in enumerate(self.datasets): if "identifier" in dataset: datasets_index[dataset["identifier"]] = { "dataset_index": dataset_index } # iterate over the dataset's distributions for distribution_index, distribution in enumerate( dataset.get("distribution", [])): if "identifier" in distribution: distributions_index[distribution["identifier"]] = { "distribution_index": distribution_index, "dataset_identifier": dataset["identifier"] } # iterate over the distribution's fields for field_index, field in enumerate( distribution.get("field", [])): if "id" in field: fields_index[field["id"]] = { "field_index": field_index, "dataset_identifier": dataset["identifier"], "distribution_identifier": distribution["identifier"] } setattr(self, "_distributions_index", distributions_index) setattr(self, "_datasets_index", datasets_index) setattr(self, "_fields_index", fields_index)
python
{ "resource": "" }
q15578
import_by_path
train
def import_by_path(path: str) -> Callable: """Import a class or function given it's absolute path. Parameters ---------- path: Path to object to import """ module_path, _, class_name = path.rpartition('.') return getattr(import_module(module_path), class_name)
python
{ "resource": "" }
q15579
_set_default_value
train
def _set_default_value(dict_obj, keys, value): """Sets a value in a nested dictionary, following a list of keys. Args: dict_obj (dict): A nested dictionary. keys (list): A list of keys used to navigate the dictionary. value (any): A value to set if none is present. """ variable = dict_obj if len(keys) == 1: if not variable.get(keys[0]): variable[keys[0]] = value else: for idx, field in enumerate(keys): if idx < len(keys) - 1: variable = variable[field] if not variable.get(keys[-1]): variable[keys[-1]] = value
python
{ "resource": "" }
q15580
_make_contact_point
train
def _make_contact_point(dataset): """If the required keys are present, builds the "contactPoint" dictionary of a dataset.""" keys = [k for k in ["contactPoint_fn", "contactPoint_hasEmail"] if k in dataset] if keys: dataset["contactPoint"] = { key.replace("contactPoint_", ""): dataset.pop(key) for key in keys } return dataset
python
{ "resource": "" }
q15581
_read_csv_table
train
def _read_csv_table(path): """Reads a CSV file into a list of dictionaries.""" with open(path, 'rb') as csvfile: reader = csv.DictReader(csvfile) table = list(reader) return table
python
{ "resource": "" }
q15582
_read_xlsx_table
train
def _read_xlsx_table(path): """Reads the active sheet of an XLSX file into a list of dictionaries.""" workbook = pyxl.load_workbook(path) worksheet = workbook.active table = helpers.sheet_to_table(worksheet) return table
python
{ "resource": "" }
q15583
validate_model_specification_file
train
def validate_model_specification_file(file_path: str) -> str: """Ensures the provided file is a yaml file""" if not os.path.isfile(file_path): raise ConfigurationError('If you provide a model specification file, it must be a file. ' f'You provided {file_path}') extension = file_path.split('.')[-1] if extension not in ['yaml', 'yml']: raise ConfigurationError(f'Model specification files must be in a yaml format. You provided {extension}') # Attempt to parse the file contents to confirm it is valid yaml with open(file_path) as f: yaml.full_load(f) return file_path
python
{ "resource": "" }
q15584
ConfigNode.get_value_with_source
train
def get_value_with_source(self, layer=None): """Returns a tuple of the value's source and the value at the specified layer. If no layer is specified then the outer layer is used. Parameters ---------- layer : str Name of the layer to use. If None then the outermost where the value exists will be used. Raises ------ KeyError If the value is not set for the specified layer """ if layer: return self._values[layer] for layer in reversed(self._layers): if layer in self._values: return self._values[layer] raise KeyError(layer)
python
{ "resource": "" }
q15585
ConfigNode.metadata
train
def metadata(self): """Returns all values and associated metadata for this node as a dict. The value which would be selected if the node's value was requested is indicated by the `default` flag. """ result = [] for layer in self._layers: if layer in self._values: result.append({ 'layer': layer, 'value': self._values[layer][1], 'source': self._values[layer][0], 'default': layer == self._layers[-1] }) return result
python
{ "resource": "" }
q15586
ConfigNode.set_value
train
def set_value(self, value, layer=None, source=None): """Set a value for a particular layer with optional metadata about source. Parameters ---------- value : str Data to store in the node. layer : str Name of the layer to use. If None then the outermost where the value exists will be used. source : str Metadata indicating the source of this value (e.g. a file path) Raises ------ TypeError If the node is frozen KeyError If the named layer does not exist """ if self._frozen: raise TypeError('Frozen ConfigNode does not support assignment') if not layer: layer = self._layers[-1] self._values[layer] = (source, value)
python
{ "resource": "" }
q15587
ConfigTree.freeze
train
def freeze(self): """Causes the ConfigTree to become read only. This is useful for loading and then freezing configurations that should not be modified at runtime. """ self.__dict__['_frozen'] = True for child in self._children.values(): child.freeze()
python
{ "resource": "" }
q15588
ConfigTree.get_from_layer
train
def get_from_layer(self, name, layer=None): """Get a configuration value from the named layer. Parameters ---------- name : str The name of the value to retrieve layer: str The name of the layer to retrieve the value from. If it is not supplied then the outermost layer in which the key is defined will be used. """ if name not in self._children: if self._frozen: raise KeyError(name) self._children[name] = ConfigTree(layers=self._layers) child = self._children[name] if isinstance(child, ConfigNode): return child.get_value(layer) else: return child
python
{ "resource": "" }
q15589
ConfigTree._set_with_metadata
train
def _set_with_metadata(self, name, value, layer=None, source=None): """Set a value in the named layer with the given source. Parameters ---------- name : str The name of the value value The value to store layer : str, optional The name of the layer to store the value in. If none is supplied then the value will be stored in the outermost layer. source : str, optional The source to attribute the value to. Raises ------ TypeError if the ConfigTree is frozen """ if self._frozen: raise TypeError('Frozen ConfigTree does not support assignment') if isinstance(value, dict): if name not in self._children or not isinstance(self._children[name], ConfigTree): self._children[name] = ConfigTree(layers=list(self._layers)) self._children[name].update(value, layer, source) else: if name not in self._children or not isinstance(self._children[name], ConfigNode): self._children[name] = ConfigNode(list(self._layers)) child = self._children[name] child.set_value(value, layer, source)
python
{ "resource": "" }
q15590
ConfigTree.update
train
def update(self, data: Union[Mapping, str, bytes], layer: str=None, source: str=None):
    """Adds additional data into the ConfigTree.

    Parameters
    ----------
    data
        source data
    layer
        layer to load data into. If none is supplied the outermost one is used
    source
        Source to attribute the values to

    See Also
    --------
    read_dict
    """
    if isinstance(data, dict):
        self._read_dict(data, layer, source)
    elif isinstance(data, ConfigTree):
        # TODO: set this to parse the other config tree including layer and source info. Maybe.
        self._read_dict(data.to_dict(), layer, source)
    elif isinstance(data, str):
        if data.endswith(('.yaml', '.yml')):
            source = source if source else data
            self._load(data, layer, source)
        else:
            try:
                self._loads(data, layer, source)
            except AttributeError:
                raise ValueError("The string data should be a yaml formatted string or a path to a .yaml/.yml file")
    elif data is None:
        pass
    else:
        raise ValueError(f"Update must be called with dictionary, string, or ConfigTree. "
                         f"You passed in {type(data)}")
python
{ "resource": "" }
q15591
ConfigTree._read_dict
train
def _read_dict(self, data_dict, layer=None, source=None):
    """Load a dictionary into the ConfigTree. If the dict contains nested dicts
    then the values will be added recursively. See module docstring for example code.

    Parameters
    ----------
    data_dict : dict
        source data
    layer : str
        layer to load data into. If none is supplied the outermost one is used
    source : str
        Source to attribute the values to
    """
    for k, v in data_dict.items():
        self._set_with_metadata(k, v, layer, source)
python
{ "resource": "" }
q15592
ConfigTree._loads
train
def _loads(self, data_string, layer=None, source=None):
    """Load data from a yaml formatted string.

    Parameters
    ----------
    data_string : str
        yaml formatted string. The root element of the document should be
        an associative array
    layer : str
        layer to load data into. If none is supplied the outermost one is used
    source : str
        Source to attribute the values to
    """
    data_dict = yaml.full_load(data_string)
    self._read_dict(data_dict, layer, source)
python
{ "resource": "" }
q15593
ConfigTree._load
train
def _load(self, f, layer=None, source=None):
    """Load data from a yaml formatted file.

    Parameters
    ----------
    f : str or file like object
        If f is a string then it is interpreted as a path to the file to load.
        If it is a file like object then data is read directly from it.
    layer : str
        layer to load data into. If none is supplied the outermost one is used
    source : str
        Source to attribute the values to
    """
    if hasattr(f, 'read'):
        self._loads(f.read(), layer=layer, source=source)
    else:
        with open(f) as f:
            self._loads(f.read(), layer=layer, source=source)
python
{ "resource": "" }
q15594
ConfigTree.metadata
train
def metadata(self, name):
    """Return value and metadata associated with the named value

    Parameters
    ----------
    name : str
        name to retrieve. If the name contains '.'s it will be retrieved recursively

    Raises
    ------
    KeyError
        if name is not defined in the ConfigTree
    """
    if name in self._children:
        return self._children[name].metadata()
    else:
        # Dotted names descend recursively into child trees.
        head, _, tail = name.partition('.')
        if head in self._children:
            return self._children[head].metadata(tail)
        else:
            raise KeyError(name)
python
{ "resource": "" }
q15595
ConfigTree.unused_keys
train
def unused_keys(self):
    """Lists all keys which are present in the ConfigTree but which have not been accessed."""
    unused = set()
    for k, c in self._children.items():
        if isinstance(c, ConfigNode):
            if not c.has_been_accessed():
                unused.add(k)
        else:
            for ck in c.unused_keys():
                unused.add(k + '.' + ck)
    return unused
python
{ "resource": "" }
q15596
create_validator
train
def create_validator(schema_filename=None, schema_dir=None):
    """Creates the validator needed to initialize a DataJson object.

    To resolve inter-schema references, a validator requires a RefResolver
    configured with the (absolute) base directory and the file from which the
    directory is referenced.

    To validate formats, a validator must be given a FormatChecker explicitly.
    The library default, jsonschema.FormatChecker(), is currently used.

    Args:
        schema_filename (str): Name of the file containing the "master"
            validation schema.
        schema_dir (str): (Absolute) directory containing the master
            validation schema and its references, if any.

    Returns:
        Draft4Validator: A JSONSchema Draft #4 validator. The validator is
            created with a RefResolver that resolves references of
            `schema_filename` within `schema_dir`.
    """
    schema_filename = schema_filename or DEFAULT_CATALOG_SCHEMA_FILENAME
    schema_dir = schema_dir or ABSOLUTE_SCHEMA_DIR
    schema_path = os.path.join(schema_dir, schema_filename)
    schema = readers.read_json(schema_path)

    # Per https://github.com/Julian/jsonschema/issues/98
    # Allows resolving local references to other schemas.
    if platform.system() == 'Windows':
        base_uri = "file:///" + schema_path.replace("\\", "/")
    else:
        base_uri = "file://" + schema_path
    resolver = jsonschema.RefResolver(base_uri=base_uri, referrer=schema)

    format_checker = jsonschema.FormatChecker()

    validator = jsonschema.Draft4Validator(
        schema=schema, resolver=resolver, format_checker=format_checker)

    return validator
python
{ "resource": "" }
q15597
initialize_simulation
train
def initialize_simulation(components: List, input_config: Mapping=None,
                          plugin_config: Mapping=None) -> InteractiveContext:
    """Construct a simulation from a list of components, component configuration,
    and a plugin configuration.

    The simulation context returned by this method still needs to be set up by
    calling its setup method. It is mostly useful for testing and debugging.

    Parameters
    ----------
    components
        A list of initialized simulation components. Corresponds to the
        components block of a model specification.
    input_config
        A nested dictionary with any additional simulation configuration
        information needed. Corresponds to the configuration block of a
        model specification.
    plugin_config
        A dictionary containing a description of any simulation plugins to
        include in the simulation. If you're using this argument, you're
        either deep in the process of simulation development or the
        maintainers have done something wrong. Corresponds to the plugins
        block of a model specification.

    Returns
    -------
    An initialized (but not set up) simulation context.
    """
    config = build_simulation_configuration()
    config.update(input_config)
    plugin_manager = PluginManager(plugin_config)

    return InteractiveContext(config, components, plugin_manager)
python
{ "resource": "" }
q15598
setup_simulation
train
def setup_simulation(components: List, input_config: Mapping=None,
                     plugin_config: Mapping=None) -> InteractiveContext:
    """Construct a simulation from a list of components and call its setup method.

    Parameters
    ----------
    components
        A list of initialized simulation components. Corresponds to the
        components block of a model specification.
    input_config
        A nested dictionary with any additional simulation configuration
        information needed. Corresponds to the configuration block of a
        model specification.
    plugin_config
        A dictionary containing a description of any simulation plugins to
        include in the simulation. If you're using this argument, you're
        either deep in the process of simulation development or the
        maintainers have done something wrong. Corresponds to the plugins
        block of a model specification.

    Returns
    -------
    A simulation context that is set up and ready to run.
    """
    simulation = initialize_simulation(components, input_config, plugin_config)
    simulation.setup()

    return simulation
python
{ "resource": "" }
q15599
initialize_simulation_from_model_specification
train
def initialize_simulation_from_model_specification(model_specification_file: str) -> InteractiveContext:
    """Construct a simulation from a model specification file.

    The simulation context returned by this method still needs to be set up by
    calling its setup method. It is mostly useful for testing and debugging.

    Parameters
    ----------
    model_specification_file
        The path to a model specification file.

    Returns
    -------
    An initialized (but not set up) simulation context.
    """
    model_specification = build_model_specification(model_specification_file)

    plugin_config = model_specification.plugins
    component_config = model_specification.components
    simulation_config = model_specification.configuration

    plugin_manager = PluginManager(plugin_config)
    component_config_parser = plugin_manager.get_plugin('component_configuration_parser')
    components = component_config_parser.get_components(component_config)

    return InteractiveContext(simulation_config, components, plugin_manager)
python
{ "resource": "" }