code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
def mesh(mesh, T_mesh_world=RigidTransform(from_frame='obj', to_frame='world'), style='surface', smooth=False, color=(0.5,0.5,0.5), name=None):
    """Visualize a 3D triangular mesh.

    Parameters
    ----------
    mesh : trimesh.Trimesh
        The mesh to visualize.
    T_mesh_world : autolab_core.RigidTransform
        Pose of the mesh (transform from mesh frame to world frame).
    style : str
        Triangular mesh style, either 'surface' or 'wireframe'.
    smooth : bool
        If True, the mesh is smoothed before rendering.
    color : 3-tuple
        Color of the mesh.
    name : str
        Name for the scene object; a random UUID is used when omitted.
    """
    if not isinstance(mesh, trimesh.Trimesh):
        raise ValueError('Must provide a trimesh.Trimesh object')

    # Simple Phong-style material; wireframe rendering is driven by `style`.
    material = MaterialProperties(
        color=np.array(color),
        k_a=0.5,
        k_d=0.3,
        k_s=0.1,
        alpha=10.0,
        smooth=smooth,
        wireframe=(style == 'wireframe'),
    )
    scene_object = SceneObject(mesh, T_mesh_world, material)
    if name is None:
        name = str(uuid.uuid4())
    Visualizer3D._scene.add_object(name, scene_object)
3.456236
3.537927
0.97691
def mesh_stable_pose(mesh, T_obj_table, T_table_world=RigidTransform(from_frame='table', to_frame='world'), style='wireframe', smooth=False, color=(0.5,0.5,0.5), dim=0.15, plot_table=True, plot_com=False, name=None):
    """Visualize a mesh resting in a stable pose on a table.

    Parameters
    ----------
    mesh : trimesh.Trimesh
        The mesh to visualize.
    T_obj_table : autolab_core.RigidTransform
        Pose of the object relative to the table.
    T_table_world : autolab_core.RigidTransform
        Pose of the table relative to the world.
    style : str
        Mesh style, 'surface' or 'wireframe'.
    smooth : bool
        If True, smooth the mesh before rendering.
    color : 3-tuple
        Color of the mesh.
    dim : float
        Side length of the rendered table.
    plot_table : bool
        Whether to also render the table.
    plot_com : bool
        Whether to render a ball at the object's center of mass.
    name : str
        Name for the object to be added.

    Returns
    -------
    autolab_core.RigidTransform
        The pose of the mesh in the world frame.
    """
    # Normalize frame names, then compose table-relative pose into world frame.
    T_obj_table = T_obj_table.as_frames('obj', 'table')
    T_obj_world = T_table_world * T_obj_table

    Visualizer3D.mesh(mesh, T_obj_world, style=style, smooth=smooth, color=color, name=name)
    if plot_table:
        Visualizer3D.table(T_table_world, dim=dim)
    if plot_com:
        Visualizer3D.points(Point(np.array(mesh.center_mass), 'obj'), T_obj_world, scale=0.01)
    return T_obj_world
2.399166
2.862827
0.838041
def pose(T_frame_world, alpha=0.1, tube_radius=0.005, center_scale=0.01):
    """Plot a 3D pose as a set of axes (x red, y green, z blue).

    Parameters
    ----------
    T_frame_world : autolab_core.RigidTransform
        The pose relative to world coordinates.
    alpha : float
        Length of the plotted x, y, z axes.
    tube_radius : float
        Radius of the plotted x, y, z axes.
    center_scale : float
        Radius of the ball drawn at the pose's origin.
    """
    rotation = T_frame_world.rotation
    origin = T_frame_world.translation

    # Each axis is a segment from the origin along a rotation-matrix column.
    axis_segments = [
        np.array([origin, origin + alpha * rotation[:, k]]) for k in range(3)
    ]

    Visualizer3D.points(origin, color=(1, 1, 1), scale=center_scale)
    for segment, rgb in zip(axis_segments, [(1, 0, 0), (0, 1, 0), (0, 0, 1)]):
        Visualizer3D.plot3d(segment, color=rgb, tube_radius=tube_radius)
1.849384
1.824146
1.013836
def table(T_table_world=RigidTransform(from_frame='table', to_frame='world'), dim=0.16, color=(0,0,0)):
    """Plot a square table plane in 3D.

    Parameters
    ----------
    T_table_world : autolab_core.RigidTransform
        Pose of the table relative to the world.
    dim : float
        Side length of the table.
    color : 3-tuple
        Color of the table surface.
    """
    # Four corners of a square in the z=0 plane, split into two triangles.
    vertices = np.array(
        [[dim, dim, 0], [dim, -dim, 0], [-dim, dim, 0], [-dim, -dim, 0]]
    ).astype('float')
    faces = np.array([[0, 1, 2], [1, 2, 3]])

    plane = trimesh.Trimesh(vertices, faces)
    plane.apply_transform(T_table_world.matrix)
    Visualizer3D.mesh(plane, style='surface', smooth=True, color=color)
2.586938
2.625041
0.985485
def plot3d(points, color=(0.5, 0.5, 0.5), tube_radius=0.005, n_components=30, name=None):
    """Plot a 3D curve through a set of points using a swept tube.

    Parameters
    ----------
    points : (n,3) float
        A series of 3D points defining a curve in space.
    color : (3,) float
        Color of the tube.
    tube_radius : float
        Radius of the tube representing the curve.
    n_components : int
        Number of edges in each polygon representing the tube.
    name : str
        Name for the object to be added.
    """
    points = np.asanyarray(points)
    material = MaterialProperties(
        color=np.array(color),
        k_a=0.5,
        k_d=0.3,
        k_s=0.0,
        alpha=10.0,
        smooth=True,
    )

    # Build a regular n-gon cross-section of radius tube_radius by rotating
    # a single spoke through 2*pi/n_components increments.
    spoke = np.array([0, 1]) * tube_radius
    step = np.pi * 2.0 / n_components
    rotation = np.array(
        [[np.cos(step), -np.sin(step)], [np.sin(step), np.cos(step)]]
    )
    perimeter = []
    for _ in range(n_components):
        perimeter.append(spoke)
        spoke = np.dot(rotation, spoke)
    cross_section = Polygon(perimeter)

    # Sweep the cross-section along the curve to form the tube mesh.
    tube_mesh = trimesh.creation.sweep_polygon(cross_section, points)
    scene_object = SceneObject(tube_mesh, material=material)
    if name is None:
        name = str(uuid.uuid4())
    Visualizer3D._scene.add_object(name, scene_object)
3.393495
3.398594
0.9985
def figure(size=(8,8), *args, **kwargs):
    """Create a new matplotlib figure.

    Parameters
    ----------
    size : 2-tuple
        Size of the view window in inches.
    args, kwargs :
        Forwarded to ``plt.figure``.

    Returns
    -------
    matplotlib figure
        The newly created figure.
    """
    return plt.figure(*args, figsize=size, **kwargs)
3.143019
7.866836
0.399528
def show(filename=None, *args, **kwargs):
    """Show the current figure, or save it to disk instead.

    Parameters
    ----------
    filename : str, optional
        When given, the figure is saved to this path rather than displayed.
    args, kwargs :
        Forwarded to ``plt.show`` / ``plt.savefig``.
    """
    if filename is not None:
        plt.savefig(filename, *args, **kwargs)
    else:
        plt.show(*args, **kwargs)
2.156195
2.725645
0.791077
def imshow(image, auto_subplot=False, **kwargs):
    """Display a perception Image with a colormap appropriate to its type.

    Parameters
    ----------
    image : perception.Image
        Image to display. Grayscale-like images use a gray colormap, depth
        images use a reversed gray colormap, color images use the default.
    auto_subplot : bool
        For multi-channel images (RGB-D, gray-depth), whether to show both
        channels side by side; otherwise only the color/gray channel is shown.
    """
    if isinstance(image, (BinaryImage, GrayscaleImage)):
        plt.imshow(image.data, cmap=plt.cm.gray, **kwargs)
    elif isinstance(image, (ColorImage, SegmentationImage)):
        plt.imshow(image.data, **kwargs)
    elif isinstance(image, DepthImage):
        # Reversed gray map: nearer points render brighter.
        plt.imshow(image.data, cmap=plt.cm.gray_r, **kwargs)
    elif isinstance(image, RgbdImage):
        if auto_subplot:
            plt.subplot(1, 2, 1)
            plt.imshow(image.color.data, **kwargs)
            plt.axis('off')
            plt.subplot(1, 2, 2)
            plt.imshow(image.depth.data, cmap=plt.cm.gray_r, **kwargs)
        else:
            plt.imshow(image.color.data, **kwargs)
    elif isinstance(image, GdImage):
        if auto_subplot:
            plt.subplot(1, 2, 1)
            plt.imshow(image.gray.data, cmap=plt.cm.gray, **kwargs)
            plt.axis('off')
            plt.subplot(1, 2, 2)
            plt.imshow(image.depth.data, cmap=plt.cm.gray_r, **kwargs)
        else:
            plt.imshow(image.gray.data, cmap=plt.cm.gray, **kwargs)
    # NOTE(review): axis('off') applied unconditionally at the end — placement
    # inferred from the flattened source; confirm against the original layout.
    plt.axis('off')
1.525578
1.486343
1.026396
def box(b, line_width=2, color='g', style='-'):
    """Draw a box on the current matplotlib plot.

    Parameters
    ----------
    b : autolab_core.Box
        Box to draw.
    line_width : int
        Width of the box's side lines.
    color : str
        Color of the box.
    style : str
        Line style used to draw the sides.
    """
    if not isinstance(b, Box):
        raise ValueError('Input must be of type Box')

    # Corner pixel coordinates.
    min_i = b.min_pt[1]
    min_j = b.min_pt[0]
    max_i = b.max_pt[1]
    max_j = b.max_pt[0]
    top_left = np.array([min_i, min_j])
    top_right = np.array([max_i, min_j])
    bottom_left = np.array([min_i, max_j])
    bottom_right = np.array([max_i, max_j])

    # Each side is a 2x2 array of endpoint coordinates.
    sides = [
        np.c_[top_left, bottom_left].T,     # left
        np.c_[top_right, bottom_right].T,   # right
        np.c_[top_left, top_right].T,       # top
        np.c_[bottom_left, bottom_right].T, # bottom
    ]
    for side in sides:
        plt.plot(side[:, 0], side[:, 1],
                 linewidth=line_width, color=color, linestyle=style)
1.600614
1.63131
0.981183
def contour(c, subsample=1, size=10, color='g'):
    """Draw a contour on the current plot by scattering its boundary pixels.

    Parameters
    ----------
    c : autolab_core.Contour
        Contour to draw.
    subsample : int
        Subsample rate for boundary pixels (plot every `subsample`-th pixel).
    size : int
        Size of the scattered points.
    color : str
        Color of the points.
    """
    if not isinstance(c, Contour):
        raise ValueError('Input must be of type Contour')
    # boundary_pixels is (i, j); scatter expects (x, y), hence the swap.
    for idx in range(0, c.num_pixels, subsample):
        plt.scatter(c.boundary_pixels[idx, 1], c.boundary_pixels[idx, 0],
                    s=size, c=color)
3.935412
3.800831
1.035408
def all_subs(bounds):
    """Return the subscript tuples for every element of an array.

    Parameters
    ----------
    bounds : list of tuples
        Per-dimension (lo, hi) bounds of the array.

    Returns
    -------
    list of tuples
        All subscript tuples for the array, via idx2subs().
    """
    # One inclusive index range per dimension.
    index_ranges = [range(dim[0], dim[1] + 1) for dim in bounds]
    return idx2subs(index_ranges)
3.338819
3.366642
0.991735
def idx2subs(idx_list):
    """Return all subscript tuples formed from per-dimension index values.

    Given a list `idx_list` of index-value sequences (one per array
    dimension), return the list of subscript tuples for every element those
    index values select — i.e., the Cartesian product, with the last
    dimension varying fastest.

    Note: adapted from the answer by jfs at
    https://stackoverflow.com/questions/533905/get-the-cartesian-product-of-a-series-of-lists
    """
    if not idx_list:
        return [()]
    # Recurse on all but the last dimension, then extend each prefix tuple
    # with every index of the last dimension.
    prefixes = idx2subs(idx_list[:-1])
    last_dim = idx_list[-1]
    return [prefix + (idx,) for prefix in prefixes for idx in last_dim]
4.699176
4.444042
1.05741
def array_values(expr):
    """Return the flat list of values denoted by `expr`.

    `expr` may be an Array (all its elements are returned), a list of
    sub-expressions (each is expanded recursively and the results are
    flattened one level), or a scalar (wrapped in a singleton list).
    """
    if isinstance(expr, Array):
        return expr.get_elems(all_subs(expr._bounds))
    if isinstance(expr, list):
        expanded = [array_values(item) for item in expr]
        return flatten(expanded)
    return [expr]
4.217709
4.519846
0.933153
def array_subscripts(expr):
    """Return the list of array elements denoted by a subscript expression.

    `expr` may be an Array (all of its subscript tuples are returned), a
    list of sub-expressions (each is expanded recursively and the results
    flattened one level), or a scalar (wrapped in a singleton list).
    """
    if isinstance(expr, Array):
        return all_subs(expr._bounds)
    if isinstance(expr, list):
        # BUG FIX: the original recursed via the undefined name `subscripts`;
        # the recursion must call array_subscripts itself (cf. array_values).
        expanded = [array_subscripts(item) for item in expr]
        return flatten(expanded)
    return [expr]
3.876451
4.419535
0.877117
def flatten(in_list):
    """Flatten the top level of `in_list`.

    List elements are spliced into the result; non-list elements are kept
    as-is. Only one level of nesting is removed.
    """
    flat = []
    for element in in_list:
        flat.extend(element if isinstance(element, list) else [element])
    return flat
1.836425
2.049002
0.896254
def implied_loop_expr(expr, start, end, delta):
    """Evaluate an implied loop.

    Applies `expr` to each value of the implied loop running from `start`
    to `end` (inclusive) in steps of `delta`, and returns the flattened
    list of results.

    Parameters
    ----------
    expr : callable
        Function applied to each loop value; may return a scalar or a list.
    start, end, delta : int
        Loop parameters; `end` is inclusive, `delta` may be negative.
    """
    # range() excludes its stop value, so widen by one step to include `end`.
    stop = end + 1 if delta > 0 else end - 1
    results = [expr(x) for x in range(start, stop, delta)]

    # BUG FIX: the original used list(itertools.chain(result_list)), which is
    # a no-op — chain() with a single argument just chains that one iterable.
    # Flatten one level here, keeping scalar results intact.
    flat = []
    for value in results:
        if isinstance(value, list):
            flat.extend(value)
        else:
            flat.append(value)
    return flat
3.689013
4.174087
0.883789
if len(bounds) == 0: raise For2PyError("Zero-length arrays current not handled!.") this_dim = bounds[0] lo,hi = this_dim[0],this_dim[1] sz = hi-lo+1 if len(bounds) == 1: return [None] * sz sub_array = self._mk_uninit_array(bounds[1:]) this_array = [copy.deepcopy(sub_array) for i in range(sz)] return this_array
def _mk_uninit_array(self, bounds)
given a list of bounds for the N dimensions of an array, _mk_uninit_array() creates and returns an N-dimensional array of the size specified by the bounds with each element set to the value None.
4.503263
4.373786
1.029603
lo,hi = bounds[0],bounds[1] assert idx >= lo and idx <= hi, \ f"Array index {idx} out of bounds: {bounds}\n" return idx-lo
def _posn(self, bounds, idx)
given bounds = (lo,hi) and an index value idx, _posn(bounds, idx) returns the position in a 0-based array corresponding to idx in the (lo,hi)-based array. It generates an error if idx < lo or idx > hi.
5.719166
4.836304
1.182549
def _access(self, subs, acc_type, val):
    """Get or set the array element addressed by `subs`.

    Parameters
    ----------
    subs : int or tuple
        Subscript values; a bare int is treated as a 1-D subscript.
    acc_type :
        When equal to _GET_, the element's value is returned; otherwise
        the element is set to `val`.
    val :
        Value to store on a set access (ignored for a get).
    """
    if isinstance(subs, int):
        # A bare integer is interpreted as a single 1-D index value.
        subs = (subs,)
    if len(subs) == 0:
        raise For2PyError("Zero-length arrays currently not handled.")

    bounds = self._bounds
    current = self._values
    n_dims = len(subs)
    for dim in range(n_dims):
        pos = self._posn(bounds[dim], subs[dim])
        if dim == n_dims - 1:
            # Innermost dimension: perform the actual read or write.
            if acc_type == _GET_:
                return current[pos]
            current[pos] = val
        else:
            # Descend into the next nesting level.
            current = current[pos]
4.615295
3.987052
1.157571
def set_elems(self, subs, vals):
    """Set the array elements named by `subs` to the values in `vals`.

    Each element of `subs` is a tuple of subscripts identifying one array
    element. A scalar `vals` (int or float) is broadcast to every
    subscript in `subs`.
    """
    if isinstance(vals, (int, float)):
        # Broadcast a scalar across all requested elements.
        vals = [vals] * len(subs)
    for position in range(len(subs)):
        self.set_(subs[position], vals[position])
3.828466
3.872302
0.98868
def multiple_replace(d: Dict[str, str], text: str) -> str:
    """Perform all of the replacements in `d` on `text` in a single pass.

    Keys are matched literally (regex-escaped). Adapted from the Python
    Cookbook, recipe 3.15:
    https://www.oreilly.com/library/view/python-cookbook/0596001673/ch03s15.html

    Parameters
    ----------
    d : Dict[str, str]
        Mapping of substrings to their replacements.
    text : str
        The text to transform.
    """
    # BUG FIX: an empty dict would compile to the empty pattern, which matches
    # at every position and then raises KeyError on the d[""] lookup.
    if not d:
        return text
    # One alternation over all (escaped) keys, applied in a single pass.
    pattern = re.compile("|".join(map(re.escape, d.keys())))
    return pattern.sub(lambda match: d[match.group(0)], text)
2.779007
2.633297
1.055334
def create_parameterized_CAG(input, output, filename="CAG_with_indicators_and_values.pdf"):
    """Create a CAG with mapped and parameterized indicators.

    Loads a pickled CAG from `input`, parameterizes it for April 2017,
    attaches indicator time-series values, and pickles the result to
    `output`.

    Note: `filename` is accepted for interface compatibility but is not
    used in this function body.
    """
    with open(input, "rb") as f:
        G = pickle.load(f)

    G.parameterize(year=2017, month=4)
    G.get_timeseries_values_for_indicators()

    with open(output, "wb") as f:
        pickle.dump(G, f)
3.743138
3.58232
1.044892
def get_concepts(sts: List[Influence]) -> Set[str]:
    """Return the set of all unique concepts in a list of INDRA statements.

    Each statement contributes the top groundings of its subject and
    object (via nameTuple), and duplicates are collapsed by the set.
    """
    concept_names = flatMap(nameTuple, sts)
    return set(concept_names)
47.264023
42.359104
1.115794
def get_valid_statements_for_modeling(sts: List[Influence]) -> List[Influence]:
    """Select INDRA statements usable for constructing a Delphi model.

    A statement qualifies when it is grounded and both its subject and
    object polarities are set.
    """
    def _usable(s):
        # Grounding plus explicit subj/obj polarities are required.
        return (
            is_grounded_statement(s)
            and s.subj_delta["polarity"] is not None
            and s.obj_delta["polarity"] is not None
        )

    return [s for s in sts if _usable(s)]
4.202281
4.605965
0.912356
def _(s: Influence) -> bool:
    """Check if an Influence statement is grounded.

    True when both the subject and the object are grounded.
    """
    subj_grounded = is_grounded(s.subj)
    obj_grounded = is_grounded(s.obj)
    return subj_grounded and obj_grounded
6.981184
3.631303
1.922501
def is_grounded_statement(s: Influence) -> bool:
    """Check if an Influence statement is grounded.

    True when both the subject and the object concepts are grounded.
    """
    subj_grounded = is_grounded_concept(s.subj)
    obj_grounded = is_grounded_concept(s.obj)
    return subj_grounded and obj_grounded
4.901222
3.360173
1.458622
def _(c: Concept, cutoff: float = 0.7) -> bool:
    """Check if a concept has a high grounding score.

    True when the concept is grounded and its top grounding score is at
    least `cutoff`.
    """
    grounded = is_grounded(c)
    return grounded and top_grounding_score(c) >= cutoff
10.358525
5.403523
1.916995
def _(s: Influence, cutoff: float = 0.7) -> bool:
    """Return True if both subj and obj are grounded to the UN ontology.

    Every agent of the statement must be well grounded at the given
    score cutoff.
    """
    agents = s.agent_list()
    return all(is_well_grounded(agent, cutoff) for agent in agents)
15.860483
8.233821
1.92626
def is_well_grounded_concept(c: Concept, cutoff: float = 0.7) -> bool:
    """Check if a concept has a high grounding score.

    True when the concept is grounded and its top grounding score is at
    least `cutoff`.
    """
    grounded = is_grounded(c)
    return grounded and top_grounding_score(c) >= cutoff
8.009078
4.362353
1.835954
def is_well_grounded_statement(s: Influence, cutoff: float = 0.7) -> bool:
    """Return True if both subj and obj are grounded to the UN ontology.

    Every agent of the statement must be a well-grounded concept at the
    given score cutoff.
    """
    agents = s.agent_list()
    return all(is_well_grounded_concept(agent, cutoff) for agent in agents)
7.037266
5.00788
1.405239
def is_grounded_to_name(c: Concept, name: str, cutoff=0.7) -> bool:
    """Check if a concept is grounded to a given name.

    True only when the concept is well grounded (score >= cutoff) and its
    top grounding equals `name`.
    """
    if is_well_grounded(c, cutoff):
        return top_grounding(c) == name
    return False
9.135172
6.241516
1.463614
def contains_relevant_concept(s: Influence, relevant_concepts: List[str], cutoff=0.7) -> bool:
    """Return True if the statement mentions at least one relevant concept.

    Each concept in `relevant_concepts` is checked against the statement
    with `contains_concept` at the given grounding-score cutoff.
    """
    return any(
        contains_concept(s, concept, cutoff=cutoff)
        for concept in relevant_concepts
    )
4.670257
4.472081
1.044314
def top_grounding(c: Concept) -> str:
    """Return the top-scoring grounding from the UN ontology.

    Falls back to the concept's name when no UN grounding is present.
    """
    if "UN" in c.db_refs:
        # db_refs["UN"] is ordered; the first entry's first field is the
        # top-scoring grounding string.
        return c.db_refs["UN"][0][0]
    return c.name
7.390751
4.97326
1.486098
def nameTuple(s: Influence) -> Tuple[str, str]:
    """Return the top groundings of the subj and obj of a statement.

    Returns
    -------
    Tuple[str, str]
        (subject grounding, object grounding).
    """
    subj_name = top_grounding(s.subj)
    obj_name = top_grounding(s.obj)
    return subj_name, obj_name
10.794258
3.922011
2.752225
def createNewICM():
    """Create a new ICM from the request payload and return its metadata.

    The request body is an Uncharted JSON-serialized dict; the resulting
    analysis graph is assembled, sampled, and persisted to SQL before the
    ICM metadata is returned.
    """
    data = json.loads(request.data)
    G = AnalysisGraph.from_uncharted_json_serialized_dict(data)
    G.assemble_transition_model_from_gradable_adjectives()
    G.sample_from_prior()
    G.to_sql(app=current_app)

    metadata = ICMMetadata.query.filter_by(id=G.id).first().deserialize()
    # model_id is an internal foreign key; don't expose it in the response.
    del metadata["model_id"]
    return jsonify(metadata)
9.076485
8.896259
1.020259
def getICMByUUID(uuid: str):
    """Fetch an ICM's metadata by its UUID."""
    metadata = ICMMetadata.query.filter_by(id=uuid).first().deserialize()
    # model_id is an internal foreign key; don't expose it in the response.
    del metadata["model_id"]
    return jsonify(metadata)
8.808203
7.545318
1.167373
def deleteICM(uuid: str):
    """Delete the ICM with the given UUID and respond 204 No Content."""
    record = ICMMetadata.query.filter_by(id=uuid).first()
    db.session.delete(record)
    db.session.commit()
    return ("", 204)
3.773436
3.510068
1.075032
def getICMPrimitives(uuid: str):
    """Return all ICM causal primitives (TODO - needs filter support)."""
    primitives = [
        p.deserialize()
        for p in CausalPrimitive.query.filter_by(model_id=uuid).all()
    ]
    for primitive in primitives:
        # Strip the internal foreign key before returning.
        del primitive["model_id"]
    return jsonify(primitives)
4.989964
4.213706
1.184222
def getEvidenceForID(uuid: str, prim_id: str):
    """Return evidence for a causal primitive (needs pagination support)."""
    evidence_records = [
        item.deserialize()
        for item in Evidence.query.filter_by(
            causalrelationship_id=prim_id
        ).all()
    ]
    for record in evidence_records:
        # Strip the internal foreign key before returning.
        del record["causalrelationship_id"]
    return jsonify(evidence_records)
4.020826
3.267258
1.230642
def createExperiment(uuid: str):
    """Execute a forward-projection experiment over the model.

    Applies the requested interventions to the model's partial derivatives,
    steps the model forward for the requested number of timesteps, records
    baseline and intervened values per node, and stores the results.

    NOTE(review): indentation reconstructed from a flattened source — the
    nesting of the projection loop should be confirmed against the original.
    """
    data = request.get_json()
    G = DelphiModel.query.filter_by(id=uuid).first().model

    # On Travis CI write the BMI config in the working dir; otherwise in /tmp.
    if os.environ.get("TRAVIS") is not None:
        config_file = "bmi_config.txt"
    else:
        if not os.path.exists("/tmp/delphi"):
            os.makedirs("/tmp/delphi", exist_ok=True)
        config_file = "/tmp/delphi/bmi_config.txt"

    G.initialize(initialize_indicators=False, config_file=config_file)

    # Seed each node's partial derivative from the requested interventions.
    for n in G.nodes(data=True):
        rv = n[1]["rv"]
        rv.partial_t = 0.0
        for variable in data["interventions"]:
            if n[1]["id"] == variable["id"]:
                # TODO : Right now, we are only taking the first value in the
                # "values" list. Need to generalize this so that you can have
                # multiple interventions at different times.
                # TODO : The subtraction of 1 is a TEMPORARY PATCH to address
                # the mismatch in semantics between the ICM API and the Delphi
                # model. MUST FIX ASAP.
                rv.partial_t = variable["values"]["value"]["value"] - 1
                for s0 in G.s0:
                    s0[f"∂({n[0]})/∂t"] = rv.partial_t
                break

    # Persist the experiment and its (initially empty) result record.
    id = str(uuid4())
    experiment = ForwardProjection(baseType="ForwardProjection", id=id)
    db.session.add(experiment)
    db.session.commit()
    result = ForwardProjectionResult(id=id, baseType="ForwardProjectionResult")
    db.session.add(result)
    db.session.commit()

    d = dateutil.parser.parse(data["projection"]["startTime"])
    n_timesteps = data["projection"]["numSteps"]
    for i in range(n_timesteps):
        # Advance the projection date by the requested step size.
        if data["projection"]["stepSize"] == "MONTH":
            d = d + relativedelta(months=1)
        elif data["projection"]["stepSize"] == "YEAR":
            d = d + relativedelta(years=1)

        for n in G.nodes(data=True):
            CausalVariable.query.filter_by(
                id=n[1]["id"]
            ).first().lastUpdated = d.isoformat()
            result.results.append(
                {
                    "id": n[1]["id"],
                    "baseline": {
                        "active": "ACTIVE",
                        "time": d.isoformat(),
                        "value": {"baseType": "FloatValue", "value": 1.0},
                    },
                    "intervened": {
                        "active": "ACTIVE",
                        "time": d.isoformat(),
                        "value": {
                            "baseType": "FloatValue",
                            "value": np.median([s[n[0]] for s in G.s0]),
                        },
                    },
                }
            )

        G.update(update_indicators=False)

        # Hack for 12-month evaluation - have the partial derivative decay over
        # time to restore equilibrium
        tau = 1.0  # Time constant to control the rate of the decay
        for n in G.nodes(data=True):
            for variable in data["interventions"]:
                if n[1]["id"] == variable["id"]:
                    rv = n[1]["rv"]
                    for s0 in G.s0:
                        s0[f"∂({n[0]})/∂t"] = rv.partial_t * exp(-tau * i)

    db.session.add(result)
    db.session.commit()
    return jsonify(
        {
            "id": experiment.id,
            "message": "Forward projection sent successfully",
        }
    )
3.767932
3.728283
1.010635
def getExperiments(uuid: str):
    """List active (running or completed) experiments."""
    experiments = Experiment.query.all()
    return jsonify([experiment.deserialize() for experiment in experiments])
12.848484
10.874336
1.181542
def getExperiment(uuid: str, exp_id: str):
    """Fetch the results of a forward-projection experiment by its id."""
    result = ForwardProjectionResult.query.filter_by(id=exp_id).first()
    return jsonify(result.deserialize())
12.729755
9.553421
1.332481
def create_statement_inspection_table(sts: List[Influence]):
    """Display an HTML table of INDRA statements for manual validity inspection.

    Builds one row per piece of evidence backing each statement, pivots by
    grounding/source/sentence, and returns a styled pandas table.

    Args:
        sts: A list of INDRA statements to be manually inspected for validity.
    """
    columns = [
        "un_groundings",
        "subj_polarity",
        "obj_polarity",
        "Sentence",
        "Source API",
    ]
    # NOTE: the original defined an unused `polarity_to_str` lambda and an
    # unused `subj_adjectives` local; both removed.
    rows = []
    for s in sts:
        # Last path component of the top UN grounding, e.g. "food_insecurity".
        subj_un_grounding = s.subj.db_refs["UN"][0][0].split("/")[-1]
        obj_un_grounding = s.obj.db_refs["UN"][0][0].split("/")[-1]
        subj_polarity = s.subj_delta["polarity"]
        obj_polarity = s.obj_delta["polarity"]
        # One row per piece of supporting evidence.
        for e in s.evidence:
            rows.append(
                (
                    (subj_un_grounding, obj_un_grounding),
                    subj_polarity,
                    obj_polarity,
                    e.text,
                    e.source_api,
                )
            )

    df = pd.DataFrame(rows, columns=columns)
    df = df.pivot_table(index=["un_groundings", "Source API", "Sentence"])

    def hover(hover_color="#ffff99"):
        # Row-highlighting style applied on mouse hover.
        return dict(
            selector="tr:hover",
            props=[("background-color", "%s" % hover_color)],
        )

    styles = [
        hover(),
        dict(props=[("font-size", "100%"), ("font-family", "Gill Sans")]),
    ]
    return df.style.set_table_styles(styles)
3.023146
2.897035
1.043531
def get_python_shell():
    """Determine which Python shell we are running under.

    Returns one of:
      'shell'            -- started python on the command line ("python")
      'ipython'          -- started ipython on the command line ("ipython")
      'ipython-notebook' -- e.g. running in Spyder or started with
                            "ipython qtconsole"
      'jupyter-notebook' -- running in a Jupyter notebook

    See also https://stackoverflow.com/a/37661854
    """
    env = os.environ
    shell = "shell"
    # BUG FIX: env["_"] is set by POSIX shells to the invoked executable but
    # may be absent (e.g. on Windows, or when not launched from a shell);
    # default to "" instead of raising KeyError.
    program = os.path.basename(env.get("_", ""))

    if "jupyter-notebook" in program:
        shell = "jupyter-notebook"
    elif "JPY_PARENT_PID" in env or "ipython" in program:
        shell = "ipython"
        if "JPY_PARENT_PID" in env:
            shell = "ipython-notebook"

    return shell
3.678413
3.069752
1.198277
def create_precipitation_centered_CAG(input, output):
    """Build a CAG examining the downstream effects of precipitation changes.

    Loads a pickled CAG, restricts it to the depth-2 subgraph downstream of
    the precipitation concept, prunes it, patches one known-bad extraction,
    and pickles the result.
    """
    with open(input, "rb") as f:
        G = pickle.load(f)

    G = G.get_subgraph_for_concept(
        "UN/events/weather/precipitation", depth=2, reverse=False
    )
    G.prune(cutoff=2)

    # Manually correcting a bad CWMS extraction
    G.edges[
        "UN/events/weather/precipitation",
        "UN/entities/human/infrastructure/transportation/road",
    ]["InfluenceStatements"][0].obj_delta["polarity"] = -1

    with open(output, "wb") as f:
        pickle.dump(G, f)
7.879923
7.417499
1.062342
def index_modules(root) -> Dict:
    """Index the program units in a parsed Fortran file.

    Scans `root` (an iterable of node mappings) and returns a dict mapping
    each module/program/subroutine name to a (tag, index) pair, where index
    is the node's position in `root`. Each module is later written out into
    a separate Python file.
    """
    interesting_tags = ("module", "program", "subroutine")
    return {
        node["name"]: (node.get("tag"), position)
        for position, node in enumerate(root)
        if node.get("tag") in interesting_tags
    }
4.77494
3.981404
1.19931
def printArray(self, node, printState: PrintState):
    """Print an array declaration as an Array-class object declaration.

    Emits code of the form ``arrayName = Array(Type, [bounds])`` into
    self.pyStrings, plus initialization loops for derived-type arrays.

    NOTE(review): the scope of the initial definedVars guard was
    reconstructed from a flattened source — confirm it wraps only the
    first registration, as written here.
    """
    # Register the (mapped) name the first time we see it.
    if (
        self.nameMapper[node["name"]] not in printState.definedVars
        and self.nameMapper[node["name"]] not in printState.globalVars
    ):
        printState.definedVars += [self.nameMapper[node["name"]]]
    assert int(node["count"]) > 0
    printState.definedVars += [node["name"]]

    # Map the Fortran type onto the Python-side Array element type.
    varType = ""
    if node["type"].upper() == "INTEGER":
        varType = "int"
    elif node["type"].upper() in ("DOUBLE", "REAL"):
        varType = "float"
    elif node["type"].upper() == "CHARACTER":
        varType = "str"
    elif node["isDevTypeVar"]:
        varType = node["type"].lower() + "()"
    assert varType != ""

    # Emit "name = Array(type, [(lo1, up1), (lo2, up2), ...])".
    self.pyStrings.append(f"{node['name']} = Array({varType}, [")
    for i in range(0, int(node["count"])):
        loBound = node["low" + str(i + 1)]
        upBound = node["up" + str(i + 1)]
        dimensions = f"({loBound}, {upBound})"
        if i < int(node["count"]) - 1:
            self.pyStrings.append(f"{dimensions}, ")
        else:
            self.pyStrings.append(f"{dimensions}")
    self.pyStrings.append("])")

    if node["isDevTypeVar"]:
        self.pyStrings.append(printState.sep)
        # This may require updating later when we have to deal with the
        # multi-dimensional derived type arrays
        upBound = node["up1"]
        self.pyStrings.append(
            f"for z in range(1, {upBound}+1):" + printState.sep
        )
        self.pyStrings.append(
            f"    obj = {node['type']}()" + printState.sep
        )
        self.pyStrings.append(
            f"    {node['name']}.set_(z, obj)" + printState.sep
        )
3.035245
2.909245
1.04331
def draw_graph(G: nx.DiGraph, filename: str):
    """Render a networkx graph to a file with Pygraphviz.

    Uses a left-to-right rank direction and the dot layout engine.
    """
    agraph = to_agraph(G)
    agraph.graph_attr["rankdir"] = "LR"
    agraph.draw(filename, prog="dot")
3.76259
2.819263
1.3346
def get_input_nodes(G: nx.DiGraph) -> List[str]:
    """Return all input nodes of the network (nodes with no incoming edges)."""
    return [node for node, degree in G.in_degree() if degree == 0]
2.839655
2.240824
1.267237
def get_output_nodes(G: nx.DiGraph) -> List[str]:
    """Return all output nodes of the network (nodes with no outgoing edges)."""
    return [node for node, degree in G.out_degree() if degree == 0]
2.989378
2.350573
1.271765
def nx_graph_from_dotfile(filename: str) -> nx.DiGraph:
    """Load a DOT file as a networkx DiGraph with all edge directions reversed."""
    parsed = read_dot(filename)
    return nx.DiGraph(parsed.reverse())
15.830872
6.127468
2.583591
def to_dotfile(G: nx.DiGraph, filename: str):
    """Write a networkx graph out to a DOT file via Pygraphviz."""
    agraph = to_agraph(G)
    agraph.write(filename)
8.741064
5.577115
1.567309
def get_shared_nodes(G1: nx.DiGraph, G2: nx.DiGraph) -> List[str]:
    """Return all nodes common to both networks (order unspecified)."""
    shared = set(G1.nodes()) & set(G2.nodes())
    return list(shared)
2.935899
2.176776
1.348737
def line_is_comment(line: str) -> bool:
    """Return True iff `line` is a FORTRAN comment line.

    From the FORTRAN Language Reference
    (https://docs.oracle.com/cd/E19957-01/805-4939/z40007332024/index.html):
    a line with c, C, *, d, D, or ! in column one is a comment line; an
    exclamation mark in any column starts a comment; a totally blank line
    is a comment line.

    Args:
        line: The source line to classify.
    Returns:
        True iff line is a comment, False otherwise.
    """
    # BUG FIX: guard against the empty string — line[0] raised IndexError,
    # although a totally blank line should count as a comment.
    if line and line[0] in "cCdD*!":
        return True
    stripped = line.strip()
    # Blank line, or first nonblank character is '!'.
    return len(stripped) == 0 or stripped[0] == "!"
6.086111
4.035263
1.508232
def line_starts_subpgm(line: str) -> Tuple[bool, Optional[str]]:
    """Indicate whether a line begins a subprogram definition.

    Args:
        line: The source line to examine.
    Returns:
        (True, f_name) if line begins a definition for subprogram f_name;
        (False, None) if it does not.
    """
    # Subroutines and functions are recognized by their respective patterns.
    for pattern in (RE_SUB_START, RE_FN_START):
        match = pattern.match(line)
        if match is not None:
            return (True, match.group(1))
    return (False, None)
2.20415
1.90285
1.158341
def program_unit_name(line: str) -> str:
    """Return the name of the program unit that `line` starts.

    A program unit is a program, module, subprogram, or function. The line
    is expected to match RE_PGM_UNIT_START (asserted).
    """
    match = RE_PGM_UNIT_START.match(line)
    assert match is not None
    return match.group(2)
6.472527
5.281526
1.225503
def line_is_continuation(line: str) -> bool:
    """Return True iff `line` is a continuation line.

    A continuation line is one whose first nonblank character is '&'.
    """
    return line.lstrip().startswith("&")
7.344244
5.499273
1.335494
def line_is_executable(line: str) -> bool:
    """Return True iff the line can start an executable statement.

    Comment lines and type-declaration lines are never executable; any
    remaining line must match one of the EXECUTABLE_CODE_START patterns.
    """
    if line_is_comment(line):
        return False
    if re.match(RE_TYPE_NAMES, line):
        return False
    return any(
        re.match(pattern, line) is not None
        for pattern in EXECUTABLE_CODE_START
    )
4.727977
4.614385
1.024617
def prepend(x: T, xs: Iterable[T]) -> Iterator[T]:
    """Prepend a value to an iterable.

    Parameters
    ----------
    x
        An element of type T.
    xs
        An iterable of elements of type T.

    Returns
    -------
    Iterator
        An iterator yielding *x* followed by the elements of *xs*.

    Examples
    --------
    >>> from delphi.utils.fp import prepend
    >>> list(prepend(1, [2, 3]))
    [1, 2, 3]
    """
    yield x
    yield from xs
9.201097
97.896278
0.093988
def append(x: T, xs: Iterable[T]) -> Iterator[T]:
    """Append a value to an iterable.

    Parameters
    ----------
    x
        An element of type T.
    xs
        An iterable of elements of type T.

    Returns
    -------
    Iterator
        An iterator yielding the elements of *xs*, then *x*.

    Examples
    --------
    >>> from delphi.utils.fp import append
    >>> list(append(1, [2, 3]))
    [2, 3, 1]
    """
    yield from xs
    yield x
12.335661
32.645649
0.377865
def scanl(f: Callable[[T, U], T], x: T, xs: Iterable[U]) -> Iterator[T]:
    """Iterator of accumulated results of a binary function over an iterable.

    .. math::
        scanl(f, x_0, [x_1, x_2, ...]) = [x_0, f(x_0, x_1), f(f(x_0, x_1), x_2), ...]

    Parameters
    ----------
    f
        A binary function of two arguments of type T.
    x
        The initializer element of type T.
    xs
        An iterable of elements of type T.

    Examples
    --------
    >>> from delphi.utils.fp import scanl
    >>> list(scanl(lambda x, y: x + y, 10, range(5)))
    [10, 10, 11, 13, 16, 20]
    """
    # The initializer is chained in front of xs so accumulate() seeds from it.
    return accumulate(chain([x], xs), f)
10.457699
36.233574
0.288619
def scanl1(f: Callable[[T, T], T], xs: Iterable[T]) -> Iterator[T]:
    """Iterator of accumulated results, seeded from the iterable's first element.

    .. math::
        scanl1(f, [x_0, x_1, x_2, ...]) = [x_0, f(x_0, x_1), f(f(x_0, x_1), x_2), ...]

    Parameters
    ----------
    f
        A binary function of two arguments of type T.
    xs
        An iterable of elements of type T.

    Examples
    --------
    >>> from delphi.utils.fp import scanl1
    >>> list(scanl1(lambda x, y: x + y, range(5)))
    [0, 1, 3, 6, 10]
    """
    # accumulate() uses the first element of xs as the seed value.
    running = accumulate(xs, f)
    return running
14.537207
62.706932
0.231828
def foldl(f: Callable[[T, U], T], x: T, xs: Iterable[U]) -> T:
    """Accumulated result of a binary function applied over an iterable.

    .. math::
        foldl(f, x_0, [x_1, x_2, x_3]) = f(f(f(x_0, x_1), x_2), x_3)

    Examples
    --------
    >>> from delphi.utils.fp import foldl
    >>> foldl(lambda x, y: x + y, 10, range(5))
    20
    """
    # reduce() with an explicit initializer is exactly a left fold.
    result = reduce(f, xs, x)
    return result
8.673016
41.785534
0.20756
def foldl1(f: Callable[[T, T], T], xs: Iterable[T]) -> T:
    """Accumulated result of a binary function, seeded from the first element.

    .. math::
        foldl1(f, [x_0, x_1, x_2, x_3]) = f(f(f(x_0, x_1), x_2), x_3)

    Examples
    --------
    >>> from delphi.utils.fp import foldl1
    >>> foldl1(lambda x, y: x + y, range(5))
    10
    """
    # reduce() without an initializer seeds from the first element of xs.
    result = reduce(f, xs)
    return result
23.137264
39.975464
0.578787
def flatten(xs: Union[List, Tuple]) -> List:
    """Recursively flatten a nested list or tuple into a flat list.

    Non-sequence values are wrapped in a singleton list; lists and tuples
    are flattened to arbitrary depth.
    """
    if isinstance(xs, (list, tuple)):
        # Flatten each child, then concatenate the resulting lists.
        return sum(map(flatten, xs), [])
    return [xs]
3.325545
2.598588
1.279751
def iterate(f: Callable[[T], T], x: T) -> Iterator[T]:
    """Infinite iterator of successive applications of a function.

    .. math::
        iterate(f, x) = [x, f(x), f(f(x)), f(f(f(x))), ...]

    Examples
    --------
    >>> from delphi.utils.fp import iterate, take
    >>> list(take(5, iterate(lambda x: x*2, 1)))
    [1, 2, 4, 8, 16]
    """
    # Generator form of the scanl-over-repeat construction: yield the
    # current value, then step it through f, forever.
    current = x
    while True:
        yield current
        current = f(current)
8.113264
22.40728
0.362082
def ptake(n: int, xs: Iterable[T]) -> Iterable[T]:
    """take() with a tqdm progress bar.

    Wraps the first `n` elements of `xs` in a progress bar of known
    total length `n`.
    """
    prefix = take(n, xs)
    return tqdm(prefix, total=n)
7.2638
3.458182
2.100468
def ltake(n: int, xs: Iterable[T]) -> List[T]:
    """A non-lazy version of take: the first `n` elements of `xs` as a list."""
    prefix = take(n, xs)
    return list(prefix)
4.939238
4.338319
1.138514
def compose(*fs: Any) -> Callable:
    """Compose functions from left to right.

    e.g. compose(f, g)(x) = f(g(x))
    """
    def _pair(f, g):
        # Wrap two callables so the right one runs first.
        def composed(*args):
            return f(g(*args))
        return composed

    # Left fold over the function list (foldl1 is reduce without seed).
    return reduce(_pair, fs)
5.817053
5.43034
1.071213
return foldl1(lambda f, g: lambda *x: g(f(*x)), fs)
def rcompose(*fs: Any) -> Callable
Compose functions from right to left. e.g. rcompose(f, g)(x) = g(f(x))
6.328224
5.890402
1.074328
return flatten(lmap(f, xs))
def flatMap(f: Callable, xs: Iterable) -> List
Map a function onto an iterable and flatten the result.
17.909992
8.443766
2.121091
args = [iter(xs)] * n return zip_longest(*args, fillvalue=fillvalue)
def grouper(xs: Iterable, n: int, fillvalue=None)
Collect data into fixed-length chunks or blocks. >>> from delphi.utils.fp import grouper >>> list(grouper('ABCDEFG', 3, 'x')) [('A', 'B', 'C'), ('D', 'E', 'F'), ('G', 'x', 'x')]
3.205609
4.418711
0.725462
climis_crop_production_csvs = glob( "{data_dir}/Climis South Sudan Crop Production Data/" "Crops_EstimatedProductionConsumptionBalance*.csv" ) state_county_df = pd.read_csv( f"{data_dir}/ipc_data.csv", skipinitialspace=True ) combined_records = [] for f in climis_crop_production_csvs: year = int(f.split("/")[-1].split("_")[2].split(".")[0]) df = pd.read_csv(f).dropna() for i, r in df.iterrows(): record = { "Year": year, "Month": None, "Source": "CliMIS", "Country": "South Sudan", } region = r["State/County"].strip() if region.lower() in state_county_df["State"].str.lower().values: record["State"] = region record["County"] = None else: potential_states = state_county_df.loc[ state_county_df["County"] == region ]["State"] record["State"] = ( potential_states.iloc[0] if len(potential_states) != 0 else None ) record["County"] = region for field in r.index: if field != "State/County": if "Net Cereal production" in field: record["Variable"] = "Net Cereal Production" record["Value"] = r[field] if field.split()[-1].startswith("("): record["Unit"] = field.split()[-1][1:-1].lower() else: record["Unit"] = None combined_records.append(record) df = pd.DataFrame(combined_records) return df
def process_climis_crop_production_data(data_dir: str)
Process CliMIS crop production data
2.991017
2.89694
1.032474
enumerated_lines = list(enumerate(lines, 1)) i = 0 while i < len(enumerated_lines): (n, code_line) = enumerated_lines[i] if not line_is_comment(code_line): (code_part, comment_part) = split_trailing_comment(code_line) if comment_part is not None: enumerated_lines[i] = (n, comment_part) enumerated_lines.insert(i + 1, (n, code_part)) i += 1 return enumerated_lines
def separate_trailing_comments(lines: List[str]) -> List[Tuple[int, str]]
Given a list of numbered Fortran source code lines, i.e., pairs of the form (n, code_line) where n is a line number and code_line is a line of code, separate_trailing_comments() behaves as follows: for each pair (n, code_line) where code_line can be broken into two parts -- a code portion code_part and a trailing comment portion comment_part, such that code_part and comment_part are both non-empty, it replaces the pair (n, code_line) by two pairs (n, comment_part) and (n, code_part). The return value is the resulting list of numbered lines.
2.385787
1.935791
1.232461
# Before a continuation line L1 is merged with the line L0 before it (and # presumably the one L1 is continuing), ensure that L0 is not a comment. # If L0 is a comment, swap L0 and L1. chg = True while chg: chg = False i = 0 while i < len(lines) - 1: ln0, ln1 = lines[i], lines[i + 1] if line_is_comment(ln0[1]) and line_is_continuation(ln1[1]): # swap the code portions of lines[i] and lines[i+1] lines[i], lines[i + 1] = (ln0[0], ln1[1]), (ln1[0], ln0[1]) chg = True i += 1 # Merge continuation lines chg = True while chg: chg = False i = 0 while i < len(lines): line = lines[i] if line_is_continuation(line[1]): assert i > 0 (prev_linenum, prev_line_code) = lines[i - 1] curr_line_code = line[1].lstrip()[ 1: ] # remove continuation char merged_code = prev_line_code.rstrip() + curr_line_code.lstrip() lines[i - 1] = (prev_linenum, merged_code) lines.pop(i) chg = True i += 1 return lines
def merge_continued_lines(lines)
Given a list of numered Fortran source code lines, i.e., pairs of the form (n, code_line) where n is a line number and code_line is a line of code, merge_continued_lines() merges sequences of lines that are indicated to be continuation lines.
2.854334
2.76513
1.032261
if line_is_comment(line): return "comment" elif line_is_executable(line): return "exec_stmt" elif line_is_pgm_unit_end(line): return "pgm_unit_end" else: if line_is_pgm_unit_start(line): return "pgm_unit_start" else: return "other"
def type_of_line(line)
Given a line of code, type_of_line() returns a string indicating what kind of code it is.
2.779669
2.701728
1.028848
curr_comment = [] curr_fn, prev_fn, curr_marker = None, None, None comments = OrderedDict() # curr_state refers to the state of the finite-state machine (see above) curr_state = "outside" comments["$file_head"] = [] comments["$file_foot"] = [] for i in range(len(lines)): (linenum, line) = lines[i] # determine what kind of line this is line_type = type_of_line(line) # process the line appropriately if curr_state == "outside": assert line_type in ("comment", "pgm_unit_start"), ( line_type, line, ) if line_type == "comment": curr_comment.append(line) lines[i] = (linenum, None) else: # line_type == "pgm_unit_start" pgm_unit_name = program_unit_name(line) comments["$file_head"] = curr_comment if prev_fn is not None: comments[prev_fn]["foot"] = curr_comment prev_fn = curr_fn curr_fn = pgm_unit_name internal_comments = OrderedDict() comments[curr_fn] = init_comment_map( curr_comment, [], [], internal_comments ) curr_comment = [] elif curr_state == "in_neck": assert line_type in ("comment", "exec_stmt", "other") if line_type == "comment": curr_comment.append(line) lines[i] = (linenum, None) elif line_type == "exec_stmt": comments[curr_fn]["neck"] = curr_comment curr_comment = [] else: pass # nothing to do -- continue elif curr_state == "in_body": assert line_type in ( "comment", "exec_stmt", "pgm_unit_end", ), f"[Line {linenum}]: {line}" if line_type == "comment": # Ignore empty lines, which are technically comments but which # don't contribute any semantic content. 
if line != "\n": marker_var = f"{INTERNAL_COMMENT_PREFIX}_{linenum}" marker_stmt = f" {marker_var} = .True.\n" internal_comments[marker_var] = line lines[i] = (linenum, marker_stmt) else: pass # nothing to do -- continue # update the current state curr_state = TRANSITIONS[curr_state][line_type] # if there's a comment at the very end of the file, make it the foot # comment of curr_fn if curr_comment != [] and comments.get(curr_fn): comments[curr_fn]["foot"] = curr_comment comments["$file_foot"] = curr_comment return (lines, comments)
def extract_comments( lines: List[Tuple[int, str]] ) -> Tuple[List[Tuple[int, str]], Dict[str, List[str]]]
Given a list of numbered lines from a Fortran file where comments internal to subprogram bodies have been moved out into their own lines, extract_comments() extracts comments into a dictionary and replaces each comment internal to subprogram bodies with a marker statement. It returns a pair (code, comments) where code is a list of numbered lines with comments removed and marker statements (plus corresponding variable declarations) added; and comments is a dictionary mapping marker statement variables to the corresponding comments.
3.263098
3.214581
1.015093
if line.find("!") == -1: return (line, None) i = 0 while i < len(line): if line[i] == "'": j = line.find("'", i + 1) if j == -1: sys.stderr.write("WEIRD: unbalanced quote ': line = " + line) return line else: i = j + 1 elif line[i] == '"': j = line.find('"', i + 1) if j == -1: sys.stderr.write('WEIRD: unbalanced quote ": line = ' + line) return line else: i = j + 1 elif line[i] == "!": # partial-line comment comment_part = line[i:] code_part = line[:i].rstrip() + "\n" return (code_part, comment_part) else: i += 1 return (line, None)
def split_trailing_comment(line: str) -> str
Takes a line and splits it into two parts (code_part, comment_part) where code_part is the line up to but not including any trailing comment (the '!' comment character and subsequent characters to the end of the line), while comment_part is the trailing comment. Args: line: A line of Fortran source code. Returns: A pair (code_part, comment_part) where comment_part is the trailing comment. If the line does not contain any trailing comment, then comment_part is None.
2.536939
2.269598
1.117792
lines = separate_trailing_comments(inputLines) merge_continued_lines(lines) (lines, comments) = extract_comments(lines) actual_lines = [ line[1] for line in lines if line[1] is not None and "i_g_n_o_r_e___m_e_" not in line[1] ] return "".join(actual_lines)
def process(inputLines: List[str]) -> str
process() provides the interface used by an earlier version of this preprocessor.
6.583085
6.298597
1.045167
for node in self.nodes(data=True): node[1]["id"] = str(uuid4()) for edge in self.edges(data=True): edge[2]["id"] = str(uuid4())
def assign_uuids_to_nodes_and_edges(self)
Assign uuids to nodes and edges.
2.216356
1.966489
1.127063
with open(file, "rb") as f: sts = pickle.load(f) return cls.from_statements(sts)
def from_statements_file(cls, file: str)
Construct an AnalysisGraph object from a pickle file containing a list of INDRA statements.
4.576714
3.022969
1.51398
_dict = {} for s in sts: if assign_default_polarities: for delta in deltas(s): if delta["polarity"] is None: delta["polarity"] = 1 concepts = nameTuple(s) # Excluding self-loops for now: if concepts[0] != concepts[1]: if all( map(exists, (delta["polarity"] for delta in deltas(s))) ): if concepts in _dict: _dict[concepts].append(s) else: _dict[concepts] = [s] edges = [ (*concepts, {"InfluenceStatements": statements}) for concepts, statements in _dict.items() ] return cls(edges)
def from_statements( cls, sts: List[Influence], assign_default_polarities: bool = True )
Construct an AnalysisGraph object from a list of INDRA statements. Unknown polarities are set to positive by default. Args: sts: A list of INDRA Statements Returns: An AnalysisGraph instance constructed from a list of INDRA statements.
4.505031
4.430318
1.016864
eidosProcessor = process_text(text) return cls.from_statements(eidosProcessor.statements)
def from_text(cls, text: str)
Construct an AnalysisGraph object from text, using Eidos to perform machine reading.
23.594572
9.121926
2.586578
with open(file, "r") as f: _dict = json.load(f) return cls.from_uncharted_json_serialized_dict(_dict)
def from_uncharted_json_file(cls, file)
Construct an AnalysisGraph object from a file containing INDRA statements serialized exported by Uncharted's CauseMos webapp.
3.408164
3.76241
0.905846
sts = _dict["statements"] G = nx.DiGraph() for s in sts: if len(s["evidence"]) >= minimum_evidence_pieces_required: subj, obj = s["subj"], s["obj"] if ( subj["db_refs"]["concept"] is not None and obj["db_refs"]["concept"] is not None ): subj_name, obj_name = [ "/".join(s[x]["db_refs"]["concept"].split("/")[:]) for x in ["subj", "obj"] ] G.add_edge(subj_name, obj_name) subj_delta = s["subj_delta"] obj_delta = s["obj_delta"] for delta in (subj_delta, obj_delta): # TODO : Ensure that all the statements provided by # Uncharted have unambiguous polarities. if delta["polarity"] is None: delta["polarity"] = 1 influence_stmt = Influence( Concept(subj_name, db_refs=subj["db_refs"]), Concept(obj_name, db_refs=obj["db_refs"]), subj_delta=s["subj_delta"], obj_delta=s["obj_delta"], evidence=[ INDRAEvidence( source_api=ev["source_api"], annotations=ev["annotations"], text=ev["text"], epistemics=ev.get("epistemics"), ) for ev in s["evidence"] ], ) influence_sts = G.edges[subj_name, obj_name].get( "InfluenceStatements", [] ) influence_sts.append(influence_stmt) G.edges[subj_name, obj_name][ "InfluenceStatements" ] = influence_sts for concept, indicator in _dict[ "concept_to_indicator_mapping" ].items(): if indicator is not None: indicator_source, indicator_name = ( indicator["name"].split("/")[0], "/".join(indicator["name"].split("/")[1:]), ) if concept in G: if G.nodes[concept].get("indicators") is None: G.nodes[concept]["indicators"] = {} G.nodes[concept]["indicators"][indicator_name] = Indicator( indicator_name, indicator_source ) self = cls(G) self.assign_uuids_to_nodes_and_edges() return self
def from_uncharted_json_serialized_dict( cls, _dict, minimum_evidence_pieces_required: int = 1 )
Construct an AnalysisGraph object from a dict of INDRA statements exported by Uncharted's CauseMos webapp.
2.692025
2.494018
1.079393
df = pd.read_sql_table("gradableAdjectiveData", con=engine) gb = df.groupby("adjective") rs = gaussian_kde( flatMap( lambda g: gaussian_kde(get_respdevs(g[1])) .resample(self.res)[0] .tolist(), gb, ) ).resample(self.res)[0] for edge in self.edges(data=True): edge[2]["ConditionalProbability"] = constructConditionalPDF( gb, rs, edge ) edge[2]["βs"] = np.tan( edge[2]["ConditionalProbability"].resample(self.res)[0] )
def assemble_transition_model_from_gradable_adjectives(self)
Add probability distribution functions constructed from gradable adjective data to the edges of the analysis graph data structure. Args: adjective_data res
7.058688
6.78685
1.040054
# simple_path_dict caches the results of the graph traversal that finds # simple paths between pairs of nodes, so that it doesn't have to be # executed for every sampled transition matrix. node_pairs = list(permutations(self.nodes(), 2)) simple_path_dict = { node_pair: [ list(pairwise(path)) for path in nx.all_simple_paths(self, *node_pair) ] for node_pair in node_pairs } self.transition_matrix_collection = [] elements = self.get_latent_state_components() for i in range(self.res): A = pd.DataFrame( np.identity(2 * len(self)), index=elements, columns=elements ) for node in self.nodes: A[f"∂({node})/∂t"][node] = self.Δt for node_pair in node_pairs: A[f"∂({node_pair[0]})/∂t"][node_pair[1]] = sum( np.prod( [ self.edges[edge[0], edge[1]]["βs"][i] for edge in simple_path_edge_list ] ) * self.Δt for simple_path_edge_list in simple_path_dict[node_pair] ) self.transition_matrix_collection.append(A)
def sample_from_prior(self)
Sample elements of the stochastic transition matrix from the prior distribution, based on gradable adjectives.
4.16186
3.94225
1.055707
return { n[0]: { i.name: np.random.normal(s[n[0]] * i.mean, i.stdev) for i in n[1]["indicators"].values() } for n in self.nodes(data=True) }
def sample_observed_state(self, s: pd.Series) -> Dict
Sample observed state vector. This is the implementation of the emission function. Args: s: Latent state vector. Returns: Observed state vector.
5.358847
6.487767
0.825992
self.latent_state_sequences = lmap( lambda A: ltake( n_timesteps, iterate( lambda s: pd.Series(A @ s.values, index=s.index), self.s0 ), ), self.transition_matrix_collection, ) self.observed_state_sequences = [ [self.sample_observed_state(s) for s in latent_state_sequence] for latent_state_sequence in self.latent_state_sequences ]
def sample_from_likelihood(self, n_timesteps=10)
Sample a collection of observed state sequences from the likelihood model given a collection of transition matrices. Args: n_timesteps: The number of timesteps for the sequences.
4.962832
4.319321
1.148984
# Choose the element of A to perturb self.source, self.target, self.edge_dict = random.choice( list(self.edges(data=True)) ) self.original_value = A[f"∂({self.source})/∂t"][self.target] A[f"∂({self.source})/∂t"][self.target] += np.random.normal(scale=0.001)
def sample_from_proposal(self, A: pd.DataFrame) -> None
Sample a new transition matrix from the proposal distribution, given a current candidate transition matrix. In practice, this amounts to the in-place perturbation of an element of the transition matrix currently being used by the sampler. Args
4.613928
4.938904
0.934201
if resolution == "month": funcs = [ partial(get_indicator_value, month=month) for month in months ] else: raise NotImplementedError( "Currently, only the 'month' resolution is supported." ) for n in self.nodes(data=True): for indicator in n[1]["indicators"].values(): indicator.timeseries = [ func(indicator, year="2017")[0] for func in funcs ] if len(set(indicator.timeseries)) == 1: indicator.timeseries = None
def get_timeseries_values_for_indicators( self, resolution: str = "month", months: Iterable[int] = range(6, 9) )
Attach timeseries to indicators, for performing Bayesian inference.
3.837507
3.620674
1.059887
self.sample_from_proposal(A) self.set_latent_state_sequence(A) self.update_log_prior(A) self.update_log_likelihood() candidate_log_joint_probability = self.log_prior + self.log_likelihood delta_log_joint_probability = ( candidate_log_joint_probability - self.log_joint_probability ) acceptance_probability = min(1, np.exp(delta_log_joint_probability)) if acceptance_probability > np.random.rand(): self.update_log_joint_probability() else: A[f"∂({self.source})/∂t"][self.target] = self.original_value self.set_latent_state_sequence(A) self.update_log_likelihood() self.update_log_prior(A) self.update_log_joint_probability()
def sample_from_posterior(self, A: pd.DataFrame) -> None
Run Bayesian inference - sample from the posterior distribution.
3.04685
2.946581
1.034029
rows = engine.execute( f"select * from dssat where `Crop` like '{crop}'" f" and `State` like '{state}'" ) xs, ys = lzip(*[(r["Rainfall"], r["Production"]) for r in rows]) xs_scaled, ys_scaled = xs / np.mean(xs), ys / np.mean(ys) p, V = np.polyfit(xs_scaled, ys_scaled, 1, cov=True) self.edges[source, target]["βs"] = np.random.normal( p[0], np.sqrt(V[0][0]), self.res ) self.sample_from_prior()
def infer_transition_matrix_coefficient_from_data( self, source: str, target: str, state: Optional[str] = None, crop: Optional[str] = None, )
Infer the distribution of a particular transition matrix coefficient from data. Args: source: The source of the edge corresponding to the matrix element to infer. target: The target of the edge corresponding to the matrix element to infer. state: The state in South Sudan for which the transition matrix coefficient should be calculated. crop: The crop for which the transition matrix coefficient should be calculated.
5.518818
5.514299
1.000819
s0 = self.construct_default_initial_state() s0.to_csv(filename, index_label="variable")
def create_bmi_config_file(self, filename: str = "bmi_config.txt") -> None
Create a BMI config file to initialize the model. Args: filename: The filename with which the config file should be saved.
14.324367
12.861129
1.113772
return [ self.transition_matrix_collection[i].loc[n[0]].values @ self.s0[i].values for i in range(self.res) ]
def default_update_function(self, n: Tuple[str, dict]) -> List[float]
The default update function for a CAG node. n: A 2-tuple containing the node name and node data. Returns: A list of values corresponding to the distribution of the value of the real-valued variable representing the node.
9.361454
13.243106
0.706893
self.t = 0.0 if not os.path.isfile(config_file): self.create_bmi_config_file(config_file) self.s0 = [ pd.read_csv( config_file, index_col=0, header=None, error_bad_lines=False )[1] for _ in range(self.res) ] self.s0_original = self.s0[0].copy(deep=True) self.latent_state_vector = self.construct_default_initial_state() for n in self.nodes(data=True): rv = LatentVar(n[0]) n[1]["rv"] = rv n[1]["update_function"] = self.default_update_function rv.dataset = [1.0 for _ in range(self.res)] rv.partial_t = self.s0[0][f"∂({n[0]})/∂t"] if initialize_indicators: for indicator in n[1]["indicators"].values(): indicator.samples = np.random.normal( indicator.mean * np.array(n[1]["rv"].dataset), scale=0.01, )
def initialize( self, config_file: str = "bmi_config.txt", initialize_indicators=True )
Initialize the executable AnalysisGraph with a config file. Args: config_file Returns: AnalysisGraph
3.928004
4.00329
0.981194
for n in self.nodes(data=True): n[1]["next_state"] = n[1]["update_function"](n) for n in self.nodes(data=True): n[1]["rv"].dataset = n[1]["next_state"] for n in self.nodes(data=True): for i in range(self.res): self.s0[i][n[0]] = n[1]["rv"].dataset[i] if dampen: self.s0[i][f"∂({n[0]})/∂t"] = self.s0_original[ f"∂({n[0]})/∂t" ] * exp(-τ * self.t) if update_indicators: for indicator in n[1]["indicators"].values(): indicator.samples = np.random.normal( indicator.mean * np.array(n[1]["rv"].dataset), scale=0.01, ) self.t += self.Δt
def update(self, τ: float = 1.0, update_indicators=True, dampen=False)
Advance the model by one time step.
3.426154
3.361232
1.019315
node_dict = { "name": n[0], "units": _get_units(n[0]), "dtype": _get_dtype(n[0]), "arguments": list(self.predecessors(n[0])), } if not n[1].get("indicators") is None: for indicator in n[1]["indicators"].values(): if "dataset" in indicator.__dict__: del indicator.__dict__["dataset"] node_dict["indicators"] = [ _process_datetime(indicator.__dict__) for indicator in n[1]["indicators"].values() ] else: node_dict["indicators"] = None return node_dict
def export_node(self, n) -> Dict[str, Union[str, List[str]]]
Return dict suitable for exporting to JSON. Args: n: A dict representing the data in a networkx AnalysisGraph node. Returns: The node dict with additional fields for name, units, dtype, and arguments.
3.511176
3.130714
1.121526
return { "name": self.name, "dateCreated": str(self.dateCreated), "variables": lmap( lambda n: self.export_node(n), self.nodes(data=True) ), "timeStep": str(self.Δt), "edge_data": lmap(export_edge, self.edges(data=True)), }
def to_dict(self) -> Dict
Export the CAG to a dict that can be serialized to JSON.
6.058352
5.036448
1.202902
for node in self.nodes(data=True): query_parts = [ "select Indicator from concept_to_indicator_mapping", f"where `Concept` like '{node[0]}'", ] # TODO May need to delve into SQL/database stuff a bit more deeply # for this. Foreign keys perhaps? query = " ".join(query_parts) results = engine.execute(query) if min_temporal_res is not None: if min_temporal_res not in ["month"]: raise ValueError("min_temporal_res must be 'month'") vars_with_required_temporal_resolution = [ r[0] for r in engine.execute( "select distinct `Variable` from indicator where " f"`{min_temporal_res.capitalize()}` is not null" ) ] results = [ r for r in results if r[0] in vars_with_required_temporal_resolution ] node[1]["indicators"] = { x: Indicator(x, "MITRE12") for x in [r[0] for r in take(n, results)] }
def map_concepts_to_indicators( self, n: int = 1, min_temporal_res: Optional[str] = None )
Map each concept node in the AnalysisGraph instance to one or more tangible quantities, known as 'indicators'. Args: n: Number of matches to keep min_temporal_res: Minimum temporal resolution that the indicators must have data for.
4.773666
4.643927
1.027937