Fields (name: dtype):
code: string
signature: string
docstring: string
loss_without_docstring: float64
loss_with_docstring: float64
factor: float64
payload = {"device-context": self._build_payload(device_id, obj_slot_id)}
return self._post(self.url_prefix, payload)
def switch(self, device_id, obj_slot_id)
Switch the device-context.
5.388583
3.928224
1.371761
payload = {"rib": self._build_payload(destination, mask, next_hops)}
return self._post(self.url_prefix, payload)
def create(self, destination, mask, next_hops=[])
Create route to {destination} {mask} using {next_hops} expressed as (gateway, distance)
5.722727
5.437198
1.052514
for f in ["INCAR", "POSCAR", "POTCAR", "KPOINTS"]:
    if not os.path.exists(os.path.join(dir_name, f)) and \
            not os.path.exists(os.path.join(dir_name, f + ".orig")):
        return False
return True
def contains_vasp_input(dir_name)
Checks if a directory contains valid VASP input. Args: dir_name: Directory name to check. Returns: True if directory contains all four VASP input files (INCAR, POSCAR, KPOINTS and POTCAR).
2.021113
2.447448
0.825804
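A minimal usage sketch for contains_vasp_input as defined above; the temporary directory and placeholder files are illustrative only.

import os
import tempfile

run_dir = tempfile.mkdtemp()
for name in ["INCAR", "POSCAR", "POTCAR", "KPOINTS"]:
    open(os.path.join(run_dir, name), "w").close()   # create empty placeholder inputs
print(contains_vasp_input(run_dir))                   # True: all four inputs (or .orig) exist
print(contains_vasp_input(tempfile.mkdtemp()))        # False: empty directory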
structure = Structure.from_dict(d["output"]["crystal"])
f = VoronoiNN()
cn = []
for i, s in enumerate(structure.sites):
    try:
        n = f.get_cn(structure, i)
        number = int(round(n))
        cn.append({"site": s.as_dict(), "coordination": number})
    except Exception:
        logger.error("Unable to parse coordination errors")
return cn
def get_coordination_numbers(d)
Helper method to get the coordination number of all sites in the final structure from a run. Args: d: Run dict generated by VaspToDbTaskDrone. Returns: Coordination numbers as a list of dict of [{"site": site_dict, "coordination": number}, ...].
5.422363
4.716802
1.149585
fullpath = os.path.abspath(dir_name)
try:
    hostname = socket.gethostbyaddr(socket.gethostname())[0]
except:
    hostname = socket.gethostname()
return "{}:{}".format(hostname, fullpath)
def get_uri(dir_name)
Returns the URI path for a directory. This allows files hosted on different file servers to have distinct locations. Args: dir_name: A directory name. Returns: Full URI path, e.g., fileserver.host.com:/full/path/of/dir_name.
2.479531
3.048292
0.813416
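A short, hedged example of calling get_uri as defined above; the hostname in the output depends on the machine and its DNS configuration, and the path shown is a placeholder.

uri = get_uri("block_2023/launcher_0001")
print(uri)  # e.g. "node1.example.com:/home/user/block_2023/launcher_0001"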
try:
    d = self.get_task_doc(path)
    if self.mapi_key is not None and d["state"] == "successful":
        self.calculate_stability(d)
    tid = self._insert_doc(d)
    return tid
except Exception as ex:
    import traceback
    logger.error(traceback.format_exc())
    return False
def assimilate(self, path)
Parse the VASP run, insert the result into the db, and return the task_id or doc of the insertion. Returns: If in simulate_mode, the entire doc is returned for debugging purposes. Else, only the task_id of the inserted doc is returned.
6.328373
5.690033
1.112186
logger.info("Getting task doc for base dir :{}".format(path))
files = os.listdir(path)
vasprun_files = OrderedDict()
if "STOPCAR" in files:
    # Stopped runs. Try to parse as much as possible.
    logger.info(path + " contains stopped run")
for r in self.runs:
    if r in files:  # try subfolder schema
        for f in os.listdir(os.path.join(path, r)):
            if fnmatch(f, "vasprun.xml*"):
                vasprun_files[r] = os.path.join(r, f)
    else:  # try extension schema
        for f in files:
            if fnmatch(f, "vasprun.xml.{}*".format(r)):
                vasprun_files[r] = f
if len(vasprun_files) == 0:
    for f in files:  # get any vasprun from the folder
        if fnmatch(f, "vasprun.xml*") and \
                f not in vasprun_files.values():
            vasprun_files['standard'] = f
if len(vasprun_files) > 0:
    d = self.generate_doc(path, vasprun_files)
    if not d:
        d = self.process_killed_run(path)
    self.post_process(path, d)
elif (not (path.endswith("relax1") or path.endswith("relax2"))) \
        and contains_vasp_input(path):
    # If not Materials Project style, process as a killed run.
    logger.warning(path + " contains killed run")
    d = self.process_killed_run(path)
    self.post_process(path, d)
else:
    raise ValueError("No VASP files found!")
return d
def get_task_doc(self, path)
Get the entire task doc for a path, including any post-processing.
3.874005
3.857425
1.004298
logger.info("Post-processing dir:{}".format(dir_name)) fullpath = os.path.abspath(dir_name) # VASP input generated by pymatgen's alchemy has a # transformations.json file that keeps track of the origin of a # particular structure. This is extremely useful for tracing back a # result. If such a file is found, it is inserted into the task doc # as d["transformations"] transformations = {} filenames = glob.glob(os.path.join(fullpath, "transformations.json*")) if len(filenames) >= 1: with zopen(filenames[0], "rt") as f: transformations = json.load(f) try: m = re.match("(\d+)-ICSD", transformations["history"][0]["source"]) if m: d["icsd_id"] = int(m.group(1)) except Exception as ex: logger.warning("Cannot parse ICSD from transformations " "file.") pass else: logger.warning("Transformations file does not exist.") other_parameters = transformations.get("other_parameters") new_tags = None if other_parameters: # We don't want to leave tags or authors in the # transformations file because they'd be copied into # every structure generated after this one. new_tags = other_parameters.pop("tags", None) new_author = other_parameters.pop("author", None) if new_author: d["author"] = new_author if not other_parameters: # if dict is now empty remove it transformations.pop("other_parameters") d["transformations"] = transformations # Calculations done using custodian has a custodian.json, # which tracks the jobs performed and any errors detected and fixed. # This is useful for tracking what has actually be done to get a # result. If such a file is found, it is inserted into the task doc # as d["custodian"] filenames = glob.glob(os.path.join(fullpath, "custodian.json*")) if len(filenames) >= 1: with zopen(filenames[0], "rt") as f: d["custodian"] = json.load(f) # Parse OUTCAR for additional information and run stats that are # generally not in vasprun.xml. try: run_stats = {} for filename in glob.glob(os.path.join(fullpath, "OUTCAR*")): outcar = Outcar(filename) i = 1 if re.search("relax2", filename) else 0 taskname = "relax2" if re.search("relax2", filename) else \ "relax1" d["calculations"][i]["output"]["outcar"] = outcar.as_dict() run_stats[taskname] = outcar.run_stats except: logger.error("Bad OUTCAR for {}.".format(fullpath)) try: overall_run_stats = {} for key in ["Total CPU time used (sec)", "User time (sec)", "System time (sec)", "Elapsed time (sec)"]: overall_run_stats[key] = sum([v[key] for v in run_stats.values()]) run_stats["overall"] = overall_run_stats except: logger.error("Bad run stats for {}.".format(fullpath)) d["run_stats"] = run_stats #Convert to full uri path. if self.use_full_uri: d["dir_name"] = get_uri(dir_name) if new_tags: d["tags"] = new_tags logger.info("Post-processed " + fullpath)
def post_process(self, dir_name, d)
Simple post-processing for various files other than the vasprun.xml. Called by generate_task_doc. Modify this if your runs have other kinds of processing requirements. Args: dir_name: The dir_name. d: Current doc generated.
3.669685
3.605777
1.017724
fullpath = os.path.abspath(dir_name) logger.info("Processing Killed run " + fullpath) d = {"dir_name": fullpath, "state": "killed", "oszicar": {}} for f in os.listdir(dir_name): filename = os.path.join(dir_name, f) if fnmatch(f, "INCAR*"): try: incar = Incar.from_file(filename) d["incar"] = incar.as_dict() d["is_hubbard"] = incar.get("LDAU", False) if d["is_hubbard"]: us = np.array(incar.get("LDAUU", [])) js = np.array(incar.get("LDAUJ", [])) if sum(us - js) == 0: d["is_hubbard"] = False d["hubbards"] = {} else: d["hubbards"] = {} if d["is_hubbard"]: d["run_type"] = "GGA+U" elif incar.get("LHFCALC", False): d["run_type"] = "HF" else: d["run_type"] = "GGA" except Exception as ex: print(str(ex)) logger.error("Unable to parse INCAR for killed run {}." .format(dir_name)) elif fnmatch(f, "KPOINTS*"): try: kpoints = Kpoints.from_file(filename) d["kpoints"] = kpoints.as_dict() except: logger.error("Unable to parse KPOINTS for killed run {}." .format(dir_name)) elif fnmatch(f, "POSCAR*"): try: s = Poscar.from_file(filename).structure comp = s.composition el_amt = s.composition.get_el_amt_dict() d.update({"unit_cell_formula": comp.as_dict(), "reduced_cell_formula": comp.to_reduced_dict, "elements": list(el_amt.keys()), "nelements": len(el_amt), "pretty_formula": comp.reduced_formula, "anonymous_formula": comp.anonymized_formula, "nsites": comp.num_atoms, "chemsys": "-".join(sorted(el_amt.keys()))}) d["poscar"] = s.as_dict() except: logger.error("Unable to parse POSCAR for killed run {}." .format(dir_name)) elif fnmatch(f, "POTCAR*"): try: potcar = Potcar.from_file(filename) d["pseudo_potential"] = { "functional": potcar.functional.lower(), "pot_type": "paw", "labels": potcar.symbols} except: logger.error("Unable to parse POTCAR for killed run in {}." .format(dir_name)) elif fnmatch(f, "OSZICAR"): try: d["oszicar"]["root"] = \ Oszicar(os.path.join(dir_name, f)).as_dict() except: logger.error("Unable to parse OSZICAR for killed run in {}." .format(dir_name)) elif re.match("relax\d", f): if os.path.exists(os.path.join(dir_name, f, "OSZICAR")): try: d["oszicar"][f] = Oszicar( os.path.join(dir_name, f, "OSZICAR")).as_dict() except: logger.error("Unable to parse OSZICAR for killed " "run in {}.".format(dir_name)) return d
def process_killed_run(self, dir_name)
Process a killed vasp run.
2.134681
2.103819
1.014669
vasprun_file = os.path.join(dir_name, filename)
if self.parse_projected_eigen and (self.parse_projected_eigen != 'final' or
                                   taskname == self.runs[-1]):
    parse_projected_eigen = True
else:
    parse_projected_eigen = False
r = Vasprun(vasprun_file, parse_projected_eigen=parse_projected_eigen)
d = r.as_dict()
d["dir_name"] = os.path.abspath(dir_name)
d["completed_at"] = \
    str(datetime.datetime.fromtimestamp(os.path.getmtime(vasprun_file)))
d["cif"] = str(CifWriter(r.final_structure))
d["density"] = r.final_structure.density
if self.parse_dos and (self.parse_dos != 'final' or taskname == self.runs[-1]):
    try:
        d["dos"] = r.complete_dos.as_dict()
    except Exception:
        logger.warning("No valid dos data exist in {}.\n Skipping dos"
                       .format(dir_name))
if taskname == "relax1" or taskname == "relax2":
    d["task"] = {"type": "aflow", "name": taskname}
else:
    d["task"] = {"type": taskname, "name": taskname}
d["oxide_type"] = oxide_type(r.final_structure)
return d
def process_vasprun(self, dir_name, taskname, filename)
Process a vasprun.xml file.
3.105601
3.088836
1.005428
try: fullpath = os.path.abspath(dir_name) # Defensively copy the additional fields first. This is a MUST. # Otherwise, parallel updates will see the same object and inserts # will be overridden!! d = {k: v for k, v in self.additional_fields.items()} d["dir_name"] = fullpath d["schema_version"] = VaspToDbTaskDrone.__version__ d["calculations"] = [ self.process_vasprun(dir_name, taskname, filename) for taskname, filename in vasprun_files.items()] d1 = d["calculations"][0] d2 = d["calculations"][-1] # Now map some useful info to the root level. for root_key in ["completed_at", "nsites", "unit_cell_formula", "reduced_cell_formula", "pretty_formula", "elements", "nelements", "cif", "density", "is_hubbard", "hubbards", "run_type"]: d[root_key] = d2[root_key] d["chemsys"] = "-".join(sorted(d2["elements"])) # store any overrides to the exchange correlation functional xc = d2["input"]["incar"].get("GGA") if xc: xc = xc.upper() d["input"] = {"crystal": d1["input"]["crystal"], "is_lasph": d2["input"]["incar"].get("LASPH", False), "potcar_spec": d1["input"].get("potcar_spec"), "xc_override": xc} vals = sorted(d2["reduced_cell_formula"].values()) d["anonymous_formula"] = {string.ascii_uppercase[i]: float(vals[i]) for i in range(len(vals))} d["output"] = { "crystal": d2["output"]["crystal"], "final_energy": d2["output"]["final_energy"], "final_energy_per_atom": d2["output"]["final_energy_per_atom"]} d["name"] = "aflow" p = d2["input"]["potcar_type"][0].split("_") pot_type = p[0] functional = "lda" if len(pot_type) == 1 else "_".join(p[1:]) d["pseudo_potential"] = {"functional": functional.lower(), "pot_type": pot_type.lower(), "labels": d2["input"]["potcar"]} if len(d["calculations"]) == len(self.runs) or \ list(vasprun_files.keys())[0] != "relax1": d["state"] = "successful" if d2["has_vasp_completed"] \ else "unsuccessful" else: d["state"] = "stopped" d["analysis"] = get_basic_analysis_and_error_checks(d) sg = SpacegroupAnalyzer(Structure.from_dict(d["output"]["crystal"]), 0.1) d["spacegroup"] = {"symbol": sg.get_space_group_symbol(), "number": sg.get_space_group_number(), "point_group": sg.get_point_group_symbol(), "source": "spglib", "crystal_system": sg.get_crystal_system(), "hall": sg.get_hall()} d["oxide_type"] = d2["oxide_type"] d["last_updated"] = datetime.datetime.today() return d except Exception as ex: import traceback print(traceback.format_exc()) logger.error("Error in " + os.path.abspath(dir_name) + ".\n" + traceback.format_exc()) return None
def generate_doc(self, dir_name, vasprun_files)
Process aflow style runs, where each run is actually a combination of two vasp runs.
3.996917
3.988844
1.002024
(parent, subdirs, files) = path
if set(self.runs).intersection(subdirs):
    return [parent]
if not any([parent.endswith(os.sep + r) for r in self.runs]) and \
        len(glob.glob(os.path.join(parent, "vasprun.xml*"))) > 0:
    return [parent]
return []
def get_valid_paths(self, path)
There are some restrictions on the valid directory structures: 1. There can be only one vasp run in each directory. Nested directories are fine. 2. Directories designated "relax1", "relax2" are considered to be 2 parts of an aflow style run. 3. Directories containing vasp output with ".relax1" and ".relax2" are also considered as 2 parts of an aflow style run.
5.024497
4.545501
1.105378
# How to make different types of objects iterable
dict_handler = lambda d: chain.from_iterable(d.items())
all_handlers = {tuple: iter, list: iter, deque: iter,
                dict: dict_handler, set: iter, frozenset: iter}
all_handlers.update(handlers)   # user handlers take precedence
seen = set()                    # track which object id's have already been seen
default_size = getsizeof(0)     # estimate sizeof object without __sizeof__

def sizeof(o):
    "Calculate size of `o` and all its children"
    if id(o) in seen:           # do not double count the same object
        return 0
    seen.add(id(o))
    if count:
        s = 1
    else:
        s = getsizeof(o, default_size)
    # If `o` is iterable, add size of its members
    for typ, handler in all_handlers.items():
        if isinstance(o, typ):
            s += sum(map(sizeof, handler(o)))
            break
    return s

return sizeof(o)
def total_size(o, handlers={}, verbose=False, count=False)
Returns the approximate memory footprint an object and all of its contents. Automatically finds the contents of the following builtin containers and their subclasses: tuple, list, deque, dict, set and frozenset. To search other containers, add handlers to iterate over their contents: handlers = {SomeContainerClass: iter, OtherContainerClass: OtherContainerClass.get_elements} Source: http://code.activestate.com/recipes/577504/ (r3)
2.687706
2.50705
1.072059
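A hypothetical sketch of total_size in use, including a user-supplied handler for a custom container type; the Bag class is invented here purely for illustration.

from collections import deque

nested = {"a": [1, 2, 3], "b": {"c": (4, 5)}, "d": deque("xyz")}
print(total_size(nested))              # approximate bytes; value is interpreter-dependent
print(total_size(nested, count=True))  # count objects instead of summing byte sizes

class Bag:  # custom container unknown to total_size
    def __init__(self, items):
        self.items = list(items)

print(total_size(Bag(range(10)), handlers={Bag: lambda b: b.items}))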
if s is None:
    return {}
d = {}
for item in [e.strip() for e in s.split(",")]:
    try:
        key, value = item.split("=", 1)
    except ValueError:
        msg = "argument item '{}' not in form key=value".format(item)
        if _argparse_is_dumb:
            _alog.warn(msg)
        raise ValueError(msg)
    if key in d:
        msg = "Duplicate key for '{}' not allowed".format(key)
        if _argparse_is_dumb:
            _alog.warn(msg)
        raise ValueError(msg)
    d[key] = value
return d
def args_kvp_nodup(s)
Parse argument string as key=value pairs separated by commas. :param s: Argument string :return: Parsed value :rtype: dict :raises: ValueError for format violations or a duplicated key.
2.868493
2.822174
1.016412
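Illustrative calls to args_kvp_nodup as defined above; note that values are kept as strings and duplicate keys raise ValueError.

print(args_kvp_nodup("host=localhost, port=27017"))
# {'host': 'localhost', 'port': '27017'}

try:
    args_kvp_nodup("host=a, host=b")
except ValueError as err:
    print(err)  # Duplicate key for 'host' not allowed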
if isinstance(o, dict):
    d = o if self._dx is None else self._dx(o)
    return {k: self.walk(v) for k, v in d.items()}
elif isinstance(o, list):
    return [self.walk(v) for v in o]
else:
    return o if self._vx is None else self._vx(o)
def walk(self, o)
Walk a dict & transform.
2.377748
2.195105
1.083205
r = {}
for k, v in o.items():
    if isinstance(k, str):
        k = k.replace('$', '_')
    if "." in k:
        sub_r, keys = r, k.split('.')
        # create sub-dicts until last part of key
        for k2 in keys[:-1]:
            sub_r[k2] = {}
            sub_r = sub_r[k2]  # descend
        # assign last part of key to value
        sub_r[keys[-1]] = v
    else:
        r[k] = v
return r
def dict_expand(o)
Expand keys in a dict with '.' in them into sub-dictionaries, e.g. {'a.b.c': 'foo'} ==> {'a': {'b': {'c': 'foo'}}}
3.172555
3.385201
0.937184
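A small usage sketch for dict_expand as defined above.

print(dict_expand({"a.b.c": "foo", "x": 2}))
# {'a': {'b': {'c': 'foo'}}, 'x': 2}
print(dict_expand({"$where.x": 1}))
# {'_where': {'x': 1}}  ('$' is rewritten to '_' before expansion)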
self._collection_name = value
self._mongo_coll = self.db[value]
self.collection = TrackedCollection(self._mongo_coll, operation=self._t_op,
                                    field=self._t_field)
def collection_name(self, value)
Switch to another collection. Note that you may have to set the aliases and default properties if the schema of the new collection differs from the current collection.
6.799148
6.937899
0.980001
_log.info("tracked_find.begin")
# if tracking is off, just call find (ie do nothing)
if self._tracking_off:
    _log.info("tracked_find.end, tracking=off")
    return self._coll_find(*args, **kwargs)
# otherwise do somethin' real
# fish 'filter' out of args or kwargs
if len(args) > 0:
    filt = args[0]
else:
    if 'filter' not in kwargs:
        kwargs['filter'] = {}
    filt = kwargs['filter']
# update filter with tracker query
filt.update(self._mark.query)
# delegate to "real" find()
_log.info("tracked_find.end, call: {}.find(args={} kwargs={})"
          .format(self._coll.name, args, kwargs))
return self._coll_find(*args, **kwargs)
def tracked_find(self, *args, **kwargs)
Replacement for regular ``find()``.
4.954377
4.843614
1.022868
rec = self._c.find_one({}, {self._fld: 1}, sort=[(self._fld, -1)], limit=1)
if rec is None:
    self._pos = self._empty_pos()
elif not self._fld in rec:
    _log.error("Tracking field not found. field={} collection={}"
               .format(self._fld, self._c.name))
    _log.warn("Continuing without tracking")
    self._pos = self._empty_pos()
else:
    self._pos = {self._fld: rec[self._fld]}
return self
def update(self)
Update the position of the mark in the collection. :return: this object, for chaining :rtype: Mark
4.320442
4.512583
0.957421
return {self.FLD_OP: self._op.name,
        self.FLD_MARK: self._pos,
        self.FLD_FLD: self._fld}
def as_dict(self)
Representation as a dict for JSON serialization.
6.741901
5.863218
1.149864
return Mark(collection=coll, operation=Operation[d[cls.FLD_OP]],
            pos=d[cls.FLD_MARK], field=d[cls.FLD_FLD])
def from_dict(cls, coll, d)
Construct from dict :param coll: Collection for the mark :param d: Input :type d: dict :return: new instance :rtype: Mark
9.803516
9.76625
1.003816
q = {}
for field, value in self._pos.items():
    if value is None:
        q.update({field: {'$exists': True}})
    else:
        q.update({field: {'$gt': value}})
return q
def query(self)
A MongoDB query expression to find all records with higher values for this mark's fields in the collection. :rtype: dict
3.635252
3.588473
1.013036
if self._track is None:
    self._track = self.db[self.tracking_collection_name]
def create(self)
Create tracking collection. Does nothing if tracking collection already exists.
10.076819
4.923018
2.046879
self._check_exists()
obj = mark.as_dict()
try:
    # Make a 'filter' to find/update existing record, which uses
    # the field name and operation (but not the position).
    filt = {k: obj[k] for k in (mark.FLD_FLD, mark.FLD_OP)}
    _log.debug("save: upsert-spec={} upsert-obj={}".format(filt, obj))
    self._track.update(filt, obj, upsert=True)
except pymongo.errors.PyMongoError as err:
    raise DBError("{}".format(err))
def save(self, mark)
Save a position in this collection. :param mark: The position to save :type mark: Mark :raises: DBError, NoTrackingCollection
7.441741
7.068297
1.052834
obj = self._get(operation, field)
if obj is None:
    # empty Mark instance
    return Mark(collection=self.collection, operation=operation, field=field)
return Mark.from_dict(self.collection, obj)
def retrieve(self, operation, field=None)
Retrieve a position in this collection. :param operation: Name of an operation :type operation: :class:`Operation` :param field: Name of field for sort order :type field: str :return: The position for this operation :rtype: Mark :raises: NoTrackingCollection
5.98779
5.932572
1.009307
self._check_exists()
query = {Mark.FLD_OP: operation.name,
         Mark.FLD_MARK + "." + field: {"$exists": True}}
return self._track.find_one(query)
def _get(self, operation, field)
Get tracked position for a given operation and field.
8.816965
7.269589
1.212856
self._collection_name = value
self.collection = self.db[value]
def collection_name(self, value)
Switch to another collection. Note that you may have to set the aliases and default properties if the schema of the new collection differs from the current collection.
5.927942
5.416451
1.094433
if aliases_config is None:
    with open(os.path.join(os.path.dirname(__file__), "aliases.json")) as f:
        d = json.load(f)
        self.aliases = d.get("aliases", {})
        self.default_criteria = d.get("defaults", {})
else:
    self.aliases = aliases_config.get("aliases", {})
    self.default_criteria = aliases_config.get("defaults", {})
# set default properties
if default_properties is None:
    self._default_props, self._default_prop_dict = None, None
else:
    self._default_props, self._default_prop_dict = \
        self._parse_properties(default_properties)
def set_aliases_and_defaults(self, aliases_config=None, default_properties=None)
Set the alias config and defaults to use. Typically used when switching to a collection with a different schema. Args: aliases_config: An alias dict to use. Defaults to None, which means the default aliases defined in "aliases.json" is used. See constructor for format. default_properties: List of property names (strings) to use by default, if no properties are given to the 'properties' argument of query().
2.069711
2.197824
0.941709
chemsys_list = []
for i in range(len(elements)):
    for combi in itertools.combinations(elements, i + 1):
        chemsys = "-".join(sorted(combi))
        chemsys_list.append(chemsys)
crit = {"chemsys": {"$in": chemsys_list}}
if additional_criteria is not None:
    crit.update(additional_criteria)
return self.get_entries(crit, inc_structure, optional_data=optional_data)
def get_entries_in_system(self, elements, inc_structure=False, optional_data=None, additional_criteria=None)
Gets all entries in a chemical system, e.g. Li-Fe-O will return all Li-O, Fe-O, Li-Fe, Li-Fe-O compounds. .. note:: The get_entries_in_system and get_entries methods should be used with care. In essence, all entries, GGA, GGA+U or otherwise, are returned. The dataset is very heterogeneous and not directly comparable. It is highly recommended that you perform post-processing using pymatgen.entries.compatibility. Args: elements: Sequence of element symbols, e.g. ['Li','Fe','O'] inc_structure: Optional parameter as to whether to include a structure with the ComputedEntry. Defaults to False. Use with care - including structures with a large number of entries can potentially slow down your code to a crawl. optional_data: Optional data to include with the entry. This allows the data to be accessed via entry.data[key]. additional_criteria: Added ability to provide additional criteria other than just the chemical system. Returns: List of ComputedEntries in the chemical system.
2.350423
2.796924
0.84036
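A hypothetical call sketch for get_entries_in_system, assuming qe is a QueryEngine instance connected to a tasks collection; the element list and extra criteria are placeholders.

entries = qe.get_entries_in_system(
    ["Li", "Fe", "O"],
    inc_structure=False,
    additional_criteria={"is_hubbard": True},
)
print(len(entries))
# As the docstring above recommends, post-process with pymatgen.entries.compatibility
# before mixing GGA and GGA+U entries.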
all_entries = list() optional_data = [] if not optional_data else list(optional_data) optional_data.append("oxide_type") fields = [k for k in optional_data] fields.extend(["task_id", "unit_cell_formula", "energy", "is_hubbard", "hubbards", "pseudo_potential.labels", "pseudo_potential.functional", "run_type", "input.is_lasph", "input.xc_override", "input.potcar_spec"]) if inc_structure: fields.append("output.crystal") for c in self.query(fields, criteria): func = c["pseudo_potential.functional"] labels = c["pseudo_potential.labels"] symbols = ["{} {}".format(func, label) for label in labels] parameters = {"run_type": c["run_type"], "is_hubbard": c["is_hubbard"], "hubbards": c["hubbards"], "potcar_symbols": symbols, "is_lasph": c.get("input.is_lasph") or False, "potcar_spec": c.get("input.potcar_spec"), "xc_override": c.get("input.xc_override")} optional_data = {k: c[k] for k in optional_data} if inc_structure: struct = Structure.from_dict(c["output.crystal"]) entry = ComputedStructureEntry(struct, c["energy"], 0.0, parameters=parameters, data=optional_data, entry_id=c["task_id"]) else: entry = ComputedEntry(Composition(c["unit_cell_formula"]), c["energy"], 0.0, parameters=parameters, data=optional_data, entry_id=c["task_id"]) all_entries.append(entry) return all_entries
def get_entries(self, criteria, inc_structure=False, optional_data=None)
Get ComputedEntries satisfying particular criteria. .. note:: The get_entries_in_system and get_entries methods should be used with care. In essence, all entries, GGA, GGA+U or otherwise, are returned. The dataset is very heterogeneous and not directly comparable. It is highly recommended that you perform post-processing using pymatgen.entries.compatibility. Args: criteria: Criteria obeying the same syntax as query. inc_structure: Optional parameter as to whether to include a structure with the ComputedEntry. Defaults to False. Use with care - including structures with a large number of entries can potentially slow down your code to a crawl. optional_data: Optional data to include with the entry. This allows the data to be accessed via entry.data[key]. Returns: List of pymatgen.entries.ComputedEntries satisfying criteria.
2.751872
2.673329
1.02938
if criteria is None:
    return dict()
parsed_crit = dict()
for k, v in self.default_criteria.items():
    if k not in criteria:
        parsed_crit[self.aliases.get(k, k)] = v
for key, crit in list(criteria.items()):
    if key in ["normalized_formula", "reduced_cell_formula"]:
        comp = Composition(crit)
        parsed_crit["pretty_formula"] = comp.reduced_formula
    elif key == "unit_cell_formula":
        comp = Composition(crit)
        crit = comp.as_dict()
        for el, amt in crit.items():
            parsed_crit["{}.{}".format(self.aliases[key], el)] = amt
        parsed_crit["nelements"] = len(crit)
        parsed_crit['pretty_formula'] = comp.reduced_formula
    elif key in ["$or", "$and"]:
        parsed_crit[key] = [self._parse_criteria(m) for m in crit]
    else:
        parsed_crit[self.aliases.get(key, key)] = crit
return parsed_crit
def _parse_criteria(self, criteria)
Internal method to perform mapping of criteria to proper mongo queries using aliases, as well as some useful sanitization. For example, string formulas such as "Fe2O3" are auto-converted to proper mongo queries of {"Fe":2, "O":3}. If 'criteria' is None, returns an empty dict. Putting this logic here simplifies callers and allows subclasses to insert something even when there are no criteria.
2.995147
2.809079
1.066238
return self.collection.ensure_index(key, unique=unique)
def ensure_index(self, key, unique=False)
Wrapper for pymongo.Collection.ensure_index
3.814793
3.611785
1.056207
if properties is not None:
    props, prop_dict = self._parse_properties(properties)
else:
    props, prop_dict = None, None
crit = self._parse_criteria(criteria)
if self.query_post:
    for func in self.query_post:
        func(crit, props)
cur = self.collection.find(filter=crit, projection=props, **kwargs)
if distinct_key is not None:
    cur = cur.distinct(distinct_key)
    return QueryListResults(prop_dict, cur, postprocess=self.result_post)
else:
    return QueryResults(prop_dict, cur, postprocess=self.result_post)
def query(self, properties=None, criteria=None, distinct_key=None, **kwargs)
Convenience method for database access. All properties and criteria can be specified using simplified names defined in Aliases. You can use the supported_properties property to get the list of supported properties. Results are returned as an iterator of dicts to ensure memory and cpu efficiency. Note that the dict returned have keys also in the simplified names form, not in the mongo format. For example, if you query for "analysis.e_above_hull", the returned result must be accessed as r['analysis.e_above_hull'] instead of mongo's r['analysis']['e_above_hull']. This is a *feature* of the query engine to allow simple access to deeply nested docs without having to resort to some recursion to go deep into the result. However, if you query for 'analysis', the entire 'analysis' key is returned as r['analysis'] and then the subkeys can be accessed in the usual form, i.e., r['analysis']['e_above_hull'] :param properties: Properties to query for. Defaults to None which means all supported properties. :param criteria: Criteria to query for as a dict. :param distinct_key: If not None, the key for which to get distinct results :param \*\*kwargs: Other kwargs supported by pymongo.collection.find. Useful examples are limit, skip, sort, etc. :return: A QueryResults Iterable, which is somewhat like pymongo's cursor except that it performs mapping. In general, the dev does not need to concern himself with the form. It is sufficient to know that the results are in the form of an iterable of dicts.
3.207715
3.089604
1.038228
props = {}
# TODO: clean up prop_dict?
# We use a dict instead of list to provide for a richer syntax
prop_dict = OrderedDict()
for p in properties:
    if p in self.aliases:
        if isinstance(properties, dict):
            props[self.aliases[p]] = properties[p]
        else:
            props[self.aliases[p]] = 1
        prop_dict[p] = self.aliases[p].split(".")
    else:
        if isinstance(properties, dict):
            props[p] = properties[p]
        else:
            props[p] = 1
        prop_dict[p] = p.split(".")
# including a lower-level key after a higher level key e.g.:
# {'output': 1, 'output.crystal': 1} instead of
# {'output.crystal': 1, 'output': 1}
# causes mongo to skip the other higher level keys.
# this is a (sketchy) workaround for that. Note this problem
# doesn't appear often in python2 because the dictionary ordering
# is more stable.
props = OrderedDict(sorted(props.items(), reverse=True))
return props, prop_dict
def _parse_properties(self, properties)
Make a list of properties into 2 things: (1) a dictionary of {'aliased-field': 1, ...} to use as a MongoDB query projection, and (2) a dictionary, keyed by aliased field, for display.
5.526664
5.115977
1.080275
for r in self.query(*args, **kwargs):
    return r
return None
def query_one(self, *args, **kwargs)
Return first document from :meth:`query`, with same parameters.
4.438764
3.404433
1.303819
args = {'task_id': task_id}
field = 'output.crystal' if final_structure else 'input.crystal'
results = tuple(self.query([field], args))
if len(results) > 1:
    raise QueryError("More than one result found for task_id {}!".format(task_id))
elif len(results) == 0:
    raise QueryError("No structure found for task_id {}!".format(task_id))
c = results[0]
return Structure.from_dict(c[field])
def get_structure_from_id(self, task_id, final_structure=True)
Returns a structure from the database given the task id. Args: task_id: The task_id to query for. final_structure: Whether to obtain the final or initial structure. Defaults to True.
3.003772
3.159487
0.950715
with open(config_file) as f:
    d = json.load(f)
    user = d["admin_user"] if use_admin else d["readonly_user"]
    password = d["admin_password"] if use_admin else d["readonly_password"]
    return QueryEngine(host=d["host"], port=d["port"],
                       database=d["database"], user=user,
                       password=password, collection=d["collection"],
                       aliases_config=d.get("aliases_config", None))
def from_config(config_file, use_admin=False)
Initialize a QueryEngine from a JSON config file generated using mgdb init. Args: config_file: Filename of config file. use_admin: If True, the admin user and password in the config file is used. Otherwise, the readonly_user and password is used. Defaults to False. Returns: QueryEngine
2.727985
2.360053
1.1559
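A hedged sketch of the JSON layout that from_config reads (only the keys used by the code above; all values are placeholders), followed by a call that assumes from_config is exposed on QueryEngine.

# db.json (placeholder values):
# {
#   "host": "localhost", "port": 27017,
#   "database": "vasp", "collection": "tasks",
#   "admin_user": "admin", "admin_password": "...",
#   "readonly_user": "reader", "readonly_password": "...",
#   "aliases_config": null
# }
qe = QueryEngine.from_config("db.json", use_admin=False)  # connects as the readonly user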
args = {'task_id': task_id} fields = ['calculations'] structure = self.get_structure_from_id(task_id) dosid = None for r in self.query(fields, args): dosid = r['calculations'][-1]['dos_fs_id'] if dosid is not None: self._fs = gridfs.GridFS(self.db, 'dos_fs') with self._fs.get(dosid) as dosfile: s = dosfile.read() try: d = json.loads(s) except: s = zlib.decompress(s) d = json.loads(s.decode("utf-8")) tdos = Dos.from_dict(d) pdoss = {} for i in range(len(d['pdos'])): ados = d['pdos'][i] all_ados = {} for j in range(len(ados)): orb = Orbital(j) odos = ados[str(orb)] all_ados[orb] = {Spin(int(k)): v for k, v in odos['densities'].items()} pdoss[structure[i]] = all_ados return CompleteDos(structure, tdos, pdoss) return None
def get_dos_from_id(self, task_id)
Overrides the get_dos_from_id for the MIT gridfs format.
3.507074
3.327745
1.053889
def wrapped(*args, **kwargs):
    ret_val = func(*args, **kwargs)
    if isinstance(ret_val, pymongo.cursor.Cursor):
        ret_val = self.from_cursor(ret_val)
    return ret_val
return wrapped
def _wrapper(self, func)
This function wraps all callable objects returned by self.__getattr__. If the result is a cursor, wrap it into a QueryResults object so that you can invoke postprocess functions in self._pproc
2.638056
2.617975
1.00767
# Apply result_post funcs for pulling out sandbox properties
for func in self._pproc:
    func(r)
# If we haven't asked for specific properties, just return object
if not self._prop_dict:
    result = r
else:
    result = dict()
    # Map aliased keys back to original key
    for k, v in self._prop_dict.items():
        try:
            result[k] = self._mapped_result_path(v[1:], data=r[v[0]])
        except (IndexError, KeyError, ValueError):
            result[k] = None
return result
def _mapped_result(self, r)
Transform/map a result.
7.065177
7.182022
0.983731
if not os.path.exists(path):
    raise SchemaPathError()
filepat = "*." + ext if ext else "*"
for f in glob.glob(os.path.join(path, filepat)):
    with open(f, 'r') as fp:
        try:
            schema = json.load(fp)
        except ValueError:
            raise SchemaParseError("error parsing '{}'".format(f))
    name = os.path.splitext(os.path.basename(f))[0]
    schemata[name] = Schema(schema)
def add_schemas(path, ext="json")
Add schemas from files in 'path'. :param path: Path with schema files. Schemas are named by their file, with the extension stripped. e.g., if path is "/tmp/foo", then the schema in "/tmp/foo/bar.json" will be named "bar". :type path: str :param ext: File extension that identifies schema files :type ext: str :return: None :raise: SchemaPathError, if no such path. SchemaParseError, if a schema is not valid JSON.
2.697139
2.54092
1.061481
fp = open(file_or_fp, 'r') if isinstance(file_or_fp, str) else file_or_fp
obj = json.load(fp)
schema = Schema(obj)
return schema
def load_schema(file_or_fp)
Load schema from file. :param file_or_fp: File name or file object :type file_or_fp: str, file :raise: IOError if file cannot be opened or read, ValueError if file is not valid JSON or JSON is not a valid schema.
2.188089
2.736925
0.79947
self._json_schema_keys = add_keys
if self._json_schema is None:
    self._json_schema = self._build_schema(self._schema)
return self._json_schema
def json_schema(self, **add_keys)
Convert our compact schema representation to the standard, but more verbose, JSON Schema standard. Example JSON schema: http://json-schema.org/examples.html Core standard: http://json-schema.org/latest/json-schema-core.html :param add_keys: Key, default value pairs to add in, e.g. description=""
3.198109
3.572671
0.895159
w = self._whatis(s)
if w == self.IS_LIST:
    w0 = self._whatis(s[0])
    js = {"type": "array", "items": {"type": self._jstype(w0, s[0])}}
elif w == self.IS_DICT:
    js = {"type": "object",
          "properties": {key: self._build_schema(val) for key, val in s.items()}}
    req = [key for key, val in s.items() if not val.is_optional]
    if req:
        js["required"] = req
else:
    js = {"type": self._jstype(w, s)}
for k, v in self._json_schema_keys.items():
    if k not in js:
        js[k] = v
return js
def _build_schema(self, s)
Recursive schema builder, called by `json_schema`.
2.67095
2.515979
1.061595
if stype == self.IS_LIST:
    return "array"
if stype == self.IS_DICT:
    return "object"
if isinstance(sval, Scalar):
    return sval.jstype
# it is a Schema, so return type of contents
v = sval._schema
return self._jstype(self._whatis(v), v)
def _jstype(self, stype, sval)
Get JavaScript name for given data type, called by `_build_schema`.
6.143893
5.226649
1.175494
v = str(db_version)
return os.path.join(_top_dir, '..', 'schemata', 'versions', v)
def get_schema_dir(db_version=1)
Get path to directory with schemata. :param db_version: Version of the database :type db_version: int :return: Path :rtype: str
5.961636
7.285122
0.81833
d = get_schema_dir(db_version=db_version)
schemafile = "{}.{}.json".format(db, collection)
f = open(os.path.join(d, schemafile), "r")
return f
def get_schema_file(db_version=1, db="mg_core", collection="materials")
Get file with appropriate schema. :param db_version: Version of the database :type db_version: int :param db: Name of database, e.g. 'mg_core' :type db: str :param collection: Name of collection, e.g. 'materials' :type collection: str :return: File with schema :rtype: file :raise: IOError, if file is not found or not accessible
3.195648
3.88748
0.822036
settings = yaml.load(_as_file(infile))
if not hasattr(settings, 'keys'):
    raise ValueError("Settings not found in {}".format(infile))
# Processing of namespaced parameters in .pmgrc.yaml.
processed_settings = {}
for k, v in settings.items():
    if k.startswith("PMG_DB_"):
        processed_settings[k[7:].lower()] = v
    else:
        processed_settings[k] = v
auth_aliases(processed_settings)
return processed_settings
def get_settings(infile)
Read settings from input file. :param infile: Input file for JSON settings. :type infile: file or str path :return: Settings parsed from file :rtype: dict
5.4054
6.140615
0.88027
for alias, real in ((USER_KEY, "readonly_user"),
                    (PASS_KEY, "readonly_password")):
    if alias in d:
        d[real] = d[alias]
        del d[alias]
def auth_aliases(d)
Interpret user/password aliases.
4.571385
4.138709
1.104544
U, P = USER_KEY, PASS_KEY
# If user/password, un-prefixed, exists, do nothing.
if U in settings and P in settings:
    return True
# Set prefixes
prefixes = []
if readonly_first:
    if readonly:
        prefixes.append("readonly_")
    if admin:
        prefixes.append("admin_")
else:
    if admin:
        prefixes.append("admin_")
    if readonly:
        prefixes.append("readonly_")
# Look for first user/password matching.
found = False
for pfx in prefixes:
    ukey, pkey = pfx + U, pfx + P
    if ukey in settings and pkey in settings:
        settings[U] = settings[ukey]
        settings[P] = settings[pkey]
        found = True
        break
return found
def normalize_auth(settings, admin=True, readonly=True, readonly_first=False)
Transform the readonly/admin user and password to simple user/password, as expected by QueryEngine. If return value is true, then admin or readonly password will be in keys "user" and "password". :param settings: Connection settings :type settings: dict :param admin: Check for admin password :param readonly: Check for readonly password :param readonly_first: Check for readonly password before admin :return: Whether user/password were found :rtype: bool
2.85235
2.757812
1.03428
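An illustrative call to normalize_auth, assuming USER_KEY == 'user' and PASS_KEY == 'password' as the docstring above suggests; the credential values are placeholders.

settings = {"host": "localhost",
            "admin_user": "root", "admin_password": "s3cret",
            "readonly_user": "reader", "readonly_password": "r0"}
if normalize_auth(settings, readonly_first=True):
    print(settings["user"], settings["password"])  # reader r0 (readonly checked first)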
main_fmt, sub_fmt = fmt.split('/') if sub_fmt.lower() == "text": msg = MIMEText(text, "plain") elif sub_fmt.lower() == "html": msg = MIMEText(text, "html") else: raise ValueError("Unknown message format: {}".format(fmt)) msg['Subject'] = self._subject msg['From'] = self._sender msg['To'] = ', '.join(self._recipients) if self._port is None: conn_kwargs = dict(host=self._server) else: conn_kwargs = dict(host=self._server, port=self._port) self._log.info("connect to email server {}".format(conn_kwargs)) try: s = smtplib.SMTP(**conn_kwargs) #s.set_debuglevel(2) refused = s.sendmail(self._sender, self._recipients, msg.as_string()) if refused: self._log.warn("Email to {:d} recipients was refused".format(len(refused))) for person, (code, msg) in refused.items(): self._log("Email to {p} was refused ({c}): {m}".format(p=person, c=code, m=msg)) s.quit() n_recip = len(self._recipients) except Exception as err: self._log.error("connection to SMTP server failed: {}".format(err)) n_recip = 0 return n_recip
def send(self, text, fmt)
Send the email message. :param text: The text to send :type text: str :param fmt: The name of the format of the text :type fmt: str :return: Number of recipients it was sent to :rtype: int
2.461065
2.393669
1.028156
keyset, maxwid = set(), {}
for r in rs:
    key = tuple(sorted(r.keys()))
    keyset.add(key)
    if key not in maxwid:
        maxwid[key] = [len(k) for k in key]
    for i, k in enumerate(key):
        strlen = len("{}".format(r[k]))
        maxwid[key][i] = max(maxwid[key][i], strlen)
return keyset, maxwid
def result_subsets(self, rs)
Break a result set into subsets with the same keys. :param rs: Result set, rows of a result as a list of dicts :type rs: list of dict :return: A set with distinct keys (tuples), and a dict, by these tuples, of max. widths for each column
3.265946
2.491223
1.310981
columns = list(columns)  # might be a tuple
fixed_cols = [self.key]
if section.lower() == "different":
    fixed_cols.extend([Differ.CHANGED_MATCH_KEY, Differ.CHANGED_OLD,
                       Differ.CHANGED_NEW])
map(columns.remove, fixed_cols)
columns.sort()
return fixed_cols + columns
def ordered_cols(self, columns, section)
Return ordered list of columns, from given columns and the name of the section
6.825232
6.715484
1.016343
# print("@@ SORT ROWS:\n{}".format(rows))
# Section-specific determination of sort key
if section.lower() == Differ.CHANGED.lower():
    sort_key = Differ.CHANGED_DELTA
else:
    sort_key = None
if sort_key is not None:
    rows.sort(key=itemgetter(sort_key))
def sort_rows(self, rows, section)
Sort the rows, as appropriate for the section. :param rows: List of tuples (all same length, same values in each position) :param section: Name of section, should match const in Differ class :return: None; rows are sorted in-place
6.035353
5.084216
1.187077
self._add_meta(result)
walker = JsonWalker(JsonWalker.value_json, JsonWalker.dict_expand)
r = walker.walk(result)
return r
def document(self, result)
Build dict for MongoDB, expanding result keys as we go.
9.801039
8.303707
1.180321
css = "\n".join(self.css) content = "{}{}".format(self._header(), self._body(result)) if self._email: text = .format(css=css, content=content, sty=self.styles["content"]["_"]) else: text = .format(css=css, content=content) return text
def format(self, result)
Generate HTML report. :return: Report body :rtype: str
6.658585
7.011383
0.949682
m = self.meta lines = ['-' * len(self.TITLE), self.TITLE, '-' * len(self.TITLE), "Compared: {db1} <-> {db2}".format(**m), "Filter: {filter}".format(**m), "Run time: {start_time} -- {end_time} ({elapsed:.1f} sec)".format(**m), ""] for section in result.keys(): lines.append("* " + section.title()) indent = " " * 4 if len(result[section]) == 0: lines.append("{}EMPTY".format(indent)) else: keyset, maxwid = self.result_subsets(result[section]) for columns in keyset: ocol = self.ordered_cols(columns, section) mw = maxwid[columns] mw_i = [columns.index(c) for c in ocol] # reorder indexes fmt = ' '.join(["{{:{:d}s}}".format(mw[i]) for i in mw_i]) lines.append("") lines.append(indent + fmt.format(*ocol)) lines.append(indent + '-_' * (sum(mw)/2 + len(columns))) rows = result[section] self.sort_rows(rows, section) for r in rows: key = tuple(sorted(r.keys())) if key == columns: values = [str(r[k]) for k in ocol] lines.append(indent + fmt.format(*values)) return '\n'.join(lines)
def format(self, result)
Generate plain text report. :return: Report body :rtype: str
4.116481
4.067521
1.012037
try:
    qe = clazz(**config.settings)
except Exception as err:
    raise CreateQueryEngineError(clazz, config.settings, err)
return qe
def create_query_engine(config, clazz)
Create and return new query engine object from the given `DBConfig` object. :param config: Database configuration :type config: dbconfig.DBConfig :param clazz: Class to use for creating query engine. Should act like query_engine.QueryEngine. :type clazz: class :return: New query engine
4.270379
5.595614
0.763165
if os.path.isdir(path):
    configs = glob.glob(_opj(path, pattern))
else:
    configs = [path]
for config in configs:
    cfg = dbconfig.DBConfig(config_file=config)
    cs = cfg.settings
    if dbconfig.DB_KEY not in cs:
        raise ValueError("No database in '{}'".format(config))
    if dbconfig.COLL_KEY in cs:
        name = "{}.{}".format(cs[dbconfig.DB_KEY], cs[dbconfig.COLL_KEY])
    else:
        name = cs[dbconfig.DB_KEY]
    self.add(name, cfg)
return self
def add_path(self, path, pattern="*.json")
Add configuration file(s) in `path`. The path can be a single file or a directory. If path is a directory, then `pattern` (Unix glob-style) will be used to get a list of all config files in the directory. The name given to each file is the database name and collection name (if any) combined with a '.'. :param path: File or directory name :return: self, for chaining
3.396121
3.025057
1.122664
self._d[name] = cfg
if expand:
    self.expand(name)
return self
def add(self, name, cfg, expand=False)
Add a configuration object. :param name: Name for later retrieval :param cfg: Configuration object :param expand: Flag for adding sub-configs for each sub-collection. See discussion in method doc. :return: self, for chaining :raises: CreateQueryEngineError (only if expand=True)
4.523746
7.116201
0.635697
if self._is_pattern(name):
    expr = re.compile(self._pattern_to_regex(name))
    for cfg_name in self._d.keys():
        if expr.match(cfg_name):
            self._expand(cfg_name)
else:
    self._expand(name)
def expand(self, name)
Expand config for `name` by adding a sub-configuration for every dot-separated collection "below" the given one (or all, if none given). For example, for a database 'mydb' with collections ['spiderman.amazing', 'spiderman.spectacular', 'spiderman2'] and a configuration {'host':'foo', 'database':'mydb', 'collection':'spiderman'} then `expand("mydb.spiderman")` would add keys for 'spiderman.amazing' and 'spiderman.spectacular', but *not* 'spiderman2'. :param name: Name, or glob-style pattern, for DB configurations. :type name: basestring :return: None :raises: KeyError (if no such configuration)
3.282487
3.06913
1.069517
cfg = self._d[name]
if cfg.collection is None:
    base_coll = ''
else:
    base_coll = cfg.collection + self.SEP
qe = self._get_qe(name, cfg)
coll, db = qe.collection, qe.db
cur_coll = coll.name
for coll_name in db.collection_names():
    if coll_name == cur_coll or not coll_name.startswith(base_coll):
        continue
    ex_cfg = cfg.copy()
    ex_cfg.collection = coll_name
    group_name = name + self.SEP + coll_name[len(base_coll):]
    self.add(group_name, ex_cfg, expand=False)
def _expand(self, name)
Perform real work of `expand()` function.
3.793854
3.733743
1.016099
delme = []
if self._is_pattern(name):
    expr = re.compile(self._pattern_to_regex(name))
    for key, obj in self._cached.items():
        if expr.match(key):
            delme.append(key)
else:
    if name in self._cached:
        delme.append(name)
for key in delme:
    del self._cached[key]
def uncache(self, name)
Remove all created query engines that match `name` from the cache (this disconnects from MongoDB, which is the point). :param name: Name used for :meth:`add`, or pattern :return: None
2.569784
2.73123
0.940889
if prefix is None:
    self._pfx = None
else:
    self._pfx = prefix + self.SEP
def set_prefix(self, prefix=None)
Set prefix to use as a namespace for item lookup. A dot (.) will be automatically added to the given string. :param prefix: Prefix, or None to unset :return: None
4.772971
5.147606
0.927221
if key in self._cached:
    return self._cached[key]
qe = create_query_engine(obj, self._class)
self._cached[key] = qe
return qe
def _get_qe(self, key, obj)
Instantiate a query engine, or retrieve a cached one.
3.536996
2.68769
1.315998
if not pattern.endswith("$"):
    pattern += "$"
expr = re.compile(pattern)
return list(filter(expr.match, self.keys()))
def re_keys(self, pattern)
Find keys matching `pattern`. :param pattern: Regular expression :return: Matching keys or empty list :rtype: list
3.305141
4.014382
0.823325
return {k: self[k] for k in self.re_keys(pattern)}
def re_get(self, pattern)
Return values whose key matches `pattern` :param pattern: Regular expression :return: Found values, as a dict.
6.575634
6.480856
1.014624
self._target_coll = target.collection
if not crit:  # reduce any False-y crit value to None
    crit = None
cur = source.query(criteria=crit)
_log.info("source.collection={} crit={} source_records={:d}"
          .format(source.collection, crit, len(cur)))
return cur
def get_items(self, source=None, target=None, crit=None)
Copy records from source to target collection. :param source: Input collection :type source: QueryEngine :param target: Output collection :type target: QueryEngine :param crit: Filter criteria, e.g. "{ 'flag': True }". :type crit: dict
8.021866
8.448709
0.949478
doc = fn.__doc__
params, return_ = {}, {}
param_order = []
for line in doc.split("\n"):
    line = line.strip()
    if line.startswith(":param"):
        _, name, desc = line.split(":", 2)
        name = name[6:].strip()  # skip 'param '
        params[name] = {'desc': desc.strip()}
        param_order.append(name)
    elif line.startswith(":type"):
        _, name, desc = line.split(":", 2)
        name = name[5:].strip()  # skip 'type '
        if not name in params:
            raise ValueError("'type' without 'param' for {}".format(name))
        params[name]['type'] = desc.strip()
    elif line.startswith(":return"):
        _1, _2, desc = line.split(":", 2)
        return_['desc'] = desc
    elif line.startswith(":rtype"):
        _1, _2, desc = line.split(":", 2)
        return_['type'] = desc.strip()
return params
def parse_fn_docstring(fn)
Get parameter and return types from function's docstring. Docstrings must use this format:: :param foo: What is foo :type foo: int :return: What is returned :rtype: double :return: A map of names, each with keys 'type' and 'desc'. :rtype: tuple(dict)
2.068426
2.066459
1.000952
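A usage sketch for parse_fn_docstring as defined above; note that, as written, only the parameter map is returned, despite the docstring describing a tuple.

def add(x, y):
    """Add two numbers.

    :param x: First operand
    :type x: int
    :param y: Second operand
    :type y: int
    :return: Sum of the operands
    :rtype: int
    """
    return x + y

print(parse_fn_docstring(add))
# {'x': {'desc': 'First operand', 'type': 'int'},
#  'y': {'desc': 'Second operand', 'type': 'int'}}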
merged = copy.copy(sandbox_collections) # create/clear target collection target = merged.database[new_tasks] if wipe: _log.debug("merge_tasks.wipe.begin") target.remove() merged.database['counter'].remove() _log.debug("merge_tasks.wipe.end") # perform the merge batch = [] for doc in core_collections.tasks.find(): batch.append(doc) if len(batch) == batch_size: target.insert(batch) batch = [] if batch: target.insert(batch) batch = [] for doc in sandbox_collections.tasks.find(): doc['task_id'] = id_prefix + '-' + str(doc['task_id']) batch.append(doc) if len(batch) == batch_size: target.insert(batch) batch = [] if batch: target.insert(batch)
def merge_tasks(core_collections, sandbox_collections, id_prefix, new_tasks, batch_size=100, wipe=False)
Merge core and sandbox collections into a temporary collection in the sandbox. :param core_collections: Core collection info :type core_collections: Collections :param sandbox_collections: Sandbox collection info :type sandbox_collections: Collections
2.622543
2.773457
0.945586
sep = '\n' + ' ' * depth * indent
return ''.join(
    ("{}: {}{}".format(
        k,
        alphadump(d[k], depth=depth + 1) if isinstance(d[k], dict) else str(d[k]),
        sep)
     for k in sorted(d.keys()))
)
def alphadump(d, indent=2, depth=0)
Dump a dict to a str, with keys in alphabetical order.
2.970417
3.02683
0.981362
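A quick illustrative call for alphadump as defined above.

print(alphadump({"b": 2, "a": {"z": 1, "y": 0}}))
# keys are emitted in alphabetical order; nested dict values are indented two extra spaces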
for collection, doc in self.examples():
    _log.debug("validating example in collection {}".format(collection))
    sch = schema.get_schema(collection)  # with more err. checking
    result = sch.validate(doc)
    _log.debug("validation result: {}".format("OK" if result is None else result))
    if result is not None:
        fail_fn("Failed to validate sample document: {}".format(result))
def validate_examples(self, fail_fn)
Check the examples against the schema. :param fail_fn: Pass failure messages to this function :type fail_fn: function(str)
5.96017
5.873982
1.014673
user_kw = {} if user_kw is None else user_kw
build_kw = {} if build_kw is None else build_kw
n = self._build(self.get_items(**user_kw), **build_kw)
finalized = self.finalize(self._status.has_failures())
if not finalized:
    _log.error("Finalization failed")
return n
def run(self, user_kw=None, build_kw=None)
Run the builder. :param user_kw: keywords from user :type user_kw: dict :param build_kw: internal settings :type build_kw: dict :return: Number of items processed :rtype: int
3.928772
4.105263
0.957009
if isinstance(config, str):
    conn = dbutil.get_database(config_file=config)
elif isinstance(config, dict):
    conn = dbutil.get_database(settings=config)
else:
    raise ValueError("Configuration, '{}', must be a path to "
                     "a configuration file or dict".format(config))
return conn
def connect(self, config)
Connect to database with given configuration, which may be a dict or a path to a pymatgen-db configuration.
4.070602
3.655414
1.113582
_log.debug("_build, chunk_size={:d}".format(chunk_size))
n, i = 0, 0
for i, item in enumerate(items):
    if i == 0:
        _log.debug("_build, first item")
    if 0 == (i + 1) % chunk_size:
        if self._seq:
            self._run(0)
        else:
            self._run_parallel_fn()  # process the chunk
        if self._status.has_failures():
            break
        n = i + 1
    self._queue.put(item)
# process final chunk
if self._seq:
    self._run(0)
else:
    self._run_parallel_fn()
if not self._status.has_failures():
    n = i + 1
return n
def _build(self, items, chunk_size=10000)
Build the output, in chunks. :return: Number of items processed :rtype: int
3.386686
3.353618
1.00986
_log.debug("run.parallel.multiprocess.start")
processes = []
ProcRunner.instance = self
for i in range(self._ncores):
    self._status.running(i)
    proc = multiprocessing.Process(target=ProcRunner.run, args=(i,))
    proc.start()
    processes.append(proc)
for i in range(self._ncores):
    processes[i].join()
    code = processes[i].exitcode
    self._status.success(i) if 0 == code else self._status.fail(i)
_log.debug("run.parallel.multiprocess.end states={}".format(self._status))
def _run_parallel_multiprocess(self)
Run processes from queue
3.201176
3.050891
1.049259
while 1:
    try:
        item = self._queue.get(timeout=2)
        self.process_item(item)
    except Queue.Empty:
        break
    except Exception as err:
        _log.error("In _run(): {}".format(err))
        if _log.isEnabledFor(logging.DEBUG):
            _log.error(traceback.format_exc())
        self._status.fail(index)
        raise
self._status.success(index)
def _run(self, index)
Run method for one thread or process Just pull an item off the queue and process it, until the queue is empty. :param index: Sequential index of this process or thread :type index: int
3.189753
3.133356
1.017999
def _keys(x, pre=''):
    for k in x:
        yield (pre + k)
        if isinstance(x[k], dict):
            for nested in _keys(x[k], pre + k + sep):
                yield nested
return list(_keys(coll.find_one()))
def collection_keys(coll, sep='.')
Get a list of all (including nested) keys in a collection. Examines the first document in the collection. :param sep: Separator for nested keys :return: List of str
3.925681
3.656138
1.073723
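A hedged sketch of collection_keys, assuming coll is a live pymongo collection whose first document looks like {'a': {'b': 1}, 'c': 2}.

print(collection_keys(coll))
# ['a', 'a.b', 'c'] -- nested keys are reported in dotted form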
if len(d) == 0:
    return "{}"
return "{" + ', '.join(["'{}': {}".format(k, quotable(v))
                        for k, v in d.items()]) + "}"
def csv_dict(d)
Format dict to a string with comma-separated values.
3.679791
3.571558
1.030304
return ', '.join( ["{}={}".format(k, quotable(v)) for k, v in d.items()])
def kvp_dict(d)
Format dict to key=value pairs.
4.828007
4.573187
1.05572
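Illustrative output of the two formatting helpers above, assuming the quotable helper (not shown in these records) wraps string values in quotes and leaves numbers bare.

d = {"name": "LiFePO4", "nsites": 28}
print(csv_dict(d))  # {'name': 'LiFePO4', 'nsites': 28}
print(kvp_dict(d))  # name='LiFePO4', nsites=28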
self._groups = self.shared_dict()
self._target_coll = target.collection
self._src = source
return source.query()
def get_items(self, source=None, target=None)
Get all records from source collection to add to target. :param source: Input collection :type source: QueryEngine :param target: Output collection :type target: QueryEngine
17.708933
15.037261
1.17767
group, value = item['group'], item['value']
if group in self._groups:
    cur_val = self._groups[group]
    self._groups[group] = max(cur_val, value)
else:
    # New group. Could fetch old max. from target collection,
    # but for the sake of illustration recalculate it from
    # the source collection.
    self._src.tracking = False  # examine entire collection
    new_max = value
    for rec in self._src.query(criteria={'group': group},
                               properties=['value']):
        new_max = max(new_max, rec['value'])
    self._src.tracking = True  # back to incremental mode
    # calculate new max
    self._groups[group] = new_max
def process_item(self, item)
Calculate new maximum value for each group, for "new" items only.
5.810564
5.053009
1.149921
for group, value in self._groups.items():
    doc = {'group': group, 'value': value}
    self._target_coll.update({'group': group}, doc, upsert=True)
return True
def finalize(self, errs)
Update target collection with calculated maximum values.
6.316765
3.754188
1.682591
if not rec:
    return default
if not isinstance(rec, collections.Mapping):
    raise ValueError('input record must act like a dict')
if not '.' in key:
    return rec.get(key, default)
for key_part in key.split('.'):
    if not isinstance(rec, collections.Mapping):
        return default
    if not key_part in rec:
        return default
    rec = rec[key_part]
return rec
def mongo_get(rec, key, default=None)
Get value from dict using MongoDB dot-separated path semantics. For example: >>> assert mongo_get({'a': {'b': 1}, 'x': 2}, 'a.b') == 1 >>> assert mongo_get({'a': {'b': 1}, 'x': 2}, 'x') == 2 >>> assert mongo_get({'a': {'b': 1}, 'x': 2}, 'a.b.c') is None :param rec: mongodb document :param key: path to mongo value :param default: default to return if not found :return: value, potentially nested, or default if not found :raise: ValueError, if record is not a dict.
2.486252
2.539134
0.979173
if field.has_subfield():
    self._fields[field.full_name] = 1
else:
    self._fields[field.name] = 1
if op and op.is_size() and not op.is_variable():
    # get minimal part of array with slicing,
    # but cannot use slice with variables
    self._slices[field.name] = val + 1
if op and op.is_variable():
    # add the variable too
    self._fields[val] = 1
def add(self, field, op=None, val=None)
Update report fields to include the new one, if not already present. :param field: The field to include :type field: Field :param op: Operation :type op: ConstraintOperator :param val: Value for the operation (size bound or variable name) :return: None
7.287474
7.776165
0.937155
d = copy.copy(self._fields)
    for k, v in self._slices.items():
        d[k] = {'$slice': v}
    return d
def to_mongo(self)
Translate projection to MongoDB query form. :return: Dictionary to put into a MongoDB JSON query :rtype: dict
4.883687
5.801854
0.841746
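Assuming fields 'name' and 'values' with a size constraint of 5 on 'values', the resulting projection would look roughly like this; it is hand-built for illustration, not the output of the class itself.
fields = {'name': 1, 'values': 1}
slices = {'values': 6}  # size constraint + 1, per the add() method above
projection = dict(fields)
for k, v in slices.items():
    projection[k] = {'$slice': v}
print(projection)  # {'name': 1, 'values': {'$slice': 6}}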
rec = {} if record is None else record
    for v in violations:
        self._viol.append((v, rec))
def add_violations(self, violations, record=None)
Add constraint violations and associated record. :param violations: List of violations :type violations: list(ConstraintViolation) :param record: Associated record :type record: dict :rtype: None
6.368894
9.079464
0.701461
# extract filter and constraints
    try:
        fltr = item[self.FILTER_SECT]
    except KeyError:
        raise ValueError("configuration requires '{}'".format(self.FILTER_SECT))
    sample = item.get(self.SAMPLE_SECT, None)
    constraints = item.get(self.CONSTRAINT_SECT, None)
    section = ConstraintSpecSection(fltr, constraints, sample)
    key = section.get_key()
    if key in self._sections:
        self._sections[key].append(section)
    else:
        self._sections[key] = [section]
def _add_complex_section(self, item)
Add a section that has a filter and a set of constraints. :raise: ValueError if the filter is missing
3.788126
3.278013
1.155616
self._spec = constraint_spec
    self._progress.set_subject(subject)
    self._build(constraint_spec)
    for sect_parts in self._sections:
        cvg = self._validate_section(subject, coll, sect_parts)
        if cvg is not None:
            yield cvg
def validate(self, coll, constraint_spec, subject='collection')
Validation of a collection. This is a generator that yields ConstraintViolationGroups. :param coll: Mongo collection :type coll: pymongo.Collection :param constraint_spec: Constraint specification :type constraint_spec: ConstraintSpec :param subject: Name of the thing being validated :type subject: str :return: Sets of constraint violations, one for each constraint section :rtype: ConstraintViolationGroup :raises: ValidatorSyntaxError
6.475328
5.487815
1.179946
cvgroup = ConstraintViolationGroup()
    cvgroup.subject = subject
    # If the constraint is an 'import' of code, treat it differently here
    # if self._is_python(parts):
    #     num_found = self._run_python(cvgroup, coll, parts)
    #     return None if num_found == 0 else cvgroup
    query = parts.cond.to_mongo(disjunction=False)
    query.update(parts.body.to_mongo())
    cvgroup.condition = parts.cond.to_mongo(disjunction=False)
    self._log.debug('Query spec: {}'.format(query))
    self._log.debug('Query fields: {}'.format(parts.report_fields))
    # Find records that violate 1 or more constraints
    cursor = coll.find(query, parts.report_fields, **self._find_kw)
    if parts.sampler is not None:
        cursor = parts.sampler.sample(cursor)
    nbytes, num_dberr, num_rec = 0, 0, 0
    while 1:
        try:
            record = next(cursor)
            nbytes += total_size(record)
            num_rec += 1
        except StopIteration:
            self._log.info("collection {}: {:d} records, {:d} bytes, {:d} db-errors"
                           .format(subject, num_rec, nbytes, num_dberr))
            break
        except pymongo.errors.PyMongoError as err:
            num_dberr += 1
            if num_dberr > self._max_dberr > 0:
                raise DBError("Too many errors")
            self._log.warn("DB.{:d}: {}".format(num_dberr, err))
            continue
        # report progress
        if self._progress:
            self._progress.update(num_dberr, nbytes)
        # get reasons for badness
        violations = self._get_violations(parts.body, record)
        cvgroup.add_violations(violations, record)
    return None if nbytes == 0 else cvgroup
def _validate_section(self, subject, coll, parts)
Validate one section of a spec. :param subject: Name of subject :type subject: str :param coll: The collection to validate :type coll: pymongo.Collection :param parts: Section parts :type parts: Validator.SectionParts :return: Group of constraint violations, if any, otherwise None :rtype: ConstraintViolationGroup or None
4.255668
4.050648
1.050614
# special case, when no constraints are given
    if len(query.all_clauses) == 0:
        return [NullConstraintViolation()]
    # normal case, check all the constraints
    reasons = []
    for clause in query.all_clauses:
        var_name = None
        key = clause.constraint.field.name
        op = clause.constraint.op
        fval = mongo_get(record, key)
        if fval is None:
            expected = clause.constraint.value
            reasons.append(ConstraintViolation(clause.constraint, 'missing', expected))
            continue
        if op.is_variable():
            # retrieve value for variable
            var_name = clause.constraint.value
            value = mongo_get(record, var_name, default=None)
            if value is None:
                reasons.append(ConstraintViolation(clause.constraint, 'missing', var_name))
                continue
            clause.constraint.value = value  # swap out value, temporarily
        # take length for size
        if op.is_size():
            if isinstance(fval, str) or not hasattr(fval, '__len__'):
                reasons.append(ConstraintViolation(clause.constraint, type(fval), 'sequence'))
                if op.is_variable():
                    clause.constraint.value = var_name  # put original value back
                continue
            fval = len(fval)
        ok, expected = clause.constraint.passes(fval)
        if not ok:
            reasons.append(ConstraintViolation(clause.constraint, fval, expected))
        if op.is_variable():
            clause.constraint.value = var_name  # put original value back
    return reasons
def _get_violations(self, query, record)
Reverse-engineer the query to figure out why a record was selected. :param query: MongoDB query :type query: MongoQuery :param record: Record in question :type record: dict :return: Reasons why the record is bad :rtype: list(ConstraintViolation)
3.351036
3.224913
1.039109
self._sections = []
    # For each condition in the spec
    for sval in constraint_spec:
        rpt_fld = self._base_report_fields.copy()
        #print("@@ CONDS = {}".format(sval.filters))
        #print("@@ MAIN = {}".format(sval.constraints))
        # Constraints
        # If the constraint is an external call to Python code
        if self._is_python(sval.constraints):
            query, proj = self._process_python(sval.constraints)
            rpt_fld.update(proj.to_mongo())
        # All other constraints, e.g. 'foo > 12'
        else:
            query = MongoQuery()
            if sval.constraints is not None:
                groups = self._process_constraint_expressions(sval.constraints)
                projection = Projection()
                for cg in groups.values():
                    for c in cg:
                        projection.add(c.field, c.op, c.value)
                        query.add_clause(MongoClause(c))
                    if self._add_exists:
                        for c in cg.existence_constraints:
                            query.add_clause(MongoClause(c, exists_main=True))
                rpt_fld.update(projection.to_mongo())
        # Filters
        cond_query = MongoQuery()
        if sval.filters is not None:
            cond_groups = self._process_constraint_expressions(sval.filters, rev=False)
            for cg in cond_groups.values():
                for c in cg:
                    cond_query.add_clause(MongoClause(c, rev=False))
        # Done. Add a new 'SectionPart' for the filter and constraint
        result = self.SectionParts(cond_query, query, sval.sampler, rpt_fld)
        self._sections.append(result)
def _build(self, constraint_spec)
Generate queries to execute. Sets instance variables so that Mongo query strings, etc. can now be extracted from the object. :param constraint_spec: Constraint specification :type constraint_spec: ConstraintSpec
4.866354
4.772777
1.019606
# process expressions, grouping by field
    groups = {}
    for expr in expr_list:
        field, raw_op, val = parse_expr(expr)
        op = ConstraintOperator(raw_op)
        if field not in groups:
            groups[field] = ConstraintGroup(Field(field, self._aliases))
        groups[field].add_constraint(op, val)
    # add existence constraints
    for cgroup in groups.values():
        cgroup.add_existence(rev)
    # optionally check for conflicts
    if conflict_check:
        # check for conflicts in each group
        for field_name, group in groups.items():
            conflicts = group.get_conflicts()
            if conflicts:
                raise ValueError('Conflicts for field {}: {}'.format(field_name, conflicts))
    return groups
def _process_constraint_expressions(self, expr_list, conflict_check=True, rev=True)
Create and return constraints from expressions in expr_list. :param expr_list: The expressions :param conflict_check: If True, check for conflicting expressions within each field :return: Constraints grouped by field (the key is the field name) :rtype: dict
3.336076
3.304276
1.009624
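A rough standalone sketch of the grouping step, with a naive whitespace split standing in for parse_expr; the real parser, ConstraintGroup, and operator classes will differ, so this only illustrates how expressions end up keyed by field.
from collections import defaultdict

exprs = ['age > 12', 'age < 65', 'name exists']
groups = defaultdict(list)
for expr in exprs:
    # crude parse: field, operator, optional value
    field, op, *val = expr.split()
    groups[field].append((op, val[0] if val else None))
print(dict(groups))
# {'age': [('>', '12'), ('<', '65')], 'name': [('exists', None)]}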
if len(constraint_list) == 1 and \
            PythonMethod.constraint_is_method(constraint_list[0]):
        return True
    if len(constraint_list) > 1 and \
            any(filter(PythonMethod.constraint_is_method, constraint_list)):
        condensed_list = '/'.join(constraint_list)
        err = PythonMethod.CANNOT_COMBINE_ERR
        raise ValidatorSyntaxError(condensed_list, err)
    return False
def _is_python(self, constraint_list)
Check whether constraint is an import of Python code. :param constraint_list: List of raw constraints from YAML file :type constraint_list: list(str) :return: True if this refers to an import of code, False otherwise :raises: ValidatorSyntaxError
4.436063
4.475821
0.991117
"Set aliases and wrap errors in ValueError" try: self.aliases = new_value except Exception as err: raise ValueError("invalid value: {}".format(err))
def set_aliases(self, new_value)
Set aliases and wrap errors in ValueError
7.743413
4.195139
1.845806
count = cursor.count()
    # special case: empty collection
    if count == 0:
        self._empty = True
        raise ValueError("Empty collection")
    # special case: entire collection
    if self.p >= 1 and self.max_items <= 0:
        for item in cursor:
            yield item
        return
    # calculate target number of items to select
    if self.max_items <= 0:
        n_target = max(self.min_items, self.p * count)
    else:
        if self.p <= 0:
            n_target = max(self.min_items, self.max_items)
        else:
            n_target = max(self.min_items, min(self.max_items, self.p * count))
    if n_target == 0:
        raise ValueError("No items requested")
    # select first `n_target` items that pop up with
    # probability self.p
    # This is actually biased to items at the beginning
    # of the file if n_target is smaller than (p * count)
    n = 0
    while n < n_target:
        try:
            item = next(cursor)
        except StopIteration:
            # need to keep looping through data until
            # we get all our items!
            cursor.rewind()
            item = next(cursor)
        if self._keep():
            yield item
            n += 1
def sample(self, cursor)
Extract records randomly from the database. Continue until the target proportion of the items have been extracted, or until `min_items` if this is larger. If `max_items` is non-negative, do not extract more than these. This function is a generator, yielding items incrementally. :param cursor: Cursor to sample :type cursor: pymongo.cursor.Cursor :return: yields each item :rtype: dict :raise: ValueError, if max_items is valid and less than `min_items` or if target collection is empty
3.938018
3.540545
1.112263
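The target-count arithmetic in isolation, using p=0.1, min_items=5, max_items=50 on a 200-record collection; the numbers are illustrative only.
p, min_items, max_items, count = 0.1, 5, 50, 200
if max_items <= 0:
    n_target = max(min_items, p * count)
else:
    # bounded above by max_items, below by min_items
    n_target = max(min_items, max_items) if p <= 0 else \
        max(min_items, min(max_items, p * count))
print(n_target)  # 20.0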
print 'The following LiveSync agents are available:'
    for name, backend in current_plugin.backend_classes.iteritems():
        print cformat(' - %{white!}{}%{reset}: {} ({})').format(name, backend.title, backend.description)
def available_backends()
Lists the currently available backend types
12.005801
12.448941
0.964403
print 'The following LiveSync agents are active:'
    agent_list = LiveSyncAgent.find().order_by(LiveSyncAgent.backend_name, db.func.lower(LiveSyncAgent.name)).all()
    table_data = [['ID', 'Name', 'Backend', 'Initial Export', 'Queue']]
    for agent in agent_list:
        initial = (cformat('%{green!}done%{reset}') if agent.initial_data_exported else
                   cformat('%{yellow!}pending%{reset}'))
        if agent.backend is None:
            backend_title = cformat('%{red!}invalid backend ({})%{reset}').format(agent.backend_name)
        else:
            backend_title = agent.backend.title
        table_data.append([unicode(agent.id), agent.name, backend_title, initial,
                           unicode(agent.queue.filter_by(processed=False).count())])
    table = AsciiTable(table_data)
    table.justify_columns[4] = 'right'
    print table.table
    if not all(a.initial_data_exported for a in agent_list):
        print
        print "You need to perform the initial data export for some agents."
        print cformat("To do so, run "
                      "%{yellow!}indico livesync initial_export %{reset}%{yellow}<agent_id>%{reset} for those agents.")
def agents()
Lists the currently active agents
4.043992
3.972431
1.018014
agent = LiveSyncAgent.find_first(id=agent_id)
    if agent is None:
        print 'No such agent'
        return
    if agent.backend is None:
        print cformat('Cannot run agent %{red!}{}%{reset} (backend not found)').format(agent.name)
        return
    print cformat('Selected agent: %{white!}{}%{reset} ({})').format(agent.name, agent.backend.title)
    if agent.initial_data_exported and not force:
        print 'The initial export has already been performed for this agent.'
        print cformat('To re-run it, use %{yellow!}--force%{reset}')
        return
    agent.create_backend().run_initial_export(Event.find(is_deleted=False))
    agent.initial_data_exported = True
    db.session.commit()
def initial_export(agent_id, force)
Performs the initial data export for an agent
4.856945
4.800968
1.011659
if agent_id is None:
        agent_list = LiveSyncAgent.find_all()
    else:
        agent = LiveSyncAgent.find_first(id=agent_id)
        if agent is None:
            print 'No such agent'
            return
        agent_list = [agent]
    for agent in agent_list:
        if agent.backend is None:
            print cformat('Skipping agent: %{red!}{}%{reset} (backend not found)').format(agent.name)
            continue
        if not agent.initial_data_exported and not force:
            print cformat('Skipping agent: %{red!}{}%{reset} (initial export not performed)').format(agent.name)
            continue
        print cformat('Running agent: %{white!}{}%{reset}').format(agent.name)
        try:
            agent.create_backend().run()
            db.session.commit()
        except:
            db.session.rollback()
            raise
def run(agent_id, force=False)
Runs the livesync agent
3.003202
2.807198
1.069822