Columns: sentence1 (string, lengths 52 to 3.87M), sentence2 (string, lengths 1 to 47.2k), label (1 class: entailment)
def lipd_read(path): """ Loads a LiPD file from local path. Unzip, read, and process data Steps: create tmp, unzip lipd, read files into memory, manipulate data, move to original dir, delete tmp. :param str path: Source path :return none: """ _j = {} dir_original = os.getcwd() # Import metadata into object try: print("reading: {}".format(print_filename(path))) # bigger than 2mb file? This could take a while if os.stat(path).st_size > 1000000: _size = os.stat(path).st_size print("{} :That's a big file! This may take a while to load...".format("{} MB".format(round(_size/1000000,2)))) dir_tmp = create_tmp_dir() unzipper(path, dir_tmp) os.chdir(dir_tmp) _dir_data = find_files() os.chdir(_dir_data) _j = read_jsonld() _j = rm_empty_fields(_j) _j = check_dsn(path, _j) _j = update_lipd_version(_j) _j = idx_num_to_name(_j) _j = rm_empty_doi(_j) _j = rm_empty_fields(_j) _j = put_tsids(_j) _csvs = read_csvs() _j = merge_csv_metadata(_j, _csvs) # Why ? Because we need to align the csv filenames with the table filenames. We don't need the csv output here. _j, _csv = get_csv_from_metadata(_j["dataSetName"], _j) os.chdir(dir_original) shutil.rmtree(dir_tmp) except FileNotFoundError: print("Error: lipd_read: LiPD file not found. Please make sure the filename includes the .lpd extension") except Exception as e: logger_lipd.error("lipd_read: {}".format(e)) print("Error: lipd_read: unable to read LiPD: {}".format(e)) os.chdir(dir_original) logger_lipd.info("lipd_read: record loaded: {}".format(path)) return _j
Loads a LiPD file from local path. Unzip, read, and process data Steps: create tmp, unzip lipd, read files into memory, manipulate data, move to original dir, delete tmp. :param str path: Source path :return none:
entailment
def lipd_write(_json, path): """ Saves current state of LiPD object data. Outputs to a LiPD file. Steps: create tmp, create bag dir, get dsn, splice csv from json, write csv, clean json, write json, create bagit, zip up bag folder, place lipd in target dst, move to original dir, delete tmp :param dict _json: Metadata :param str path: Destination path :return none: """ # Json is pass by reference. Make a copy so we don't mess up the original data. _json_tmp = copy.deepcopy(_json) dir_original = os.getcwd() try: dir_tmp = create_tmp_dir() dir_bag = os.path.join(dir_tmp, "bag") os.mkdir(dir_bag) os.chdir(dir_bag) _dsn = get_dsn(_json_tmp) _dsn_lpd = _dsn + ".lpd" _json_tmp, _csv = get_csv_from_metadata(_dsn, _json_tmp) write_csv_to_file(_csv) _json_tmp = rm_values_fields(_json_tmp) _json_tmp = put_tsids(_json_tmp) _json_tmp = idx_name_to_num(_json_tmp) write_json_to_file(_json_tmp) create_bag(dir_bag) rm_file_if_exists(path, _dsn_lpd) zipper(root_dir=dir_tmp, name="bag", path_name_ext=os.path.join(path, _dsn_lpd)) os.chdir(dir_original) shutil.rmtree(dir_tmp) except Exception as e: logger_lipd.error("lipd_write: {}".format(e)) print("Error: lipd_write: {}".format(e)) return
Saves current state of LiPD object data. Outputs to a LiPD file. Steps: create tmp, create bag dir, get dsn, splice csv from json, write csv, clean json, write json, create bagit, zip up bag folder, place lipd in target dst, move to original dir, delete tmp :param dict _json: Metadata :param str path: Destination path :return none:
entailment
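A minimal round-trip sketch for the two functions above, assuming both are importable from this module; the paths are hypothetical placeholders:
# Read a LiPD archive into a metadata dictionary, then write it back out
data = lipd_read("/tmp/MyDataset.lpd")   # hypothetical source .lpd file
lipd_write(data, "/tmp/output")          # hypothetical destination directory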
def _bulk_size_generator(num_records, bulk_size, active): """ Generate bulk_size until num_records is reached or active becomes false >>> gen = _bulk_size_generator(155, 50, [True]) >>> list(gen) [50, 50, 50, 5] """ while active and num_records > 0: req_size = min(num_records, bulk_size) num_records -= req_size yield req_size
Generate bulk_size until num_records is reached or active becomes false >>> gen = _bulk_size_generator(155, 50, [True]) >>> list(gen) [50, 50, 50, 5]
entailment
def insert_fake_data(hosts=None, table=None, num_records=1e5, bulk_size=1000, concurrency=25, mapping_file=None): """Generate random data and insert it into a table. This will read the table schema and then find suitable random data providers. Which provider is choosen depends on the column name and data type. Example: A column named `name` will map to the `name` provider. A column named `x` of type int will map to `random_int` because there is no `x` provider. Available providers are listed here: https://faker.readthedocs.io/en/latest/providers.html Additional providers: - auto_inc: Returns unique incrementing numbers. Automatically used for columns named "id" of type int or long - geo_point Returns [<lon>, <lat>] Automatically used for columns of type geo_point Args: hosts: <host>:[<port>] of the Crate node table: The table name into which the data should be inserted. Either fully qualified: `<schema>.<table>` or just `<table>` num_records: Number of records to insert. Usually a number but expressions like `1e4` work as well. bulk_size: The bulk size of the insert statements. concurrency: How many operations to run concurrently. mapping_file: A JSON file that defines a mapping from column name to fake-factory provider. The format is as follows: { "column_name": ["provider_with_args", ["arg1", "arg"]], "x": ["provider_with_args", ["arg1"]], "y": "provider_without_args" } """ with clients.client(hosts, concurrency=1) as client: schema, table_name = parse_table(table) columns = retrieve_columns(client, schema, table_name) if not columns: sys.exit('Could not find columns for table "{}"'.format(table)) print('Found schema: ') print(json.dumps(columns, sort_keys=True, indent=4)) mapping = None if mapping_file: mapping = json.load(mapping_file) bulk_size = min(num_records, bulk_size) num_inserts = int(math.ceil(num_records / bulk_size)) gen_row = create_row_generator(columns, mapping) stmt = to_insert('"{schema}"."{table_name}"'.format(**locals()), columns)[0] print('Using insert statement: ') print(stmt) print('Will make {} requests with a bulk size of {}'.format( num_inserts, bulk_size)) print('Generating fake data and executing inserts') q = asyncio.Queue(maxsize=concurrency) with clients.client(hosts, concurrency=concurrency) as client: active = [True] def stop(): asyncio.ensure_future(q.put(None)) active.clear() loop.remove_signal_handler(signal.SIGINT) if sys.platform != 'win32': loop.add_signal_handler(signal.SIGINT, stop) bulk_seq = _bulk_size_generator(num_records, bulk_size, active) with ThreadPoolExecutor() as e: tasks = asyncio.gather( _gen_data_and_insert(q, e, client, stmt, gen_row, bulk_seq), consume(q, total=num_inserts) ) loop.run_until_complete(tasks)
Generate random data and insert it into a table. This will read the table schema and then find suitable random data providers. Which provider is chosen depends on the column name and data type. Example: A column named `name` will map to the `name` provider. A column named `x` of type int will map to `random_int` because there is no `x` provider. Available providers are listed here: https://faker.readthedocs.io/en/latest/providers.html Additional providers: - auto_inc: Returns unique incrementing numbers. Automatically used for columns named "id" of type int or long - geo_point: Returns [<lon>, <lat>] Automatically used for columns of type geo_point Args: hosts: <host>:[<port>] of the Crate node table: The table name into which the data should be inserted. Either fully qualified: `<schema>.<table>` or just `<table>` num_records: Number of records to insert. Usually a number but expressions like `1e4` work as well. bulk_size: The bulk size of the insert statements. concurrency: How many operations to run concurrently. mapping_file: A JSON file that defines a mapping from column name to fake-factory provider. The format is as follows: { "column_name": ["provider_with_args", ["arg1", "arg"]], "x": ["provider_with_args", ["arg1"]], "y": "provider_without_args" }
entailment
def main(self): """ Main function that gets file(s), creates outputs, and runs all operations. :return dict: Updated or original data for jsonld file """ logger_doi_resolver.info("enter doi_resolver") for idx, pub in enumerate(self.root_dict['pub']): # Retrieve DOI id key-value from the root_dict doi_string, doi_found = self.find_doi(pub) if doi_found: logger_doi_resolver.info("doi found: {}".format(doi_string)) # Empty list for no match, or list of 1+ matching DOI id strings doi_list = clean_doi(doi_string) if not doi_list: self.illegal_doi(doi_string) else: for doi_id in doi_list: self.get_data(doi_id, idx) else: logger_doi_resolver.warn("doi not found: {}: publication index: {}".format(self.name, idx)) self.root_dict['pub'][idx]['pubDataUrl'] = 'Manually Entered' logger_doi_resolver.info("exit doi_resolver") return rm_empty_fields(self.root_dict)
Main function that gets file(s), creates outputs, and runs all operations. :return dict: Updated or original data for jsonld file
entailment
def compare_replace(pub_dict, fetch_dict): """ Take in our Original Pub, and Fetched Pub. For each Fetched entry that has data, overwrite the Original entry :param pub_dict: (dict) Original pub dictionary :param fetch_dict: (dict) Fetched pub dictionary from doi.org :return: (dict) Updated pub dictionary, with fetched data taking precedence """ blank = [" ", "", None] for k, v in fetch_dict.items(): try: if fetch_dict[k] not in blank: pub_dict[k] = fetch_dict[k] except KeyError: pass return pub_dict
Take in our Original Pub, and Fetched Pub. For each Fetched entry that has data, overwrite the Original entry :param pub_dict: (dict) Original pub dictionary :param fetch_dict: (dict) Fetched pub dictionary from doi.org :return: (dict) Updated pub dictionary, with fetched data taking precedence
entailment
def noaa_citation(self, doi_string): """ Special instructions for moving noaa data to the correct fields :param doi_string: (str) NOAA url :return: None """ # Append location 1 if 'link' in self.root_dict['pub'][0]: self.root_dict['pub'][0]['link'].append({"url": doi_string}) else: self.root_dict['pub'][0]['link'] = [{"url": doi_string}] # Append location 2 self.root_dict['dataURL'] = doi_string return
Special instructions for moving noaa data to the correct fields :param doi_string: (str) NOAA url :return: None
entailment
def illegal_doi(self, doi_string): """ DOI string did not match the regex. Determine what the data is. :param doi_string: (str) Malformed DOI string :return: None """ logger_doi_resolver.info("enter illegal_doi") # Ignores empty or irrelevant strings (blank, spaces, na, nan, ', others) if len(doi_string) > 5: # NOAA string if 'noaa' in doi_string.lower(): self.noaa_citation(doi_string) # Paragraph citation / Manual citation elif doi_string.count(' ') > 3: self.root_dict['pub'][0]['citation'] = doi_string # Strange Links or Other, send to quarantine else: logger_doi_resolver.warn("illegal_doi: bad doi string: {}".format(doi_string)) logger_doi_resolver.info("exit illegal_doi") return
DOI string did not match the regex. Determine what the data is. :param doi_string: (str) Malformed DOI string :return: None
entailment
def compile_fetch(self, raw, doi_id): """ Loop over Raw and add selected items to Fetch with proper formatting :param dict raw: JSON data from doi.org :param str doi_id: :return dict: """ fetch_dict = OrderedDict() order = {'author': 'author', 'type': 'type', 'identifier': '', 'title': 'title', 'journal': 'container-title', 'pubYear': '', 'volume': 'volume', 'publisher': 'publisher', 'page':'page', 'issue': 'issue'} for k, v in order.items(): try: if k == 'identifier': fetch_dict[k] = [{"type": "doi", "id": doi_id, "url": "http://dx.doi.org/" + doi_id}] elif k == 'author': fetch_dict[k] = self.compile_authors(raw[v]) elif k == 'pubYear': fetch_dict[k] = self.compile_date(raw['issued']['date-parts']) else: fetch_dict[k] = raw[v] except KeyError as e: # If we try to add a key that doesn't exist in the raw dict, then just keep going. logger_doi_resolver.warn("compile_fetch: KeyError: key not in raw: {}, {}".format(v, e)) return fetch_dict
Loop over Raw and add selected items to Fetch with proper formatting :param dict raw: JSON data from doi.org :param str doi_id: :return dict:
entailment
def get_data(self, doi_id, idx): """ Resolve DOI and compile all attributes into one dictionary :param str doi_id: :param int idx: Publication index :return dict: Updated publication dictionary """ tmp_dict = self.root_dict['pub'][0].copy() try: # Send request to grab metadata at URL url = "http://dx.doi.org/" + doi_id headers = {"accept": "application/rdf+xml;q=0.5, application/citeproc+json;q=1.0"} r = requests.get(url, headers=headers) # DOI 404. Data not retrieved. Log and return original pub if r.status_code == 404: logger_doi_resolver.warn("doi.org STATUS: 404, {}".format(doi_id)) # Ignore other status codes. Run when status is 200 (good response) elif r.status_code == 200: logger_doi_resolver.info("doi.org STATUS: 200") # Load data from http response raw = json.loads(r.text) # Create a new pub dictionary with metadata received fetch_dict = self.compile_fetch(raw, doi_id) # Compare the two pubs. Overwrite old data with new data where applicable tmp_dict = self.compare_replace(tmp_dict, fetch_dict) tmp_dict['pubDataUrl'] = 'doi.org' self.root_dict['pub'][idx] = tmp_dict except urllib.error.URLError as e: logger_doi_resolver.warn("get_data: URLError: malformed doi: {}, {}".format(doi_id, e)) except ValueError as e: logger_doi_resolver.warn("get_data: ValueError: cannot resolve dois from this publisher: {}, {}".format(doi_id, e)) return
Resolve DOI and compile all attributes into one dictionary :param str doi_id: :param int idx: Publication index :return dict: Updated publication dictionary
entailment
def find_doi(self, curr_dict): """ Recursively search the file for the DOI id. More taxing, but more flexible when dictionary structuring isn't absolute :param dict curr_dict: Current dictionary being searched :return dict bool: Recursive - Current dictionary, False flag that DOI was not found :return str bool: Final - DOI id, True flag that DOI was found """ try: if 'id' in curr_dict: return curr_dict['id'], True elif isinstance(curr_dict, list): for i in curr_dict: return self.find_doi(i) elif isinstance(curr_dict, dict): for k, v in curr_dict.items(): if k == 'identifier': return self.find_doi(v) return curr_dict, False else: return curr_dict, False # If the ID key doesn't exist, then return the original dict with a flag except TypeError: return curr_dict, False
Recursively search the file for the DOI id. More taxing, but more flexible when dictionary structuring isn't absolute :param dict curr_dict: Current dictionary being searched :return dict bool: Recursive - Current dictionary, False flag that DOI was not found :return str bool: Final - DOI id, True flag that DOI was found
entailment
def cast_values_csvs(d, idx, x): """ Attempt to cast string to float. If error, keep as a string. :param dict d: Data :param int idx: Index number :param str x: Data :return any: """ try: d[idx].append(float(x)) except ValueError: d[idx].append(x) # logger_misc.warn("cast_values_csv: ValueError") # logger_misc.warn("ValueError: col: {}, {}".format(x, e)) except KeyError as e: logger_misc.warn("cast_values_csv: KeyError: col: {}, {}".format(x, e)) return d
Attempt to cast string to float. If error, keep as a string. :param dict d: Data :param int idx: Index number :param str x: Data :return any:
entailment
def cast_float(x): """ Attempt to cleanup string or convert to number value. :param any x: :return float: """ try: x = float(x) except ValueError: try: x = x.strip() except AttributeError as e: logger_misc.warn("parse_str: AttributeError: String not number or word, {}, {}".format(x, e)) return x
Attempt to cleanup string or convert to number value. :param any x: :return float:
entailment
def cast_int(x): """ Cast unknown type into integer :param any x: :return int: """ try: x = int(x) except ValueError: try: x = x.strip() except AttributeError as e: logger_misc.warn("parse_str: AttributeError: String not number or word, {}, {}".format(x, e)) return x
Cast unknown type into integer :param any x: :return int:
entailment
def clean_doi(doi_string): """ Use regex to extract all DOI ids from string (i.e. 10.1029/2005pa001215) :param str doi_string: Raw DOI string value from input file. Often not properly formatted. :return list: DOI ids. May contain 0, 1, or multiple ids. """ regex = re.compile(r'\b(10[.][0-9]{3,}(?:[.][0-9]+)*/(?:(?!["&\'<>,])\S)+)\b') try: # Returns a list of matching strings m = re.findall(regex, doi_string) except TypeError as e: # If doi_string is None type, return empty list logger_misc.warn("TypeError cleaning DOI: {}, {}".format(doi_string, e)) m = [] return m
Use regex to extract all DOI ids from string (i.e. 10.1029/2005pa001215) :param str doi_string: Raw DOI string value from input file. Often not properly formatted. :return list: DOI ids. May contain 0, 1, or multiple ids.
entailment
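For illustration, the DOI-extraction behavior can be checked standalone with the same regex used above; the input string here is hypothetical:
import re

regex = re.compile(r'\b(10[.][0-9]{3,}(?:[.][0-9]+)*/(?:(?!["&\'<>,])\S)+)\b')
raw = "See doi:10.1029/2005pa001215 and 10.1016/j.quascirev.2004.07.009 for details"
print(regex.findall(raw))
# ['10.1029/2005pa001215', '10.1016/j.quascirev.2004.07.009']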
def decimal_precision(row): """ Change the "precision" of values before writing to CSV. Each value is rounded to 3 numbers. ex: 300 -> 300 ex: 300.123456 -> 300.123 ex: 3.123456e-25 - > 3.123e-25 :param tuple row: Row of numbers to process :return list row: Processed row """ # _row = [] try: # Convert tuple to list for processing row = list(row) for idx, x in enumerate(row): x = str(x) # Is this a scientific notated float? Tear it apart with regex, round, and piece together again m = re.match(re_sci_notation, x) if m: _x2 = round(float(m.group(2)), 3) x = m.group(1) + str(_x2)[1:] + m.group(3) # A normal float? round to 3 decimals as usual else: try: x = round(float(x), 3) except (ValueError, TypeError): x = x row[idx] = x # Convert list back to tuple for csv writer row = tuple(row) except Exception as e: print("Error: Unable to fix the precision of values. File size may be larger than normal, {}".format(e)) return row
Change the "precision" of values before writing to CSV. Each value is rounded to 3 numbers. ex: 300 -> 300 ex: 300.123456 -> 300.123 ex: 3.123456e-25 - > 3.123e-25 :param tuple row: Row of numbers to process :return list row: Processed row
entailment
def fix_coordinate_decimal(d): """ Coordinate decimal degrees calculated by an excel formula are often too long as a repeating decimal. Round them down to 5 decimals :param dict d: Metadata :return dict d: Metadata """ try: for idx, n in enumerate(d["geo"]["geometry"]["coordinates"]): d["geo"]["geometry"]["coordinates"][idx] = round(n, 5) except Exception as e: logger_misc.error("fix_coordinate_decimal: {}".format(e)) return d
Coordinate decimal degrees calculated by an excel formula are often too long as a repeating decimal. Round them down to 5 decimals :param dict d: Metadata :return dict d: Metadata
entailment
def generate_timestamp(fmt=None): """ Generate a timestamp to mark when this file was last modified. :param str fmt: Special format instructions :return str: YYYY-MM-DD format, or specified format """ if fmt: time = dt.datetime.now().strftime(fmt) else: time = dt.date.today() return str(time)
Generate a timestamp to mark when this file was last modified. :param str fmt: Special format instructions :return str: YYYY-MM-DD format, or specified format
entailment
def generate_tsid(size=8): """ Generate a TSid string. Use the "PYT" prefix for traceability, and 8 trailing generated characters ex: PYT9AG234GS :return str: TSid """ chars = string.ascii_uppercase + string.digits _gen = "".join(random.choice(chars) for _ in range(size)) return "PYT" + str(_gen)
Generate a TSid string. Use the "PYT" prefix for traceability, and 8 trailing generated characters ex: PYT9AG234GS :return str: TSid
entailment
def get_appended_name(name, columns): """ Append numbers to a name until it no longer conflicts with the other names in a column. Necessary to avoid overwriting columns and losing data. Loop a preset amount of times to avoid an infinite loop. There shouldn't ever be more than two or three identical variable names in a table. :param str name: Variable name in question :param dict columns: Columns listed by variable name :return str: Appended variable name """ loop = 0 while name in columns: loop += 1 if loop > 10: logger_misc.warn("get_appended_name: Too many loops: Tried to get appended name but something looks wrong") break tmp = name + "-" + str(loop) if tmp not in columns: return tmp return name + "-99"
Append numbers to a name until it no longer conflicts with the other names in a column. Necessary to avoid overwriting columns and losing data. Loop a preset amount of times to avoid an infinite loop. There shouldn't ever be more than two or three identical variable names in a table. :param str name: Variable name in question :param dict columns: Columns listed by variable name :return str: Appended variable name
entailment
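A small usage sketch, assuming get_appended_name is in scope; only key membership in the columns dict matters here, so the values are arbitrary:
columns = {"temperature": 1, "temperature-1": 2}
print(get_appended_name("temperature", columns))  # 'temperature-2'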
def get_authors_as_str(x): """ Take author or investigator data, and convert it to a concatenated string of names. Author data structure has a few variations, so account for all. :param any x: Author data :return str: Author string """ _authors = "" # if it's a string already, we're done if isinstance(x, str): return x # elif it's a list, keep going elif isinstance(x, list): # item in list is a str if isinstance(x[0], str): # loop and concat until the last item for name in x[:-1]: # all inner items get a semi-colon at the end _authors += str(name) + "; " # last item does not get a semi-colon at the end _authors += str(x[-1]) # item in list is a dictionary elif isinstance(x[0], dict): # dictionary structure SHOULD have authors listed until the "name" key. try: # loop and concat until the last item for entry in x[:-1]: # all inner items get a semi-colon at the end _authors += str(entry["name"]) + "; " # last item does not get a semi-colon at the end _authors += str(x[-1]["name"]) except KeyError: logger_misc.warn("get_authors_as_str: KeyError: Authors incorrect data structure") else: logger_misc.debug("get_authors_as_str: TypeError: author/investigators isn't str or list: {}".format(type(x))) return _authors
Take author or investigator data, and convert it to a concatenated string of names. Author data structure has a few variations, so account for all. :param any x: Author data :return str: Author string
entailment
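The three input shapes handled above, shown with hypothetical author names and assuming the function is in scope:
print(get_authors_as_str("Smith, J."))                                   # 'Smith, J.'
print(get_authors_as_str(["Smith, J.", "Doe, A."]))                      # 'Smith, J.; Doe, A.'
print(get_authors_as_str([{"name": "Smith, J."}, {"name": "Doe, A."}]))  # 'Smith, J.; Doe, A.'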
def get_dsn(d): """ Get the dataset name from a record :param dict d: Metadata :return str: Dataset name """ try: return d["dataSetName"] except Exception as e: logger_misc.warn("get_dsn: Exception: No datasetname found, unable to continue: {}".format(e)) exit(1)
Get the dataset name from a record :param dict d: Metadata :return str: Dataset name
entailment
def get_ensemble_counts(d): """ Determine if this is a 1 or 2 column ensemble. Then determine how many columns and rows it has. :param dict d: Metadata (table) :return dict _rows_cols: Row and column counts """ _rows_cols = {"rows": 0, "cols": 0} try: if len(d) == 1: for var, data in d.items(): # increment columns by one _rows_cols["cols"] += len(data["values"]) # get row count by getting len of column (since it's only one list _rows_cols["rows"] = len(data["values"][0]) break elif len(d) == 2: for var, data in d.items(): # multiple columns in one. list of lists if isinstance(data["number"], list): # add total amount of columns to the running total _rows_cols["cols"] += len(data["values"]) # single column. one list else: # increment columns by one _rows_cols["cols"] += 1 # get row count by getting len of column (since it's only one list _rows_cols["rows"] = len(data["values"]) except Exception as e: logger_misc.warn("get_ensemble_counts: {}".format(e)) return _rows_cols
Determine if this is a 1 or 2 column ensemble. Then determine how many columns and rows it has. :param dict d: Metadata (table) :return dict _rows_cols: Row and column counts
entailment
def get_missing_value_key(d): """ Get the Missing Value entry from a table of data. If none is found, try the columns. If still none found, prompt user. :param dict d: Table of data :return str _mv: Missing Value """ _mv = "" # Attempt to find a table-level missing value key try: # check for missing value key at the table root _mv = d["missingValue"] except KeyError as e: logger_misc.info("get_missing_value: No missing value key found: {}".format(e)) except AttributeError as e: logger_misc.warn("get_missing_value: Column is wrong data type: {}".format(e)) # No table-level missing value found. Attempt to find a column-level missing value key if not _mv: try: # loop for each column of data, searching for a missing value key for k, v in d["columns"].items(): # found a column with a missing value key. Store it and exit the loop. _mv = v["missingValue"] break except KeyError: # There are no columns in this table. We've got bigger problems! pass # No table-level or column-level missing value found. Fall back to the default. if not _mv: _mv = "nan" # if not _mv: # print("No 'missingValue' key provided. Please type the missingValue used in this file: {}\n".format(filename)) # _mv = input("missingValue: ") return _mv
Get the Missing Value entry from a table of data. If none is found, try the columns. If still none found, prompt user. :param dict d: Table of data :return str _mv: Missing Value
entailment
def get_variable_name_col(d): """ Get the variable name from a table or column :param dict d: Metadata (column) :return str var: Variable name """ var = "" try: var = d["variableName"] except KeyError: try: var = d["name"] except KeyError: num = "unknown" if "number" in d: num = d["number"] print("Error: column number <{}> is missing a variableName. Please fix.".format(num)) logger_misc.info("get_variable_name_col: KeyError: missing key") return var
Get the variable name from a table or column :param dict d: Metadata (column) :return str var: Variable name
entailment
def get_table_key(key, d, fallback=""): """ Try to get a table name from a data table :param str key: Key to try first :param dict d: Data table :param str fallback: (optional) If we don't find a table name, use this as a generic name fallback. :return str var: Data table name """ try: var = d[key] return var except KeyError: logger_misc.info("get_variable_name_table: KeyError: missing {}, use name: {}".format(key, fallback)) return fallback
Try to get a table name from a data table :param str key: Key to try first :param dict d: Data table :param str fallback: (optional) If we don't find a table name, use this as a generic name fallback. :return str var: Data table name
entailment
def is_ensemble(d): """ Check if a table of data is an ensemble table. Is the first values index a list? ensemble. Int/float? not ensemble. :param dict d: Table data :return bool: Ensemble or not ensemble """ for var, data in d.items(): try: if isinstance(data["number"], list): return True except Exception as e: logger_misc.debug("misc: is_ensemble: {}".format(e)) return False
Check if a table of data is an ensemble table. Is the first values index a list? ensemble. Int/float? not ensemble. :param dict d: Table data :return bool: Ensemble or not ensemble
entailment
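A quick check of the distinction the function makes, using minimal hypothetical tables:
measurement = {"depth": {"number": 1, "values": [0.5, 1.0, 1.5]}}
ensemble = {"age": {"number": [1, 2, 3], "values": [[100, 101], [102, 103], [104, 105]]}}
print(is_ensemble(measurement))  # False
print(is_ensemble(ensemble))     # True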
def load_fn_matches_ext(file_path, file_type): """ Check that the file extension matches the target extension given. :param str file_path: Path to be checked :param str file_type: Target extension :return bool correct_ext: Extension match or does not match """ correct_ext = False curr_ext = os.path.splitext(file_path)[1] exts = [curr_ext, file_type] try: # special case: if file type is excel, both extensions are valid. if ".xlsx" in exts and ".xls" in exts: correct_ext = True elif curr_ext == file_type: correct_ext = True else: print("Use '{}' to load this file: {}".format(FILE_TYPE_MAP[curr_ext]["load_fn"], os.path.basename(file_path))) except Exception as e: logger_misc.debug("load_fn_matches_ext: {}".format(e)) return correct_ext
Check that the file extension matches the target extension given. :param str file_path: Path to be checked :param str file_type: Target extension :return bool correct_ext: Extension match or does not match
entailment
def match_operators(inp, relate, cut): """ Compare two items. Match a string operator to an operator function :param str inp: Comparison item :param str relate: Comparison operator :param any cut: Comparison item :return bool truth: Comparison truth """ logger_misc.info("enter match_operators") ops = {'>': operator.gt, '<': operator.lt, '>=': operator.ge, '<=': operator.le, '=': operator.eq } try: truth = ops[relate](inp, cut) except KeyError as e: truth = False logger_misc.warn("get_truth: KeyError: Invalid operator input: {}, {}".format(relate, e)) logger_misc.info("exit match_operators") return truth
Compare two items. Match a string operator to an operator function :param str inp: Comparison item :param str relate: Comparison operator :param any cut: Comparison item :return bool truth: Comparison truth
entailment
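A short usage sketch; an unsupported operator falls through the KeyError branch above and returns False:
print(match_operators(10, ">", 5))    # True
print(match_operators(3.2, "<=", 3))  # False
print(match_operators(7, "!=", 7))    # False, '!=' is not in the ops map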
def match_arr_lengths(l): """ Check that all the array lengths match so that a DataFrame can be created successfully. :param list l: Nested arrays :return bool: Valid or invalid """ try: # length of first list. use as basis to check other list lengths against. inner_len = len(l[0]) # check each nested list for i in l: # if the length doesn't match the first list, then don't proceed. if len(i) != inner_len: return False except IndexError: # couldn't get index 0. Wrong data type given or not nested lists print("Error: Array data is not formatted correctly.") return False except TypeError: # Non-iterable data type given. print("Error: Array data missing") return False # all array lengths are equal. made it through the whole list successfully return True
Check that all the array lengths match so that a DataFrame can be created successfully. :param list l: Nested arrays :return bool: Valid or invalid
entailment
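For example, assuming the function is in scope:
print(match_arr_lengths([[1, 2, 3], [4, 5, 6]]))  # True
print(match_arr_lengths([[1, 2, 3], [4, 5]]))     # False
print(match_arr_lengths([]))                      # False (prints an error message first)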
def mv_files(src, dst): """ Move all files from one directory to another :param str src: Source directory :param str dst: Destination directory :return none: """ # list the files in the src directory files = os.listdir(src) # loop for each file found for file in files: # move the file from the src to the dst shutil.move(os.path.join(src, file), os.path.join(dst, file)) return
Move all files from one directory to another :param str src: Source directory :param str dst: Destination directory :return none:
entailment
def normalize_name(s): """ Remove foreign accents and characters to normalize the string. Prevents encoding errors. :param str s: String :return str s: String """ # Normalize the string into a byte string form s = unicodedata.normalize('NFKD', s).encode('ascii', 'ignore') # Remove the byte string and quotes from the string s = str(s)[2:-1] return s
Remove foreign accents and characters to normalize the string. Prevents encoding errors. :param str s: String :return str s: String
entailment
def path_type(path, target): """ Determine if given path is file, directory, or other. Compare with target to see if it's the type we wanted. :param str path: Path :param str target: Target type wanted :return bool: Path is what it claims to be (True) or mismatch (False) """ if os.path.isfile(path) and target == "file": return True elif os.path.isdir(path) and target == "directory": return True else: print("Error: Path given is not a {}: {}".format(target, path)) return False
Determine if given path is file, directory, or other. Compare with target to see if it's the type we wanted. :param str path: Path :param str target: Target type wanted :return bool: Path is what it claims to be (True) or mismatch (False)
entailment
def print_filename(path): """ Print out lipd filename that is being read or written :param str path: all file metadata :return str: filename """ if os.path.basename(path): return os.path.basename(path) return path
Print out lipd filename that is being read or written :param str path: all file metadata :return str: filename
entailment
def prompt_protocol(): """ Prompt user if they would like to save pickle file as a dictionary or an object. :return str: Answer """ stop = 3 ans = "" while stop > 0: ans = input("Save as (d)ictionary or (o)bject?\n" "* Note:\n" "Dictionaries are more basic, and are compatible with Python v2.7+.\n" "Objects are more complex, and are only compatible with v3.4+ ") if ans not in ("d", "o"): print("Invalid response: Please choose 'd' or 'o'") stop -= 1 else: break # if a valid answer isn't captured, default to dictionary (safer, broader) if ans not in ("d", "o"): ans = "d" return ans
Prompt user if they would like to save pickle file as a dictionary or an object. :return str: Answer
entailment
def put_tsids(x): """ (Recursive) Add in TSids into any columns that do not have them. Look for "columns" keys, and then start looping and adding generated TSids to each column :param any x: Unknown :return any x: Unknown """ try: if isinstance(x, dict): try: for k, v in x.items(): # Is this the columns key? if k == "columns": try: # loop over each column of data. Sorted by variableName key for var, data in v.items(): try: # make a case-insensitive keys list for checking existence of "tsid" keys = [key.lower() for key in data.keys()] # If a TSid already exists, then we don't need to do anything. if "tsid" not in keys: # generate the TSid, and add it to the dictionary data["TSid"] = generate_tsid() logger_misc.info("put_tsids: Generated new TSid: {}".format(data["TSid"])) except AttributeError as e: logger_misc.debug("put_tsids: level 3: AttributeError: {}".format(e)) except Exception as e: logger_misc.debug("put_tsids: level 3: Exception: {}".format(e)) except Exception as e: print("put_tsids: level 2: Exception: {}, {}".format(k, e)) # If it's not "columns", then dive deeper. else: x[k] = put_tsids(v) except Exception as e: print("put_tsids: level 1: Exception: {}, {}".format(k, e)) # Item is a list, dive deeper for each item in the list elif isinstance(x, list): for idx, entry in enumerate(x): x[idx] = put_tsids(entry) except Exception as e: print("put_tsids: root: Exception: {}, {}".format(k, e)) return x
(Recursive) Add in TSids into any columns that do not have them. Look for "columns" keys, and then start looping and adding generated TSids to each column :param any x: Unknown :return any x: Unknown
entailment
def rm_empty_fields(x): """ (Recursive) Go through N number of nested data types and remove all empty entries. :param any x: Unknown :return any x: Unknown """ # No logger here because the function is recursive. # Int types don't matter. Return as-is. if not isinstance(x, int) and not isinstance(x, float): if isinstance(x, str) or x is None: try: # Remove new line characters and carriage returns x = x.rstrip() except AttributeError: # None types don't matter. Keep going. pass if x in EMPTY: # Substitute empty entries with "" x = '' elif isinstance(x, list): # Recurse once for each item in the list for i, v in enumerate(x): x[i] = rm_empty_fields(x[i]) # After substitutions, rebuild the list without the empty entries. # Many 0 values are important (coordinates, m/m/m/m). Don't remove them. # Rebuilding avoids the pitfall of removing items from a list while iterating over it. x[:] = [i for i in x if i or i in [0, 0.0]] elif isinstance(x, dict): # First, go through and substitute "" (empty string) entry for any values in EMPTY for k, v in x.items(): x[k] = rm_empty_fields(v) # After substitutions, go through and delete the key-value pair. # This has to be done after we come back up from recursion because we cannot pass keys down. for key in list(x.keys()): if not x[key] and x[key] not in [0, 0.0]: del x[key] return x
(Recursive) Go through N number of nested data types and remove all empty entries. :param any x: Unknown :return any x: Unknown
entailment
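An illustrative call with a hypothetical record, assuming the module-level EMPTY constant is in scope:
record = {"title": "", "lat": 0.0, "notes": ["", "ok", None], "identifier": {"id": None}}
print(rm_empty_fields(record))
# {'lat': 0.0, 'notes': ['ok']}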
def rm_empty_doi(d): """ If an "identifier" dictionary has no doi ID, then it has no use. Delete it. :param dict d: Metadata :return dict d: Metadata """ logger_misc.info("enter remove_empty_doi") try: # Check each publication dictionary for pub in d['pub']: # If no identifier, then we can quit here. If identifier, then keep going. if 'identifier' in pub: if 'id' in pub['identifier'][0]: # If there's a DOI id, but it's EMPTY if pub['identifier'][0]['id'] in EMPTY: del pub['identifier'] else: # If there's an identifier section, with no DOI id del pub['identifier'] except KeyError as e: # What else could go wrong? logger_misc.warn("remove_empty_doi: KeyError: publication key not found, {}".format(e)) logger_misc.info("exit remove_empty_doi") return d
If an "identifier" dictionary has no doi ID, then it has no use. Delete it. :param dict d: Metadata :return dict d: Metadata
entailment
def rm_files(path, extension): """ Remove all files in the given directory with the given extension :param str path: Directory :param str extension: File type to remove :return none: """ files = list_files(extension, path) for file in files: if file.endswith(extension): os.remove(os.path.join(path, file)) return
Remove all files in the given directory with the given extension :param str path: Directory :param str extension: File type to remove :return none:
entailment
def rm_values_fields(x): """ (Recursive) Remove all "values" fields from the metadata :param any x: Any data type :return dict x: Metadata (values removed) """ if isinstance(x, dict): if "values" in x: del x["values"] else: for k, v in x.items(): if isinstance(v, dict): rm_values_fields(v) elif isinstance(v, list): rm_values_fields(v) elif isinstance(x, list): for i in x: rm_values_fields(i) return x
(Recursive) Remove all "values" fields from the metadata :param any x: Any data type :return dict x: Metadata (values removed)
entailment
def rm_missing_values_table(d): """ Loop for each table column and remove the missingValue key & data :param dict d: Metadata (table) :return dict d: Metadata (table) """ try: for k, v in d["columns"].items(): d["columns"][k] = rm_keys_from_dict(v, ["missingValue"]) except Exception: # If we get a KeyError or some other error, it's not a big deal. Keep going. pass return d
Loop for each table column and remove the missingValue key & data :param dict d: Metadata (table) :return dict d: Metadata (table)
entailment
def rm_keys_from_dict(d, keys): """ Given a dictionary and a key list, remove any data in the dictionary with the given keys. :param dict d: Metadata :param list keys: Keys to be removed :return dict d: Metadata """ # Loop for each key given for key in keys: # Is the key in the dictionary? if key in d: try: d.pop(key, None) except KeyError: # Not concerned with an error. Keep going. pass return d
Given a dictionary and a key list, remove any data in the dictionary with the given keys. :param dict d: Metadata :param list keys: Keys to be removed :return dict d: Metadata
entailment
def _replace_missing_values_table(values, mv): """ Receive all table column values as a list of lists. Loop for each column of values :param list values: Metadata (columns) :param any mv: Missing value currently in use :return list: Metadata (columns) """ for idx, column in enumerate(values): values[idx] = _replace_missing_values_column(column, mv) return values
Receive all table column values as a list of lists. Loop for each column of values :param list values: Metadata (columns) :param any mv: Missing value currently in use :return list: Metadata (columns)
entailment
def _replace_missing_values_column(values, mv): """ Replace missing values in the values list where applicable :param list values: Metadata (column values) :return list values: Metadata (column values) """ for idx, v in enumerate(values): try: if v in EMPTY or v == mv: values[idx] = "nan" elif math.isnan(float(v)): values[idx] = "nan" else: values[idx] = v except (TypeError, ValueError): values[idx] = v return values
Replace missing values in the values list where applicable :param list values: Metadata (column values) :return list values: Metadata (column values)
entailment
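A small example of the substitution, assuming the empty string is listed in the module-level EMPTY constant:
column = ["1.5", "-999", "", "2.1", "nan"]
print(_replace_missing_values_column(column, "-999"))
# ['1.5', 'nan', 'nan', '2.1', 'nan']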
def split_path_and_file(s): """ Given a full path to a file, split and return a path and filename :param str s: Path :return str _path: Directory Path :return str _filename: Filename """ _path = s _filename = "" try: x = os.path.split(s) _path = x[0] _filename = x[1] except Exception: print("Error: unable to split path") return _path, _filename
Given a full path to a file, split and return a path and filename :param str s: Path :return str _path: Directory Path :return str _filename: Filename
entailment
def unwrap_arrays(l): """ Unwrap nested lists to be one "flat" list of lists. Mainly for prepping ensemble data for DataFrame() creation :param list l: Nested lists :return list l2: Flattened lists """ # keep processing until all nesting is removed process = True # fail safe: cap the loops at 20, so we don't run into an error and loop infinitely. # if it takes more than 20 loops then there is a problem with the data given. loops = 25 while process and loops > 0: try: # new "flat" list l2 = [] for k in l: # all items in this list are numeric, so this list is done. append to main list if all(isinstance(i, float) or isinstance(i, int) for i in k): l2.append(k) # this list has more nested lists inside. append each individual nested list to the main one. elif all(isinstance(i, list) or isinstance(i, np.ndarray) for i in k): for i in k: l2.append(i) except Exception: print("something went wrong during process") # verify the main list try: # if every list has a numeric at index 0, then there is no more nesting and we can stop processing if all(isinstance(i[0], (int, str, float)) for i in l2): process = False else: l = l2 except IndexError: # there's no index 0, so there must be mixed data types or empty data somewhere. print("something went wrong during verify") loops -= 1 return l2
Unwrap nested lists to be one "flat" list of lists. Mainly for prepping ensemble data for DataFrame() creation :param list l: Nested lists :return list l2: Flattened lists
entailment
def extract(d, whichtables, mode, time): """ LiPD Version 1.3 Main function to initiate LiPD to TSOs conversion. Each object has a "paleoNumber" or "chronNumber" "tableNumber" "modelNumber" "time_id" "mode" - chronData or paleoData "tableType" - "meas" "ens" "summ" :param dict d: Metadata for one LiPD file :param str whichtables: all, meas, summ, or ens :param str mode: paleo or chron mode :return list _ts: Time series """ logger_ts.info("enter extract_main") _root = {} _ts = {} # _switch = {"paleoData": "chronData", "chronData": "paleoData"} _pc = "paleoData" if mode == "chron": _pc = "chronData" _root["mode"] = _pc _root["time_id"] = time try: # Build the root level data. # This will serve as the template for which column data will be added onto later. for k, v in d.items(): if k == "funding": _root = _extract_fund(v, _root) elif k == "geo": _root = _extract_geo(v, _root) elif k == 'pub': _root = _extract_pub(v, _root) # elif k in ["chronData", "paleoData"]: # # Store chronData and paleoData as-is. Need it to collapse without data loss. # _root[k] = copy.deepcopy(v) else: if k not in ["chronData", "paleoData"]: _root[k] = v # Create tso dictionaries for each individual column (build on root data) _ts = _extract_pc(d, _root, _pc, whichtables) except Exception as e: logger_ts.error("extract: Exception: {}".format(e)) print("extract: Exception: {}".format(e)) logger_ts.info("exit extract_main") return _ts
LiPD Version 1.3 Main function to initiate LiPD to TSOs conversion. Each object has a "paleoNumber" or "chronNumber" "tableNumber" "modelNumber" "time_id" "mode" - chronData or paleoData "tableType" - "meas" "ens" "summ" :param dict d: Metadata for one LiPD file :param str whichtables: all, meas, summ, or ens :param str mode: paleo or chron mode :return list _ts: Time series
entailment
def _extract_fund(l, _root): """ Creates flat funding dictionary. :param list l: Funding entries """ logger_ts.info("enter _extract_funding") for idx, i in enumerate(l): for k, v in i.items(): _root['funding' + str(idx + 1) + '_' + k] = v return _root
Creates flat funding dictionary. :param list l: Funding entries
entailment
def _extract_geo(d, _root): """ Extract geo data from input :param dict d: Geo :return dict _root: Root data """ logger_ts.info("enter ts_extract_geo") # May not need these if the key names are corrected in the future. # COORDINATE ORDER: [LON, LAT, ELEV] x = ['geo_meanLon', 'geo_meanLat', 'geo_meanElev'] # Iterate through geo dictionary for k, v in d.items(): # Case 1: Coordinates special naming if k == 'coordinates': for idx, p in enumerate(v): try: # Check that our value is not in EMPTY. if isinstance(p, str): if p.lower() in EMPTY: # If elevation is a string or 0, don't record it if idx != 2: # If long or lat is empty, set it as 0 instead _root[x[idx]] = 0 else: # Set the value as a float into its entry. _root[x[idx]] = float(p) # Value is a normal number, or string representation of a number else: # Set the value as a float into its entry. _root[x[idx]] = float(p) except IndexError as e: logger_ts.warn("_extract_geo: IndexError: idx: {}, val: {}, {}".format(idx, p, e)) # Case 2: Any value that is a string can be added as-is elif isinstance(v, str): if k == 'meanElev': try: # Some data sets have meanElev listed under properties for some reason. _root['geo_' + k] = float(v) except ValueError as e: # If the value is a string, then we don't want it logger_ts.warn("_extract_geo: ValueError: meanElev is a string: {}, {}".format(v, e)) else: _root['geo_' + k] = v # Case 3: Nested dictionary. Recursion elif isinstance(v, dict): _root = _extract_geo(v, _root) return _root
Extract geo data from input :param dict d: Geo :return dict _root: Root data
entailment
def _extract_pub(l, _root): """ Extract publication data from one or more publication entries. :param list l: Publication :return dict _root: Root data """ logger_ts.info("enter _extract_pub") # For each publication entry for idx, pub in enumerate(l): logger_ts.info("processing publication #: {}".format(idx)) # Get author data first, since that's the most ambiguously structured data. _root = _extract_authors(pub, idx, _root) # Go through data of this publication for k, v in pub.items(): # Case 1: DOI ID. Don't need the rest of 'identifier' dict if k == 'identifier': try: _root['pub' + str(idx + 1) + '_DOI'] = v[0]['id'] except KeyError as e: logger_ts.warn("_extract_pub: KeyError: no doi id: {}, {}".format(v, e)) # Case 2: All other string entries else: if k != 'authors' and k != 'author': _root['pub' + str(idx + 1) + '_' + k] = v return _root
Extract publication data from one or more publication entries. :param list l: Publication :return dict _root: Root data
entailment
def _extract_authors(pub, idx, _root): """ Create a concatenated string of author names. Separate names with semi-colons. :param any pub: Publication author structure is ambiguous :param int idx: Index number of Pub """ logger_ts.info("enter extract_authors") try: # DOI Author data. We'd prefer to have this first. names = pub['author'] except KeyError as e: try: # Manually entered author data. This is second best. names = pub['authors'] except KeyError as e: # Couldn't find any author data. Skip it altogether. names = False logger_ts.info("extract_authors: KeyError: author data not provided, {}".format(e)) # If there is author data, find out what type it is if names: # Build author names onto empty string auth = '' # Is it a list of dicts or a list of strings? Could be either # Authors: Stored as a list of dictionaries or list of strings if isinstance(names, list): for name in names: if isinstance(name, str): auth += name + ';' elif isinstance(name, dict): for k, v in name.items(): auth += v + ';' elif isinstance(names, str): auth = names # Enter finished author string into target _root['pub' + str(idx + 1) + '_author'] = auth[:-1] return _root
Create a concatenated string of author names. Separate names with semi-colons. :param any pub: Publication author structure is ambiguous :param int idx: Index number of Pub
entailment
def _extract_pc(d, root, pc, whichtables): """ Extract all data from a PaleoData dictionary. :param dict d: PaleoData dictionary :param dict root: Time series root data :param str pc: paleoData or chronData :param str whichtables: all, meas, summ, or ens :return list _ts: Time series """ logger_ts.info("enter extract_pc") _ts = [] try: # For each table in pc for k, v in d[pc].items(): if whichtables == "all" or whichtables == "meas": for _table_name1, _table_data1 in v["measurementTable"].items(): _ts = _extract_table(_table_data1, copy.deepcopy(root), pc, _ts, "meas") if whichtables != "meas": if "model" in v: for _table_name1, _table_data1 in v["model"].items(): # get the method info for this model. This will be paired to all summ and ens table data _method = _extract_method(_table_data1["method"]) if whichtables == "all" or whichtables == "summ": if "summaryTable" in _table_data1: for _table_name2, _table_data2 in _table_data1["summaryTable"].items(): # take a copy of this tso root _tso = copy.deepcopy(root) # add in the method details _tso.update(_method) # add in the table details _ts = _extract_table(_table_data2, _tso, pc, _ts, "summ") if whichtables == "all" or whichtables == "ens": if "ensembleTable" in _table_data1: for _table_name2, _table_data2 in _table_data1["ensembleTable"].items(): _tso = copy.deepcopy(root) _tso.update(_method) _ts = _extract_table(_table_data2, _tso, pc, _ts, "ens") except Exception as e: logger_ts.warn("extract_pc: Exception: {}".format(e)) return _ts
Extract all data from a PaleoData dictionary. :param dict d: PaleoData dictionary :param dict root: Time series root data :param str pc: paleoData or chronData :param str whichtables: all, meas, summ, or ens :return list _ts: Time series
entailment
def _extract_method(method): """ Make a timeseries-formatted version of model method data :param dict method: Method data :return dict _method: Method data, formatted """ _method = {} for k,v in method.items(): _method["method_" + k] = v return _method
Make a timeseries-formatted version of model method data :param dict method: Method data :return dict _method: Method data, formatted
entailment
def _extract_special(current, table_data): """ Extract year, age, and depth column from table data :param dict table_data: Data at the table level :param dict current: Current data :return dict current: """ logger_ts.info("enter extract_special") try: # Add age, year, and depth columns to ts_root where possible for k, v in table_data['columns'].items(): s = "" # special case for year bp, or any variation of it. Translate key to "age"" if "bp" in k.lower(): s = "age" # all other normal cases. clean key and set key. elif any(x in k.lower() for x in ('age', 'depth', 'year', "yr", "distance_from_top", "distance")): # Some keys have units hanging on them (i.e. 'year_ad', 'depth_cm'). We don't want units on the keys if re_pandas_x_und.match(k): s = k.split('_')[0] elif "distance" in k: s = "depth" else: s = k # create the entry in ts_root. if s: try: current[s] = v['values'] except KeyError as e: # Values key was not found. logger_ts.warn("extract_special: KeyError: 'values' not found, {}".format(e)) try: current[s + 'Units'] = v['units'] except KeyError as e: # Values key was not found. logger_ts.warn("extract_special: KeyError: 'units' not found, {}".format(e)) except Exception as e: logger_ts.error("extract_special: {}".format(e)) return current
Extract year, age, and depth column from table data :param dict table_data: Data at the table level :param dict current: Current data :return dict current:
entailment
def _extract_table_root(d, current, pc): """ Extract data from the root level of a paleoData table. :param dict d: paleoData table :param dict current: Current root data :param str pc: paleoData or chronData :return dict current: Current root data """ logger_ts.info("enter extract_table_root") try: for k, v in d.items(): if isinstance(v, str): current[pc + '_' + k] = v except Exception as e: logger_ts.error("extract_table_root: {}".format(e)) return current
Extract data from the root level of a paleoData table. :param dict d: paleoData table :param dict current: Current root data :param str pc: paleoData or chronData :return dict current: Current root data
entailment
def _extract_table_model(table_data, current, tt): """ Add in modelNumber and summaryNumber fields if this is a summary table :param dict table_data: Table data :param dict current: LiPD root data :param str tt: Table type "summ", "ens", "meas" :return dict current: Current root data """ try: if tt in ["summ", "ens"]: m = re.match(re_sheet, table_data["tableName"]) if m: _pc_num= m.group(1) + "Number" current[_pc_num] = m.group(2) current["modelNumber"] = m.group(4) current["tableNumber"] = m.group(6) else: logger_ts.error("extract_table_summary: Unable to parse paleo/model/table numbers") except Exception as e: logger_ts.error("extract_table_summary: {}".format(e)) return current
Add in modelNumber and summaryNumber fields if this is a summary table :param dict table_data: Table data :param dict current: LiPD root data :param str tt: Table type "summ", "ens", "meas" :return dict current: Current root data
entailment
def _extract_table(table_data, current, pc, ts, tt): """ Use the given table data to create a time series entry for each column in the table. :param dict table_data: Table data :param dict current: LiPD root data :param str pc: paleoData or chronData :param list ts: Time series (so far) :param str tt: Table type: "meas", "summ", or "ens" :return list ts: Time series (so far) """ current["tableType"] = tt # Get root items for this table current = _extract_table_root(table_data, current, pc) # Add in modelNumber and tableNumber if this is "ens" or "summ" table current = _extract_table_model(table_data, current, tt) # Add age, depth, and year columns to root if available _table_tmp = _extract_special(current, table_data) try: # Start creating entries using dictionary copies. for _col_name, _col_data in table_data["columns"].items(): # Add column data onto root items. Copy so we don't ruin original data _col_tmp = _extract_columns(_col_data, copy.deepcopy(_table_tmp), pc) try: ts.append(_col_tmp) except Exception as e: logger_ts.warn("extract_table: Unable to create ts entry, {}".format(e)) except Exception as e: logger_ts.error("extract_table: {}".format(e)) return ts
Use the given table data to create a time series entry for each column in the table. :param dict table_data: Table data :param dict current: LiPD root data :param str pc: paleoData or chronData :param list ts: Time series (so far) :param str tt: Table type: "meas", "summ", or "ens" :return list ts: Time series (so far)
entailment
def _extract_columns(d, tmp_tso, pc): """ Extract data from one paleoData column :param dict d: Column dictionary :param dict tmp_tso: TSO dictionary with only root items :return dict: Finished TSO """ logger_ts.info("enter extract_columns") for k, v in d.items(): if isinstance(v, dict): flat_data = _extract_nested(pc + "_" + k, v, {}) for n,m in flat_data.items(): tmp_tso[n] = m else: # Assume if it's not a special nested case, then it's a string value tmp_tso[pc + '_' + k] = v return tmp_tso
Extract data from one paleoData column :param dict d: Column dictionary :param dict tmp_tso: TSO dictionary with only root items :return dict: Finished TSO
entailment
def collapse(l, raw): """ LiPD Version 1.3 Main function to initiate time series to LiPD conversion Each object has a: "paleoNumber" or "chronNumber" "tableNumber" "modelNumber" "time_id" "mode" - chronData or paleoData "tableType" - "meas" "ens" "summ" :param list l: Time series :return dict _master: LiPD data, sorted by dataset name """ logger_ts.info("enter collapse") # LiPD data (in progress), sorted dataset name _master = {} _dsn = "" try: # Determine if we're collapsing a paleo or chron time series _pc = l[0]["mode"] # Loop the time series for entry in l: # Get notable keys dsn = entry['dataSetName'] _dsn = dsn _current = entry # Since root items are the same in each column of the same dataset, we only need these steps the first time. if dsn not in _master: logger_ts.info("collapsing: {}".format(dsn)) print("collapsing: {}".format(dsn)) _master, _current = _collapse_root(_master, _current, dsn, _pc) try: _master[dsn]["paleoData"] = raw[dsn]["paleoData"] if "chronData" in raw[dsn]: _master[dsn]["chronData"] = raw[dsn]["chronData"] except KeyError as e: print("collapse: Could not collapse an object the dataset: {}, {}".format(dsn, e)) # Collapse pc, calibration, and interpretation _master = _collapse_pc(_master, _current, dsn, _pc) # The result combined into a single dataset. Remove the extra layer on the data. if len(_master) == 1: _master = _master[_dsn] print("Created LiPD data: 1 dataset") else: print("Created LiPD data: {} datasets".format(len(_master))) except Exception as e: print("Error: Unable to collapse time series, {}".format(e)) logger_ts.error("collapse: Exception: {}".format(e)) logger_ts.info("exit collapse") return _master
LiPD Version 1.3 Main function to initiate time series to LiPD conversion Each object has a: "paleoNumber" or "chronNumber" "tableNumber" "modelNumber" "time_id" "mode" - chronData or paleoData "tableType" - "meas" "ens" "summ" :param list l: Time series :return dict _master: LiPD data, sorted by dataset name
entailment
def _get_current_names(current, dsn, pc): """ Get the table name and variable name from the given time series entry :param dict current: Time series entry :param str pc: paleoData or chronData :return str _table_name: :return str _variable_name: """ _table_name = "" _variable_name = "" # Get key info try: _table_name = current['{}_tableName'.format(pc)] _variable_name = current['{}_variableName'.format(pc)] except Exception as e: print("Error: Unable to collapse time series: {}, {}".format(dsn, e)) logger_ts.error("get_current: {}, {}".format(dsn, e)) return _table_name, _variable_name
Get the table name and variable name from the given time series entry :param dict current: Time series entry :param str pc: paleoData or chronData :return str _table_name: :return str _variable_name:
entailment
def _collapse_root(master, current, dsn, pc): """ Collapse the root items of the current time series entry :param dict master: LiPD data (so far) :param dict current: Current time series entry :param str dsn: Dataset name :param str pc: paleoData or chronData (mode) :return dict master: :return dict current: """ logger_ts.info("enter collapse_root") _tmp_fund = {} _tmp_pub = {} # The tmp lipd data that we'll place in master later _tmp_master = {'pub': [], 'geo': {'geometry': {'coordinates': []}, 'properties': {}}, 'funding': [], 'paleoData': {}, "chronData": {}} # _raw = _switch[pc] _c_keys = ['meanLat', 'meanLon', 'meanElev'] _c_vals = [0, 0, 0] _p_keys = ['siteName', 'pages2kRegion', "location", "gcmdLocation", ""] try: # does not have # paleoData, chronData, mode, tableType, time_id, depth, depthUnits, age, ageUnits # does have # pub, geo, funding, proxy, archiveType, description, investigator, # For all keys in the current time series entry for k, v in current.items(): # Underscore present. Only underscore keys that belong here are funding, geo, and pub if "_" in k: # FUNDING if 'funding' in k: # Group funding items in tmp_funding by number m = re_fund_valid.match(k) try: _tmp_fund[m.group(1)][m.group(2)] = v except Exception: try: # If the first layer is missing, create it and try again _tmp_fund[m.group(1)] = {} _tmp_fund[m.group(1)][m.group(2)] = v except Exception: # Still not working. Give up. pass # GEO elif 'geo' in k: key = k.split('_') # Coordinates - [LON, LAT, ELEV] if key[1] in _c_keys: if key[1] == 'meanLon' or key[1] == "longitude": _c_vals[0] = v elif key[1] == 'meanLat' or key[1] == "latitude": _c_vals[1] = v elif key[1] == 'meanElev' or key[1] == "elevation": _c_vals[2] = v # Properties else: _tmp_master['geo']['properties'][key[1]] = v # All others # else: # _tmp_master['geo'][key[1]] = v # PUBLICATION elif 'pub' in k: # Group pub items in tmp_pub by number m = re_pub_valid.match(k.lower()) if m: number = int(m.group(1)) - 1 # 0 indexed behind the scenes, 1 indexed to user. key = m.group(2) # Authors ("Pu, Y.; Nace, T.; etc..") if key == 'author' or key == 'authors': try: _tmp_pub[number]['author'] = _collapse_author(v) except KeyError as e: # Dictionary not created yet. Assign one first. _tmp_pub[number] = {} _tmp_pub[number]['author'] = _collapse_author(v) # DOI ID elif key == 'DOI': try: _tmp_pub[number]['identifier'] = [{"id": v, "type": "doi", "url": "http://dx.doi.org/" + str(v)}] except KeyError: # Dictionary not created yet. Assign one first. _tmp_pub[number] = {} _tmp_pub[number]['identifier'] = [{"id": v, "type": "doi", "url": "http://dx.doi.org/" + str(v)}] # All others else: try: _tmp_pub[number][key] = v except KeyError: # Dictionary not created yet. Assign one first. _tmp_pub[number] = {} _tmp_pub[number][key] = v # No underscore in name, we can rule out the other obvious keys we don't want else: # Rule out any timeseries keys that we added, and paleoData/chronData prefixed keys. if not any(i in k.lower() or i is k.lower() for i in ["paleodata", "chrondata", "mode", "tabletype", "time_id", "depth", "depthunits", "age", "ageunits"]): # Root item: _tmp_master[k] = v continue # Append the compiled data into the master dataset data for k, v in _tmp_pub.items(): _tmp_master['pub'].append(v) for k, v in _tmp_fund.items(): _tmp_master['funding'].append(v) # Get rid of elevation coordinate if one was never added. if _c_vals[2] == 0: del _c_vals[2] _tmp_master['geo']['geometry']['coordinates'] = _c_vals # Create entry in object master, and set our new data to it. 
master[dsn] = _tmp_master except Exception as e: logger_ts.error("collapse_root: Exception: {}, {}".format(dsn, e)) logger_ts.info("exit collapse_root") return master, current
Collapse the root items of the current time series entry :param dict master: LiPD data (so far) :param dict current: Current time series entry :param str dsn: Dataset name :param str pc: paleoData or chronData (mode) :return dict master: :return dict current:
entailment
def _collapse_author(s): """ Split author string back into organized dictionary :param str s: Formatted names string "Last, F.; Last, F.; etc.." :return list of dict: One dictionary per author name """ logger_ts.info("enter collapse_author") l = [] authors = s.split(';') for author in authors: l.append({'name': author}) return l
Split author string back into organized dictionary :param str s: Formatted names string "Last, F.; Last, F.; etc.." :return list of dict: One dictionary per author name
entailment
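A quick standalone illustration of the author split above (a minimal sketch mirroring _collapse_author; the input string is hypothetical). Note that the original does not strip whitespace, so names after the first keep a leading space:

def collapse_author_example(s):
    # split on ';' and wrap each name in a dict, exactly like the function above
    return [{'name': author} for author in s.split(';')]

print(collapse_author_example("Pu, Y.; Nace, T."))
# [{'name': 'Pu, Y.'}, {'name': ' Nace, T.'}]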
def _collapse_pc(master, current, dsn, pc): """ Collapse the paleo or chron for the current time series entry :param dict master: LiPD data (so far) :param dict current: Current time series entry :param str dsn: Dataset name :param str pc: paleoData or chronData :return dict master: """ logger_ts.info("enter collapse_paleo") _table_name, _variable_name = _get_current_names(current, dsn, pc) try: # Get the names we need to build the hierarchy _m = re.match(re_sheet_w_number, _table_name) # Is this a summary table or a measurement table? _switch = {"meas": "measurementTable", "summ": "summaryTable", "ens": "ensembleTable"} _ms = _switch[current["tableType"]] # This is a measurement table. Put it in the correct part of the structure # master[datasetname][chronData][chron0][measurementTable][chron0measurement0] if _ms == "measurementTable": # master[dsn] = _collapse_build_skeleton(master(dsn), _ms, _m) # Collapse the keys in the table root if a table does not yet exist if _table_name not in master[dsn][pc][_m.group(1)][_ms]: _tmp_table = _collapse_table_root(current, dsn, pc) master[dsn][pc][_m.group(1)][_ms][_table_name] = _tmp_table # Collapse the keys at the column level, and return the column data _tmp_column = _collapse_column(current, pc) # Create the column entry in the table master[dsn][pc][_m.group(1)][_ms][_table_name]['columns'][_variable_name] = _tmp_column # This is a summary table. Put it in the correct part of the structure # master[datasetname][chronData][chron0][model][chron0model0][summaryTable][chron0model0summary0] elif _ms in ["ensembleTable", "summaryTable"]: # Collapse the keys in the table root if a table does not yet exist if _table_name not in master[dsn][pc][_m.group(1)]["model"][_m.group(1) + _m.group(2)][_ms]: _tmp_table = _collapse_table_root(current, dsn, pc) master[dsn][pc][_m.group(1)]["model"][_m.group(1) + _m.group(2)][_ms][_table_name] = _tmp_table # Collapse the keys at the column level, and return the column data _tmp_column = _collapse_column(current, pc) # Create the column entry in the table master[dsn][pc][_m.group(1)]["model"][_m.group(1) + _m.group(2)][_ms][_table_name]["columns"][_variable_name] = _tmp_column except Exception as e: print("Error: Unable to collapse column data: {}, {}".format(dsn, e)) logger_ts.error("collapse_paleo: {}, {}, {}".format(dsn, _variable_name, e)) # If these sections had any items added to them, then add them to the column master. return master
Collapse the paleo or chron for the current time series entry :param dict master: LiPD data (so far) :param dict current: Current time series entry :param str dsn: Dataset name :param str pc: paleoData or chronData :return dict master:
entailment
def _collapse_table_root(current, dsn, pc): """ Create a table with items in root given the current time series entry :param dict current: Current time series entry :param str dsn: Dataset name :param str pc: paleoData or chronData :return dict _tmp_table: Table data """ logger_ts.info("enter collapse_table_root") _table_name, _variable_name = _get_current_names(current, dsn, pc) _tmp_table = {'columns': {}} try: for k, v in current.items(): # These are the main table keys that we should be looking for for i in ['filename', 'googleWorkSheetKey', 'tableName', "missingValue", "tableMD5", "dataMD5"]: if i in k: try: _tmp_table[i] = v except Exception: # Not all keys are available. It's okay if we hit a KeyError. pass except Exception as e: print("Error: Unable to collapse: {}, {}".format(dsn, e)) logger_ts.error("collapse_table_root: Unable to collapse: {}, {}, {}".format(_table_name, dsn, e)) return _tmp_table
Create a table with items in root given the current time series entry :param dict current: Current time series entry :param str dsn: Dataset name :param str pc: paleoData or chronData :return dict _tmp_table: Table data
entailment
def _collapse_column(current, pc): """ Collapse the column data for the current time series entry :param dict current: Current time series entry :param str pc: paleoData or chronData :return dict _tmp_column: Column data """ _tmp_column = {} try: for k, v in current.items(): try: # We do not want to store these table keys at the column level. if not any(i in k for i in ["tableName", "google", "filename", "md5", "MD5"]): # ['paleoData', 'key'] m = k.split('_') # Is this a chronData or paleoData key? if pc in m[0] and len(m) >= 2: # Create a link to the growing column data tmp = _tmp_column # Loop for each key, not including the PC. Start at index 1 for idx, b in enumerate(m[1:]): # Are we at the last item in the list? if idx == len(m) - 2: # Set the value into the column data tmp[b] = v # All loops before the last item else: # Key already exists in the column if b in _tmp_column: # Move into the data structure and keep going tmp = _tmp_column[b] # Key does not exist yet else: # Create the data structure tmp[b] = {} # Move into the new data structure and keep going tmp = tmp[b] except Exception as e: logger_ts.error("collapse_column: loop: {}".format(e)) except Exception as e: logger_ts.error("collapse_column: {}".format(e)) return _tmp_column
Collapse the column data for the current time series entry :param dict current: Current time series entry :param str pc: paleoData or chronData :return dict _tmp_column: Column data
entailment
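A worked example of the key-nesting logic above (a minimal sketch; the flattened key and value are hypothetical):

flat_key = "paleoData_hasResolution_hasMean"   # hypothetical flattened time series key
value = 5.0
parts = flat_key.split("_")                    # ['paleoData', 'hasResolution', 'hasMean']
column = {}
tmp = column
for idx, part in enumerate(parts[1:]):         # skip the paleoData/chronData prefix
    if idx == len(parts) - 2:                  # last segment receives the value
        tmp[part] = value
    else:
        tmp = tmp.setdefault(part, {})         # descend, creating levels as needed
print(column)                                  # {'hasResolution': {'hasMean': 5.0}}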
def mode_ts(ec, mode="", ts=None): """ Get string for the mode :param str ec: extract or collapse :param str mode: "paleo" or "chron" mode :param list ts: Time series (for collapse) :return str phrase: Phrase """ phrase = "" if ec == "extract": if mode=="chron": phrase = "extracting chronData..." else: phrase = "extracting paleoData..." elif ec == "collapse": if ts[0]["mode"] == "chronData": phrase = "collapsing chronData" else: phrase = "collapsing paleoData..." return phrase
Get string for the mode :param str ec: extract or collapse :param str mode: "paleo" or "chron" mode :param list ts: Time series (for collapse) :return str phrase: Phrase
entailment
def translate_expression(expression): """ Check if the expression is valid, then turn it into an expression that can be used for filtering. :param str expression: Expression string :return list of lists: One or more matches. Each list has 3 strings. """ logger_ts.info("enter translate_expression") m = re_filter_expr.findall(expression) matches = [] if m: for i in m: logger_ts.info("parse match: {}".format(i)) tmp = list(i[1:]) if tmp[1] in COMPARISONS: tmp[1] = COMPARISONS[tmp[1]] tmp[0] = cast_float(tmp[0]) tmp[2] = cast_float(tmp[2]) matches.append(tmp) else: logger_ts.warn("translate_expression: invalid expression: {}".format(expression)) print("Invalid input expression") logger_ts.info("exit translate_expression") return matches
Check if the expression is valid, then turn it into an expression that can be used for filtering. :param str expression: Expression string :return list of lists: One or more matches. Each list has 3 strings.
entailment
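A rough sketch of the translation step above, with simplified stand-ins for re_filter_expr and COMPARISONS (both the regex and the mapping are assumptions here, not the library's actual definitions; the cast_float step is omitted):

import re

FILTER_EXPR = re.compile(r"(\w+)\s*(==|<=|>=|<|>|in)\s*(\S+)")   # assumed, simplified
COMPARISON_MAP = {"==": "==", "<=": "<=", ">=": ">=", "<": "<", ">": ">", "in": "in"}

def translate_sketch(expression):
    matches = []
    for key, op, val in FILTER_EXPR.findall(expression):
        matches.append([key, COMPARISON_MAP.get(op, op), val])
    return matches

print(translate_sketch("paleoData_variableName == d18O"))
# [['paleoData_variableName', '==', 'd18O']]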
def get_matches(expr_lst, ts): """ Get a list of TimeSeries objects that match the given expression. :param list expr_lst: Expression :param list ts: TimeSeries :return list new_ts: Matched time series objects :return list idxs: Indices of matched objects """ logger_ts.info("enter get_matches") new_ts = [] idxs = [] match = False try: for idx, ts_data in enumerate(ts): for expr in expr_lst: try: val = ts_data[expr[0]] # Check what comparison operator is being used if expr[1] == 'in': # "IN" operator can't be used in get_truth. Handle first. if expr[2] in val: match = True elif match_operators(val, expr[1], expr[2]): # If it's a typical operator, check with the truth test. match = True else: # If one comparison is false, then it can't possibly be a match match = False break except KeyError as e: logger_ts.warn("get_matches: KeyError: getting value from TimeSeries object, {}, {}".format(expr, e)) match = False except IndexError as e: logger_ts.warn("get_matches: IndexError: getting value from TimeSeries object, {}, {}".format(expr, e)) match = False if match: idxs.append(idx) new_ts.append(ts_data) except AttributeError as e: logger_ts.debug("get_matches: AttributeError: unable to get expression matches, {}, {}".format(type(ts), e)) print("Error: Timeseries is an invalid data type") if not new_ts: print("No matches found for that expression") else: print("Found {} matches from {} columns".format(len(new_ts), len(ts))) logger_ts.info("exit get_matches") return new_ts, idxs
Get a list of TimeSeries objects that match the given expression. :param list expr_lst: Expression :param list ts: TimeSeries :return list new_ts: Matched time series objects :return list idxs: Indices of matched objects
entailment
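The truth test delegated to match_operators above might look roughly like this (an assumption for illustration; the real helper lives elsewhere in the library):

import operator

_OPS = {"==": operator.eq, "!=": operator.ne, "<": operator.lt,
        "<=": operator.le, ">": operator.gt, ">=": operator.ge}

def match_operators_sketch(value, op, target):
    # Return False rather than raising when the operator or types don't line up
    try:
        return _OPS[op](value, target)
    except (KeyError, TypeError):
        return False

print(match_operators_sketch(12.5, ">", 10))   # True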
def _to_http_hosts(hosts: Union[Iterable[str], str]) -> List[str]: """Convert a string of whitespace or comma separated hosts into a list of hosts. Hosts may also already be a list or other iterable. Each host will be prefixed with 'http://' if it is not already there. >>> _to_http_hosts('n1:4200,n2:4200') ['http://n1:4200', 'http://n2:4200'] >>> _to_http_hosts('n1:4200 n2:4200') ['http://n1:4200', 'http://n2:4200'] >>> _to_http_hosts('https://n1:4200') ['https://n1:4200'] >>> _to_http_hosts(['http://n1:4200', 'n2:4200']) ['http://n1:4200', 'http://n2:4200'] """ if isinstance(hosts, str): hosts = hosts.replace(',', ' ').split() return [_to_http_uri(i) for i in hosts]
Convert a string of whitespace or comma separated hosts into a list of hosts. Hosts may also already be a list or other iterable. Each host will be prefixed with 'http://' if it is not already there. >>> _to_http_hosts('n1:4200,n2:4200') ['http://n1:4200', 'http://n2:4200'] >>> _to_http_hosts('n1:4200 n2:4200') ['http://n1:4200', 'http://n2:4200'] >>> _to_http_hosts('https://n1:4200') ['https://n1:4200'] >>> _to_http_hosts(['http://n1:4200', 'n2:4200']) ['http://n1:4200', 'http://n2:4200']
entailment
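_to_http_uri is referenced above but not shown; a plausible minimal version consistent with the doctests (an assumption, not the actual implementation):

def _to_http_uri_sketch(host: str) -> str:
    # prefix with http:// unless a scheme is already present
    if host.startswith(('http://', 'https://')):
        return host
    return 'http://' + host

print(_to_http_uri_sketch('n1:4200'))          # http://n1:4200
print(_to_http_uri_sketch('https://n1:4200'))  # https://n1:4200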
def _plain_or_callable(obj): """ Returns the value of the called object if obj is a callable, otherwise the plain object. Returns None if obj is None. >>> obj = None >>> _plain_or_callable(obj) >>> stmt = 'select * from sys.nodes' >>> _plain_or_callable(stmt) 'select * from sys.nodes' >>> def _args(): ... return [1, 'name'] >>> _plain_or_callable(_args) [1, 'name'] >>> _plain_or_callable((x for x in range(10))) 0 >>> class BulkArgsGenerator: ... def __call__(self): ... return [[1, 'foo'], [2, 'bar'], [3, 'foobar']] >>> _plain_or_callable(BulkArgsGenerator()) [[1, 'foo'], [2, 'bar'], [3, 'foobar']] """ if callable(obj): return obj() elif isinstance(obj, types.GeneratorType): return next(obj) else: return obj
Returns the value of the called object if obj is a callable, otherwise the plain object. Returns None if obj is None. >>> obj = None >>> _plain_or_callable(obj) >>> stmt = 'select * from sys.nodes' >>> _plain_or_callable(stmt) 'select * from sys.nodes' >>> def _args(): ... return [1, 'name'] >>> _plain_or_callable(_args) [1, 'name'] >>> _plain_or_callable((x for x in range(10))) 0 >>> class BulkArgsGenerator: ... def __call__(self): ... return [[1, 'foo'], [2, 'bar'], [3, 'foobar']] >>> _plain_or_callable(BulkArgsGenerator()) [[1, 'foo'], [2, 'bar'], [3, 'foobar']]
entailment
def _to_dsn(hosts): """Convert a host URI into a dsn for aiopg. >>> _to_dsn('aiopg://myhostname:4242/mydb') 'postgres://crate@myhostname:4242/mydb' >>> _to_dsn('aiopg://myhostname:4242') 'postgres://crate@myhostname:4242/doc' >>> _to_dsn('aiopg://hoschi:pw@myhostname:4242/doc?sslmode=require') 'postgres://hoschi:pw@myhostname:4242/doc?sslmode=require' >>> _to_dsn('aiopg://myhostname') 'postgres://crate@myhostname:5432/doc' """ p = urlparse(hosts) try: user_and_pw, netloc = p.netloc.split('@', maxsplit=1) except ValueError: netloc = p.netloc user_and_pw = 'crate' try: host, port = netloc.split(':', maxsplit=1) except ValueError: host = netloc port = 5432 dbname = p.path[1:] if p.path else 'doc' dsn = f'postgres://{user_and_pw}@{host}:{port}/{dbname}' if p.query: dsn += '?' + '&'.join(k + '=' + v[0] for k, v in parse_qs(p.query).items()) return dsn
Convert a host URI into a dsn for aiopg. >>> _to_dsn('aiopg://myhostname:4242/mydb') 'postgres://crate@myhostname:4242/mydb' >>> _to_dsn('aiopg://myhostname:4242') 'postgres://crate@myhostname:4242/doc' >>> _to_dsn('aiopg://hoschi:pw@myhostname:4242/doc?sslmode=require') 'postgres://hoschi:pw@myhostname:4242/doc?sslmode=require' >>> _to_dsn('aiopg://myhostname') 'postgres://crate@myhostname:5432/doc'
entailment
def _verify_ssl_from_first(hosts): """Check if SSL validation parameter is passed in URI >>> _verify_ssl_from_first(['https://myhost:4200/?verify_ssl=false']) False >>> _verify_ssl_from_first(['https://myhost:4200/']) True >>> _verify_ssl_from_first([ ... 'https://h1:4200/?verify_ssl=False', ... 'https://h2:4200/?verify_ssl=True' ... ]) False """ for host in hosts: query = parse_qs(urlparse(host).query) if 'verify_ssl' in query: return _to_boolean(query['verify_ssl'][0]) return True
Check if SSL validation parameter is passed in URI >>> _verify_ssl_from_first(['https://myhost:4200/?verify_ssl=false']) False >>> _verify_ssl_from_first(['https://myhost:4200/']) True >>> _verify_ssl_from_first([ ... 'https://h1:4200/?verify_ssl=False', ... 'https://h2:4200/?verify_ssl=True' ... ]) False
entailment
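_to_boolean is referenced above but not shown; a minimal stand-in consistent with the doctests (assumed behaviour):

def _to_boolean_sketch(value: str) -> bool:
    # case-insensitive parse of the verify_ssl query value
    return value.strip().lower() in ('true', '1', 'yes')

print(_to_boolean_sketch('False'))  # False
print(_to_boolean_sketch('true'))   # True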
def addTable(D): """ Add any table type to the given dataset. Use prompts to determine index locations and table type. :param dict D: Metadata (dataset) :return dict D: Metadata (dataset) """ _swap = { "1": "measurement", "2": "summary", "3": "ensemble", "4": "distribution" } print("What type of table would you like to add?\n" "1: measurement\n" "2: summary\n" "3: ensemble (under development)\n" "4: distribution (under development)\n" "\n Note: if you want to add a whole model, use the addModel() function") _ans = input(">") if _ans in ["3", "4"]: print("I don't know how to do that yet.") # if this is a summary or measurement, split the csv into each column elif _ans in ["1", "2"]: # read in a csv file. have the user point to it print("Locate the CSV file with the values for this table: ") _path, _files = browse_dialog_file() _path = _confirm_file_path(_files) _values = read_csv_from_file(_path) _table = _build_table(_values) _placement = _prompt_placement(D, _swap[_ans]) D = _put_table(D, _placement, _table) else: print("That's not a valid option") return D
Add any table type to the given dataset. Use prompts to determine index locations and table type. :param dict D: Metadata (dataset) :return dict D: Metadata (dataset)
entailment
def _get_available_placements(D, tt): """ Called from: _prompt_placement() Get a list of possible places that we can put the new model data into. If no model exists yet, we'll use something like chron0model0. If other models exist, we'll go for the n+1 entry. ex: chron0model0 already exists, so we'll look to chron0model1 next. :param dict D: Metadata :param str tt: Table Type :return list _options: Possible placements """ _options = [] try: for _pc in ["paleoData", "chronData"]: if _pc in D: # for each entry in pc for section_name, section_data in D[_pc].items(): # looking for open spots for measurement tables if tt == "measurement": if "measurementTable" in section_data: _options.append(_get_available_placements_1(section_data["measurementTable"], section_name, "measurement")) # looking for open spots for model tables else: # Is there a model? Need model data to keep going if "model" in section_data: # this is for adding a whole model (all 4 tables, ens/dist/sum/method) if tt == "model": _options.append(_get_available_placements_1(section_data["model"], section_name, "model")) else: # for adding individual model tables for _k, _v in section_data["model"].items(): # keys here are stored as "<type>Table", so add "Table" to each table type _tt_table = "{}Table".format(tt) # does this table exist? if _tt_table in _v: # Get the first available position for this section _options.append( _get_available_placements_1(_v[_tt_table], _k, tt)) else: # Doesn't currently exist. Make the first option index 0. _options.append("{}{}0".format(_k, tt)) # no models present, so we automatically default placement options to the 0 index. else: if tt == "model": # adding a whole model, so no need to be specific _options.append("{}model0".format(section_name)) else: # adding a specific table, so the position is more specific also _options.append("{}model0{}0".format(section_name, tt)) except Exception as e: sys.exit("Looking for open table positions: Unable to find placement options, {}".format(e)) # remove empty names _options = [i for i in _options if i] # Is the whole list empty? that's not good. if not _options: sys.exit("Error: No available positions found to place new data. Something went wrong.") return _options
Called from: _prompt_placement() Get a list of possible places that we can put the new model data into. If no model exists yet, we'll use something like chron0model0. If other models exist, we'll go for the n+1 entry. ex: chron0model0 already exists, so we'll look to chron0model1 next. :param dict D: Metadata :param str tt: Table Type :return list _options: Possible placements
entailment
def _prompt_placement(D, tt): """ Since automatic placement didn't work, find somewhere to place the model data manually with the help of the user. :param dict D: Metadata :param str tt: Table type :return str _model_name: Chosen model name for placement """ _model_name = "" # There wasn't a table name match, so we need prompts to fix it _placement_options = _get_available_placements(D, tt) print("Please choose where you'd like to place this model:") for _idx, _opt in enumerate(_placement_options): print("({}) {}".format(_idx, _opt)) _choice = input("> ") try: if _choice and int(_choice) < len(_placement_options): # Get the option the user chose _model_name = _placement_options[int(_choice)] else: # The user chose an option out of the placement list range print("Invalid choice input") return except Exception as e: # Choice was not a number or empty print("Invalid choice") return _model_name
Since automatic placement didn't work, find somewhere to place the model data manually with the help of the user. :param dict D: Metadata :param str tt: Table type :return str _model_name: Chosen model name for placement
entailment
def _put_table(D, name, table): """ Use the dataset and name to place the new table data into the dataset. :param dict D: Dataset :param str name: Table name / path to store new table :param dict table: Newly created table data :return dict D: Dataset """ try: # print("Placing table: {}".format(name)) table["tableName"] = name m = re.match(re_table_name, name) if m: _pc = m.group(1) + "Data" _section = m.group(1) + m.group(2) # place a measurement table if m.group(3) == "measurement": # This shouldn't happen. User chose one of our options. That should be an empty location. if name in D[_pc][_section]["measurementTable"]: print("Oops. This shouldn't happen. That table path is occupied in the dataset") # Place the data else: D[_pc][_section]["measurementTable"][name] = table # place a model table type else: _model = _section + m.group(3) + m.group(4) _tt = m.group(5) + "Table" if name in D[_pc][_model][_tt]: print("Oops. This shouldn't happen. That table path is occupied in the dataset") else: D[_pc][_model][_tt][name] = table else: print("Oops. This shouldn't happen. That table name doesn't look right. Please report this error") return except Exception as e: print("addTable: Unable to put the table data into the dataset, {}".format(e)) return D
Use the dataset and name to place the new table data into the dataset. :param dict D: Dataset :param str name: Table name / path to store new table :param dict table: Newly created table data :return dict D: Dataset
entailment
def _update_table_names(name, dat): """ Model placement is subject to change. That means all names within the model (names are path-dependent) are also subject to change. Whichever name is decided, the inner data needs to match it. :param dict dat: Metadata :param str name: Table name :return dict dat: Metadata """ for _tabletype in ["summary", "distribution", "ensemble"]: _ttname = "{}Table".format(_tabletype) if _ttname in dat: _new_tables = OrderedDict() _idx = 0 # change all the top level table names for k,v in dat[_ttname].items(): _new_ttname= "{}{}{}".format(name, _tabletype, _idx) _idx +=1 #change all the table names in the table metadata v["tableName"] = _new_ttname # remove the filename. It shouldn't be stored anyway if "filename" in v: v["filename"] = "" # place dat into the new ordered dictionary _new_tables[_new_ttname] = v # place new tables into the original dat dat[_ttname] = _new_tables return dat
Model placement is subject to change. That means all names within the model (names are path-dependent) are also subject to change. Whichever name is decided, the inner data needs to match it. :param dict dat: Metadata :param str name: Table name :return dict dat: Metadata
entailment
def addModel(D, models): """ Insert model data into a LiPD dataset Examples of model naming: chron0model0 chron0model1 chron1model0 Example of 'models' variable: { chron0model0: { "method": {...}, "summaryTable": [...], "ensembleTable": [...], "distributionTable: [...] }, chron0model1:... } :param dict D: Metadata (dataset) :param dict models: Model data to add :return dict D: Metadata (dataset) """ try: # Loop for each model that needs to be added for _model_name, _model_data in models.items(): # split the table name into a path that we can use _m = re.match(re_model_name, _model_name) if _m: D = _put_model(D, _model_name, _model_data, _m) else: print("The table name found in the given model data isn't valid for automatic placement") _placement_name = _prompt_placement(D, "model") _m = re.match(re_model_name, _placement_name) if _m: D = _put_model(D, _placement_name, _model_data, _m) else: print("Oops. This shouldn't happen. That table name doesn't look right. Please report this error") return except Exception as e: print("addModel: Model data NOT added, {}".format(e)) return D
Insert model data into a LiPD dataset Examples of model naming: chron0model0 chron0model1 chron1model0 Example of 'models' variable: { chron0model0: { "method": {...}, "summaryTable": [...], "ensembleTable": [...], "distributionTable: [...] }, chron0model1:... } :param dict D: Metadata (dataset) :param dict models: Model data to add :return dict D: Metadata (dataset)
entailment
def _put_model(D, name, dat, m): """ Place the model data given, into the location (m) given. :param dict D: Metadata (dataset) :param str name: Model name (ex: chron0model0) :param dict dat: Model data :param regex m: Model name regex groups :return dict D: Metadata (dataset) """ try: # print("Placing model: {}".format(name)) _pc = m.group(1) + "Data" _section = m.group(1) + m.group(2) if _pc not in D: # Section missing entirely? Can't continue print("{} not found in the provided dataset. Please try again".format(_pc)) return else: if _section not in D[_pc]: # Creates section: Example: D[chronData][chron0] D[_pc][_section] = OrderedDict() if "model" not in D[_pc][_section]: # Creates model top level: Example: D[chronData][chron0]["model"] D[_pc][_section]["model"] = OrderedDict() if name not in D[_pc][_section]["model"]: dat = _update_table_names(name, dat) D[_pc][_section]["model"][name] = dat else: # Model already exists, should we overwrite it? _prompt_overwrite = input( "This model already exists in the dataset. Do you want to overwrite it? (y/n)") # Yes, overwrite with the model data provided if _prompt_overwrite == "y": dat = _update_table_names(name, dat) D[_pc][_section]["model"][name] = dat # No, do not overwrite. elif _prompt_overwrite == "n": _name2 = _prompt_placement(D, "model") _m = re.match(re_model_name, _name2) if _m: D = _put_model(D, _name2, dat, _m) else: print("Invalid choice") except Exception as e: print("addModel: Unable to put the model data into the dataset, {}".format(e)) return D
Place the model data given, into the location (m) given. :param dict D: Metadata (dataset) :param str name: Model name (ex: chron0model0) :param dict dat: Model data :param regex m: Model name regex groups :return dict D: Metadata (dataset)
entailment
def prepare_env(app, env, docname): """ Prepares the sphinx environment to store sphinx-needs internal data. """ if not hasattr(env, 'needs_all_needs'): # Used to store all needed information about all needs in document env.needs_all_needs = {} if not hasattr(env, 'needs_functions'): # Used to store all registered functions for supporting dynamic need values. env.needs_functions = {} # needs_functions = getattr(app.config, 'needs_functions', []) needs_functions = app.needs_functions if needs_functions is None: needs_functions = [] if not isinstance(needs_functions, list): raise SphinxError('Config parameter needs_functions must be a list!') # Register built-in functions for need_common_func in needs_common_functions: register_func(env, need_common_func) # Register functions configured by user for needs_func in needs_functions: register_func(env, needs_func) app.config.needs_hide_options += ['hidden'] app.config.needs_extra_options['hidden'] = directives.unchanged if not hasattr(env, 'needs_workflow'): # Used to store workflow status information for already executed tasks. # Some tasks like backlink_creation need to be performed only once. # But most sphinx-events get called several times (for each single document file), which would also # execute our code several times... env.needs_workflow = { 'backlink_creation': False, 'dynamic_values_resolved': False }
Prepares the sphinx environment to store sphinx-needs internal data.
entailment
def make_entity_name(name): """Creates a valid PlantUML entity name from the given value.""" invalid_chars = "-=!#$%^&*[](){}/~'`<>:;" for char in invalid_chars: name = name.replace(char, "_") return name
Creates a valid PlantUML entity name from the given value.
entailment
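A usage example in the same doctest style used elsewhere in this file (the input name is hypothetical):

>>> make_entity_name("R-001 (draft)")
'R_001 _draft_'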
def main(self): """ Load in the template file, and run through the parser :return none: """ logger_lpd_noaa.info("enter main") # Starting Directory: dir_tmp/dir_bag/data/ # convert all lipd keys to noaa keys # timestamp the conversion of the file # MISC SETUP FUNCTIONS self.noaa_data_sorted["File_Last_Modified_Date"]["Modified_Date"] = generate_timestamp() self.__get_table_count() # Get measurement tables from metadata, and sort into object self self.__put_tables_in_self(["paleo", "paleoData", "measurementTable"]) self.__put_tables_in_self(["chron", "chronData", "measurementTable"]) # how many measurement tables exist? this will tell us how many noaa files to create self.__get_table_pairs() # reorganize data into noaa sections self.__reorganize() # special case: earliest_year, most_recent_year, and time unit # self.__check_time_values() # self.__check_time_unit() self.__get_overall_data(self.lipd_data) self.__reorganize_sensor() self.__lists_to_str() self.__generate_study_name() # END MISC SETUP FUNCTIONS # Use data in steps_dict to write to # self.noaa_data_sorted = self.__key_conversion(self.noaa_data_sorted) self.__create_file() logger_lpd_noaa.info("exit main") return
Load in the template file, and run through the parser :return none:
entailment
def __check_time_values(self): """ Rules 1. AD or CE units: bigger number is recent, smaller number is older 2. BP: bigger number is older, smaller number is recent. 3. No units: If max year is 1900-2017(current), then assume AD. Else, assume BP :return none: """ _earliest = float(self.noaa_data_sorted["Data_Collection"]["Earliest_Year"]) _recent = float(self.noaa_data_sorted["Data_Collection"]["Most_Recent_Year"]) try: _unit = self.noaa_data_sorted["Data_Collection"]["Time_Unit"] except Exception: _unit = "" if not _unit: # If the max value is between 1900 - 2017 (current), then assume "AD" _max = max([_earliest, _recent]) _min = min([_earliest, _recent]) if _max >= 1900 and _max <= 2018: self.noaa_data_sorted["Data_Collection"]["Time_Unit"] = "AD" self.noaa_data_sorted["Data_Collection"]["Most_Recent_Year"] = str(_max) self.noaa_data_sorted["Data_Collection"]["Earliest_Year"] = str(_min) # Else, assume it's BP else: # Units don't exist, assume BP self.noaa_data_sorted["Data_Collection"]["Time_Unit"] = "BP" self.noaa_data_sorted["Data_Collection"]["Most_Recent_Year"] = str(_min) self.noaa_data_sorted["Data_Collection"]["Earliest_Year"] = str(_max) else: # Units exist if _unit.lower() in ["ad", "ce"]: if _earliest > _recent: self.noaa_data_sorted["Data_Collection"]["Most_Recent_Year"] = str(_earliest) self.noaa_data_sorted["Data_Collection"]["Earliest_Year"] = str(_recent) else: if _recent > _earliest: self.noaa_data_sorted["Data_Collection"]["Most_Recent_Year"] = str(_earliest) self.noaa_data_sorted["Data_Collection"]["Earliest_Year"] = str(_recent) return
Rules 1. AD or CE units: bigger number is recent, smaller number is older 2. BP: bigger number is older, smaller number is recent. 3. No units: If max year is 1900-2017(current), then assume AD. Else, assume BP :return none:
entailment
def __convert_keys_2(header, d): """ Convert lpd to noaa keys for this one section :param str header: Section header :param dict d: Metadata :return dict: Metadata w/ converted keys """ d_out = {} try: for k, v in d.items(): try: noaa_key = LIPD_NOAA_MAP_BY_SECTION[header][k] d_out[noaa_key] = v except Exception: logger_lpd_noaa.warn("lpd_noaa: convert_keys_section: ran into an error converting {}".format(k)) except KeyError: logger_lpd_noaa.warn("lpd_noaa: convert_keys_section: KeyError: header key {} is not in NOAA_ALL_DICT".format(header)) except AttributeError: logger_lpd_noaa.warn("lpd_noaa: convert_keys_section: AttributeError: metadata for section {} is the wrong data type".format(header)) return d return d_out
Convert lpd to noaa keys for this one section :param str header: Section header :param dict d: Metadata :return dict: Metadata w/ converted keys
entailment
def __convert_keys_1(self, header, d): """ Loop over keys in a dictionary and replace the lipd keys with noaa keys :param str header: Section header :param dict d: Metadata :return dict d2: Metadata w/ converted keys """ d2 = {} try: for k, v in d.items(): try: d2[self.__get_noaa_key_w_context(header, k)] = v except KeyError: pass except Exception: return d return d2
Loop over keys in a dictionary and replace the lipd keys with noaa keys :param str header: Section header :param dict d: Metadata :return dict d2: Metadata w/ converted keys
entailment
def __create_blanks(section_name, d): """ All keys need to be written to the output, with or without a value. Furthermore, only keys that have values exist at this point. We need to manually insert the other keys with a blank value. Loop through the global list to see what's missing in our dict. :param str section_name: Retrieve data from global dict for this section :return none: """ try: for key in NOAA_KEYS_BY_SECTION[section_name]: if key not in d: # Key not in our dict. Create the blank entry. d[key] = "" except Exception: logger_lpd_noaa.error("lpd_noaa: create_blanks: invalid section: {}".format(section_name)) return d
All keys need to be written to the output, with or without a value. Furthermore, only keys that have values exist at this point. We need to manually insert the other keys with a blank value. Loop through the global list to see what's missing in our dict. :param str section_name: Retrieve data from global dict for this section :return none:
entailment
def __flatten_col(d): """ Flatten column so climateInterpretation and calibration are not nested. :param dict d: Column data :return dict d: Flattened column data """ try: for entry in ["climateInterpretation", "calibration"]: if entry in d: for k, v in d[entry].items(): d[k] = v del d[entry] except AttributeError: pass return d
Flatten column so climateInterpretation and calibration are not nested. :param dict d: Column data :return dict d: Flattened column data
entailment
def __generate_study_name(self): """ When a study name is not given, generate one in the format "authorLastName.siteName.pubYear" (spaces and commas are replaced with underscores) :return none: study name is set into self.noaa_data_sorted """ study_name = "" _exist = False try: if self.noaa_data_sorted["Top"]["Study_Name"]: _exist = True except KeyError: pass if not _exist: try: _site = self.noaa_data_sorted["Site_Information"]["properties"]["siteName"] _year = self.noaa_data_sorted["Publication"][0]["pubYear"] _author = self.noaa_data_sorted["Publication"][0]["author"] _author = self.__get_author_last_name(_author) study_name = "{}.{}.{}".format(_author, _site, _year) study_name = study_name.replace(" ", "_").replace(",", "_") except (KeyError, Exception): pass self.noaa_data_sorted["Top"]["Study_Name"] = study_name self.noaa_data_sorted["Title"]["Study_Name"] = study_name return
When a study name is not given, generate one in the format "authorLastName.siteName.pubYear" (spaces and commas are replaced with underscores) :return none: study name is set into self.noaa_data_sorted
entailment
def __lists_to_str(self): """ There are some data lists that we collected across the dataset that need to be concatenated into a single string before writing to the text file. :return none: """ # ["archive_type", "sensor_genus", "sensor_species", "investigator"] if self.lsts_tmp["archive"]: self.noaa_data_sorted["Top"]["Archive"] = ",".join(self.lsts_tmp["archive"]) if self.lsts_tmp["species"]: self.noaa_data_sorted["Species"]["Species_Name"] = ",".join(self.lsts_tmp["species"]) if self.lsts_tmp["genus"]: self.noaa_data_sorted["Species"]["Species_Code"] = ",".join(self.lsts_tmp["genus"]) if self.lsts_tmp["qc"]: if self.__is_notes(): self.noaa_data_sorted["Description_Notes_and_Keywords"]["Description"] = ";".join(self.lsts_tmp["qc"]) return
There are some data lists that we collected across the dataset that need to be concatenated into a single string before writing to the text file. :return none:
entailment
def __parse_dois(self, x): """ Parse the Dataset_DOI field. Could be one DOI string, or a list of DOIs :param any x: Str or List of DOI ids :return none: list is set to self """ # datasetDOI is a string. parse, validate and return a list of DOIs if isinstance(x, str): # regex cleans string, and returns a list with 1 entry for each regex doi match m = clean_doi(x) # make sure m is not an empty list if m: # set list directly into self self.doi = m # datasetDOI is a list. use regex to validate each doi entry. elif isinstance(x, list): for entry in x: # regex cleans string, and returns a list with 1 entry for each regex doi match m = clean_doi(entry) # make sure m is not an empty list if m: # combine lists with existing self list self.doi += m return
Parse the Dataset_DOI field. Could be one DOI string, or a list of DOIs :param any x: Str or List of DOI ids :return none: list is set to self
entailment
def __split_path(string): """ Used in the path_context function. Split the full path into a list of steps :param str string: Path string ("geo-elevation-height") :return list out: Path as a list of strings. One entry per path step.(["geo", "elevation", "height"]) """ out = [] position = string.find(':') if position != -1: # A position of 0+ means that ":" was found in the string key = string[:position] val = string[position+1:] out.append(key) out.append(val) if ('-' in key) and ('Funding' not in key) and ('Grant' not in key): out = key.split('-') out.append(val) return out
Used in the path_context function. Split the full path into a list of steps :param str string: Path string ("geo-elevation-height") :return list out: Path as a list of strings. One entry per path step.(["geo", "elevation", "height"])
entailment
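A worked example of the path split above (the path string is hypothetical):

string = "geo-meanElev:1500"
position = string.find(':')                      # 12
key, val = string[:position], string[position + 1:]
out = [key, val]                                 # ['geo-meanElev', '1500']
if '-' in key and 'Funding' not in key and 'Grant' not in key:
    out = key.split('-') + [val]
print(out)                                       # ['geo', 'meanElev', '1500']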
def _values_exist(table): """ Check that values exist in this table, and we can write out data columns :param dict table: Table data :return bool: Values exist or not exist """ try: for var, data in table["columns"].items(): if "values" in data: return True except KeyError as e: logger_lpd_noaa.warn("values_exist: KeyError: {}".format(e)) except Exception as e: logger_lpd_noaa.warn("values_exist: Exception: {}".format(e)) return False
Check that values exist in this table, and we can write out data columns :param dict table: Table data :return bool: Values exist or not exist
entailment
def __reorganize(self): """ Reorganize the keys into their proper section order for the NOAA output file DO NOT parse data tables (paleoData or chronData). We will do those separately. :return none: """ logger_lpd_noaa.info("enter reorganize") # NOAA files are organized in sections differently than LiPD files. try to translate these sections. for key, value in self.lipd_data.items(): # if this key has a noaa match, it'll be returned. otherwise, empty string for no match noaa_key = self.__get_noaa_key(key) # check if this lipd key is in the NOAA_KEYS conversion dictionary. # if it's not, then stash it in our ignore list. if key not in LIPD_NOAA_MAP_FLAT: self.noaa_data_sorted["Ignore"][noaa_key] = value # studyName is placed two times in file. Line #1, and under the 'title' section elif noaa_key == "Study_Name": # study name gets put in two locations self.noaa_data_sorted["Top"][noaa_key] = value self.noaa_data_sorted["Title"][noaa_key] = value # put archiveType in self, because we'll reuse it later for the 9-part-variables as well elif noaa_key == "Archive": self.lsts_tmp["archive"].append(value) # Dataset_DOI is a repeatable element. the key could be a single DOI, or a list of DOIs. elif noaa_key == "Dataset_DOI": self.__parse_dois(value) # all other keys. determine which noaa section they belong in. else: # noaa keys are sorted by section. for header, content in NOAA_KEYS_BY_SECTION.items(): try: # if our key is a noaa header key, then that means it's the ONLY key in the section. # set value directly if noaa_key == header: self.noaa_data_sorted[header] = value # all other cases, the key is part of the section elif noaa_key in content: self.noaa_data_sorted[header][noaa_key] = value except KeyError: # this shouldn't ever really happen, but just in case logger_lpd_noaa.warn("lpd_noaa: reorganize: KeyError: {}".format(noaa_key)) return
Reorganize the keys into their proper section order for the NOAA output file DO NOT parse data tables (paleoData or chronData). We will do those separately. :return none:
entailment
def __reorganize_author(self): """ LiPD delimits author names by "and". Noaa wants them to be semi-colon delimited. :return none: """ try: for idx, pub in enumerate(self.noaa_data_sorted["Publication"]): if "author" in pub: _str = pub["author"] if " and " in _str: self.noaa_data_sorted["Publication"][idx]["author"] = _str.replace(" and ", "; ") if ";" in _str: self.noaa_data_sorted["Publication"][idx]["author"] = _str.replace(";", "; ") except Exception: pass return
LiPD delimits author names by "and". Noaa wants them to be semi-colon delimited. :return none:
entailment
def __reorganize_coordinates(self): """ GEOJSON FORMAT : [ LONGITUDE, LATITUDE, ELEVATION] Reorganize coordinates based on how many values are available. :return: """ try: l = self.noaa_data_sorted["Site_Information"]['geometry']['coordinates'] locations = ["Northernmost_Latitude", "Southernmost_Latitude", "Easternmost_Longitude", "Westernmost_Longitude", "Elevation"] logger_lpd_noaa.info("coordinates: {} coordinates found".format(len(l))) # Amount of coordinates in the list _len_coords = len(l) # Odd number of coordinates. Elevation value exists if _len_coords % 2 == 1: # Store the elevation, which is always the last value in the list self.noaa_geo["Elevation"] = l[-1] # If elevation, then subtract one from the length _len_coords -= 1 # Start compiling the lat lon coordinates # 0 coordinate values. fill in locations with empty values if _len_coords == 0: for location in locations: self.noaa_geo[location] = ' ' # 2 coordinates values. duplicate to fill 4 location slots. elif _len_coords == 2: self.noaa_geo[locations[0]] = l[1] self.noaa_geo[locations[1]] = l[1] self.noaa_geo[locations[2]] = l[0] self.noaa_geo[locations[3]] = l[0] # 4 coordinate values. put each in its correct location slot. elif _len_coords == 4: for index, location in enumerate(locations): self.noaa_geo[locations[index]] = l[index] else: logger_lpd_noaa.info("coordinates: too many coordinates given") except KeyError: logger_lpd_noaa.info("lpd_noaa: coordinates: no coordinate information") except Exception: logger_lpd_noaa.error("lpd_noaa: coordinates: unknown exception") return
GEOJSON FORMAT : [ LONGITUDE, LATITUDE, ELEVATION] Reorganize coordinates based on how many values are available. :return:
entailment
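A worked example of the coordinate handling above (values are hypothetical): a 3-item GeoJSON list [lon, lat, elev] yields an elevation plus a single point duplicated into the four NOAA bounding fields.

coords = [-77.03, 38.90, 125]        # hypothetical [longitude, latitude, elevation]
noaa_geo = {}
if len(coords) % 2 == 1:             # odd length means an elevation value is present
    noaa_geo["Elevation"] = coords[-1]
lon, lat = coords[0], coords[1]
noaa_geo["Northernmost_Latitude"] = lat   # one point fills both latitude bounds
noaa_geo["Southernmost_Latitude"] = lat
noaa_geo["Easternmost_Longitude"] = lon   # and both longitude bounds
noaa_geo["Westernmost_Longitude"] = lon
print(noaa_geo)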
def __reorganize_funding(self): """ Funding gets added to noaa_data_sorted with LiPD keys. Change those keys to NOAA :return none: """ _map = {"agency": "Funding_Agency_Name", "grant": "Grant"} try: _l = [] for item in self.noaa_data_sorted["Funding_Agency"]: _tmp = {} for lpd_name, noaa_name in _map.items(): val = "" if lpd_name in item: val = item[lpd_name] _tmp[noaa_name] = val _l.append(_tmp) self.noaa_data_sorted["Funding_Agency"] = _l except Exception: pass return
Funding gets added to noaa_data_sorted with LiPD keys. Change those keys to NOAA :return none:
entailment
def __reorganize_geo(self): """ Concat geo value and units, and reorganize the rest References geo data from self.noaa_data_sorted Places new data into self.noaa_geo temporarily, and then back into self.noaa_data_sorted. :return: """ logger_lpd_noaa.info("enter reorganize_geo") try: # Geo -> Properties for k, v in self.noaa_data_sorted["Site_Information"]['properties'].items(): noaa_key = self.__get_noaa_key(k) self.noaa_geo[noaa_key] = v except KeyError: logger_lpd_noaa.info("reorganize_geo: KeyError: geo properties") try: # Geo -> Geometry self.__reorganize_coordinates() except Exception: logger_lpd_noaa.warning("reorganize_geo: Exception: missing required data: coordinates") # put the temporarily organized data into the self.noaa_data_sorted self.noaa_data_sorted["Site_Information"] = self.noaa_geo return
Concat geo value and units, and reorganize the rest References geo data from self.noaa_data_sorted Places new data into self.noaa_geo temporarily, and then back into self.noaa_data_sorted. :return:
entailment
def __reorganize_sensor(self): """ We have raw sensorGenus, and sensorSpecies in self, now clean and sort :return none: """ _code = [] _name = [] # Check if any of the sensor data is misplaced, and create corrected lists. if self.lsts_tmp["genus"]: for name in self.lsts_tmp["genus"]: if len(name) == 4 and name.isupper(): _code.append(name) else: _name.append(name) if self.lsts_tmp["species"]: for name in self.lsts_tmp["species"]: if len(name) != 4 and not name.isupper(): _name.append(name) else: _code.append(name) # Set the strings into the noaa data sorted self.lsts_tmp["species"] = _name self.lsts_tmp["genus"] = _code return
We have raw sensorGenus, and sensorSpecies in self, now clean and sort :return none:
entailment
def __put_names_on_csv_cols(names, cols): """ Put the variableNames with the corresponding column data. :param list names: variableNames :param list cols: List of Lists of column data :return dict: """ _combined = {} for idx, name in enumerate(names): # Use the variableName, and the column data from the same index _combined[name] = cols[idx] return _combined
Put the variableNames with the corresponding column data. :param list names: variableNames :param list cols: List of Lists of column data :return dict:
entailment
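A quick illustration of the index-based pairing above (hypothetical names and columns):

names = ["depth", "d18O"]
cols = [[0.5, 1.0, 1.5], [3.1, 3.4, 3.2]]
combined = {name: cols[idx] for idx, name in enumerate(names)}
print(combined)   # {'depth': [0.5, 1.0, 1.5], 'd18O': [3.1, 3.4, 3.2]}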
def __put_year_col_first(d): """ Always write year column first. Reorder dictionary so that year is first :param dict d: data :return dict: Reordered data """ if "year" in d: D = OrderedDict() # store the year column first D["year"] = d["year"] for k,v in d.items(): if k != "year": # store the other columns D[k] = v return D else: # year is not found, return data as-is return d
Always write year column first. Reorder dictionary so that year is first :param dict d: data :return dict: Reordered data
entailment
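A quick illustration of the reordering above (hypothetical columns):

from collections import OrderedDict

d = OrderedDict([("d18O", [3.1, 3.4]), ("year", [1950, 1951])])
reordered = OrderedDict()
reordered["year"] = d["year"]            # year column is written first
for k, v in d.items():
    if k != "year":
        reordered[k] = v
print(list(reordered.keys()))            # ['year', 'd18O']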