Columns: content (string, length 35 to 416k), sha1 (string, length 40), id (int64, range 0 to 710k)
def resultstocsv(results):
    """ Pretty useless, but it's here; just use the function. """
    results.to_csv('../../SARIMAX_hyperparameter_results.csv')
    return results
6050032f6d4f7d90885bb736d02749e587a06095
16,901
def is_calibration_point_finished(message):
    """Check if calibration for a calibration marker is done"""
    return "manual_marker_calibration" in str(message[b"name"]) and "Sampled" in str(
        message[b"msg"]
    )
709479eeb552f563b690bee7140120893baa2c06
16,902
def get_ellip(q):
    """Given minor to major axis ratio (q), returns ellipticity."""
    return (1 - q**2) / (1 + q**2)
56f1b2c9d5e821cc55344a3d387efafd82414224
16,903
def clean_up_tokenization(text):
    """
    Clean up a list of simple English tokenization artifacts like spaces
    before punctuation and abbreviated forms.
    From https://github.com/huggingface/transformers/blob/master/src/transformers/tokenization_utils.py#L1400
    """
    despace_substrings = [".", "?", "!", ",", "'", "n't", "'m", "'s", "'ve", "'re"]
    for s in despace_substrings:
        text = text.replace(f" {s}", f"{s}")
    replacements = {"do not": "don't"}
    # .items() is required here; iterating the dict directly yields only keys
    for k, v in replacements.items():
        text = text.replace(f" {k}", f" {v}")
    return text
8ecb329e89ddb9a49c23ee4745b574e66359cc6e
16,904
def wrap_negative(value, bits):
    """ Make a bitmask of a value, even if it is a negative value ! """
    upper_limit = (1 << bits) - 1
    lower_limit = -(1 << (bits - 1))
    if value not in range(lower_limit, upper_limit + 1):
        raise ValueError('Cannot encode {} in {} bits [{},{}]'.format(
            value, bits, lower_limit, upper_limit))
    mask = (1 << bits) - 1
    bit_value = value & mask  # Performing bitwise AND makes it two's complement.
    assert bit_value >= 0
    return bit_value
a5863b58f5aee34814142a0e2a659cf1baf2c132
16,905
def create_playlist_for_user(spotify_obj, spotify_username, playlist_name):
    """Method that creates a playlist with the given name for the given
    username, using an authorized spotipy.Spotify object. The created
    playlist's ID is returned."""
    playlist = spotify_obj.user_playlist_create(spotify_username, playlist_name)
    return playlist['id']
7a45d8250f58f58bb17ba4dfad0bb73bf9a1796a
16,907
import numpy


def _gen_sum_vector(datasize, filtersize):
    """Generate base down-sampling vector (down-sampling operator for one element)."""
    # For mean down-sampling: generate a 1D zero vector to calculate the
    # first element of the down-sampled ancestor.
    base_dsvector = numpy.zeros(shape=numpy.prod(datasize))
    # Mean down-sampling for a single point: generate the tuples of indices
    # for down-sampling using numpy.meshgrid.
    grid_idx = numpy.meshgrid(*[range(0, filtersize[i]) for i in range(len(filtersize))])
    ds_idx = grid_idx[0].flatten('C')
    # Convert the per-axis index tuples to indices into the flattened 1D array.
    # E.g. the indices of a 2D down-sampling of 4 pixels are (0,0),(1,0),(0,1),(1,1);
    # if the ancestor is a 32x32 px image, these convert to 0, 1, 32, 33.
    for i in range(1, len(datasize)):
        ds_idx += datasize[i - 1] * grid_idx[i].flatten('C')
    # base_dsvector[ds_idx] = 1 / len(ds_idx)
    base_dsvector[ds_idx] = 1
    return base_dsvector
b97e5a6fcffe3272e14fe8f87f0f7bb5588f0fcd
16,908
def _new_stats_dict():
    """
    _new_stats_dict creates a new stats dictionary containing 18 lists with
    the following keys: time, pid, tgid, uid, cpu_ticks, disp, gps,
    wifi_snd_pkts, wifi_rcv_pkts, 3g_low, 3g_high, calling, cpu_en,
    display_en, gps_en, wifi_en, 3g_en, total_en
    """
    return {'time': [], 'pid': [], 'tgid': [], 'uid': [], 'cpu_ticks': [],
            'disp': [], 'gps': [], 'wifi_snd_pkts': [], 'wifi_rcv_pkts': [],
            '3g_low': [], '3g_high': [], 'calling': [], 'cpu_en': [],
            'display_en': [], 'gps_en': [], 'wifi_en': [], '3g_en': [],
            'total_en': []}
49f1d0451036b7cd773b52bd7a5b963897abbfdb
16,909
def keychord(m) -> str:
    """A single key with modifiers"""
    return "-".join(m.modifier_list + [m.any_key])
78d21f67e80e7d1dbf5e07c270221df46cd334c0
16,910
import pickle


def get_reddit_model(fname='models/reddit_regression.pkl'):
    """ Load pre-trained reddit model from pickle file """
    with open(fname, 'rb') as fid:
        reddit_model = pickle.load(fid)
    return reddit_model
8bab0ff3811067830ad9d1a4da824ffa85bda86e
16,911
def with_dataframe(df, method, **kwargs):
    """Wrap a pandas DataFrame method into pipeline form."""
    func = getattr(df, method)
    new_df = func(**kwargs)
    return df if new_df is None or ('inplace' in kwargs and kwargs['inplace']) else new_df
6fd6da1c2dceefda5087cb90021a7b4b783b7c62
16,912
import os


def rgba_points_path():
    """Path to a pair of GeoJSON points. This is not a valid cutline."""
    return os.path.join(os.path.dirname(__file__), "data/rgba_points.geojson")
5c4fe55bd6859df14932b8ba3f20249d996cd222
16,914
def get_sharenums(tw_vectors):
    """
    :param tw_vectors: See ``allmydata.interfaces.TestAndWriteVectorsForShares``.

    :return set[int]: The share numbers which the given test/write vectors
        would write to.
    """
    return set(
        sharenum
        for (sharenum, (test, data, new_length)) in tw_vectors.items()
        if data
    )
07764f2920fc01cb0a66e0d421f98f6f59ab806e
16,915
def closing_dropout(params):
    """Removing all dropouts"""
    for k, v in params.values().items():
        if 'dropout' in k:
            setattr(params, k, 0.0)
        # consider closing label smoothing
        if 'label_smoothing' in k:
            setattr(params, k, 0.0)
    return params
bed941fd2037aaedb4c8da3a766682c29143a741
16,916
def inflate_dict(dct, sep=".", deep=-1):
    """Inflates a flattened dict.

    Will look in a simple dict of string keys with string values to create a
    dict containing sub-dicts as values.

    Samples are better than explanation:

    >>> from pprint import pprint as pp
    >>> pp(inflate_dict({'a.x': 3, 'a.y': 2}))
    {'a': {'x': 3, 'y': 2}}

    The keyword argument ``sep`` allows to change the separator used to get
    subparts of keys:

    >>> pp(inflate_dict({'etc/group': 'geek', 'etc/user': 'bob'}, "/"))
    {'etc': {'group': 'geek', 'user': 'bob'}}

    Warning: you cannot associate a value to a section:

    >>> inflate_dict({'section.key': 3, 'section': 'bad'})
    Traceback (most recent call last):
    ...
    TypeError: 'str' object does not support item assignment

    Of course, dicts containing only keys that don't use the separator will
    be returned without changes:

    >>> inflate_dict({})
    {}
    >>> inflate_dict({'a': 1})
    {'a': 1}

    Argument ``deep`` is the level of deepness allowed to inflate the dict:

    >>> pp(inflate_dict({'a.b.c': 3, 'a.d': 4}, deep=1))
    {'a': {'b.c': 3, 'd': 4}}

    Of course, a deepness of 0 won't do any changes, whereas a deepness of -1
    is the default value and means infinite deepness:

    >>> pp(inflate_dict({'a.b.c': 3, 'a.d': 4}, deep=0))
    {'a.b.c': 3, 'a.d': 4}

    """

    def mset(dct, k, v, sep=".", deep=-1):
        if deep == 0 or sep not in k:
            dct[k] = v
        else:
            khead, ktail = k.split(sep, 1)
            if khead not in dct:
                dct[khead] = {}
            mset(dct[khead], ktail, v,
                 sep=sep,
                 deep=-1 if deep < 0 else deep - 1)

    res = {}
    ## Sorting keys ensures that colliding values, if any, will be set
    ## string-first, so mset will crash with a TypeError exception.
    for k in sorted(dct.keys()):
        mset(res, k, dct[k], sep, deep)
    return res
fa929cd7a1b4825fb750755a76efcfab0e3a2666
16,918
def encode_module_value(v):
    """
    For all things not in builtins, return the module name; otherwise just
    return the name.
    """
    mod = v.__module__
    v = getattr(v, "__qualname__", v.__name__)
    if mod == "builtins":
        return v
    return {"module": mod, "name": v}
497b8838f8458ff973bd9d3a30b839b328d0ab11
16,919
def versioned_item_expression(
    item_version: int, item_version_key: str = "item_version", id_that_exists: str = ""
) -> dict:
    """Assembles a DynamoDB ConditionExpression with ExprAttrNames and Values
    that will ensure that you are the only caller of
    versioned_item_diffed_update that has updated this item.

    In general it would be a silly thing to not pass id_that_exists if your
    item_version is not also 0. However, since this is just a helper function
    and is only used (currently) by the local consumer
    versioned_item_diffed_update, there is no need to enforce this.
    """
    expr_names = {"#itemVersion": item_version_key}
    expr_vals = {":curItemVersion": item_version}
    item_version_condition = "#itemVersion = :curItemVersion"
    first_time_version_condition = "attribute_not_exists(#itemVersion)"
    if id_that_exists:
        expr_names["#idThatExists"] = id_that_exists
        first_time_version_condition = (
            f"( {first_time_version_condition} AND attribute_exists(#idThatExists) )"
        )
    return dict(
        ExpressionAttributeNames=expr_names,
        ExpressionAttributeValues=expr_vals,
        ConditionExpression=item_version_condition + " OR " + first_time_version_condition,
    )
f9bea2c833e9e74b3525f04b96b1b24c2bcafe19
16,920
def getattr_in(obj, name):
    """ Finds an attribute in @obj via a period-delimited string @name.

        @obj: (#object)
        @name: (#str) |.|-separated keys to search @obj in
        ..
            obj.deep.attr = 'deep value'
            getattr_in(obj, 'obj.deep.attr')
        ..
        |'deep value'|
    """
    for part in name.split('.'):
        obj = getattr(obj, part)
    return obj
8757c8f67cd5432e982988521c5d184fafe3d7e3
16,921
import os
import json


def load_results_from_disk(path, modelname):
    """Load the full result json for the given model from the path."""
    filename = os.path.join(path, modelname + '_results.json')
    with open(filename, 'r') as handle:
        parsed = json.load(handle)
    return parsed
cae53dd39078e4ffe86de577963aa33664d644ee
16,922
def extract_Synonym_sub(infile, ref):
    """
    :method: a sub-method
    :param infile: string (path)
    :param ref: string
    :return: ref, extended with the [li[4], li[5]] pairs read from infile
    """
    f = open(infile, 'r')
    for i in range(0, 3):
        f.readline()  # skip the three header lines
    while True:
        line = f.readline().strip()
        if not line:
            break
        li = line.split("\t")
        ref.append([li[4], li[5]])
    f.close()
    return ref
3b2720d830af38b231e52a60bb31127a19ea9339
16,923
from collections import OrderedDict


def csvtable_to_dict(fstream):
    """
    Convert a csv file stream into an in-memory dictionary.

    :param fstream: An open file stream to a csv table (with header)
    :returns: A dictionary with a key for each column header and a list of
              column values for each key.
    """
    # Read in the lines from the file stream.
    lines = fstream.readlines()
    # There are two pieces of information we need for the headers:
    # 1. The actual header titles.
    # 2. A map of index to header title
    header = lines.pop(0).strip("\n").split(",")
    # Retain the order of the columns as they're added.
    table = OrderedDict()
    # A map of column index to the appropriate header.
    indices = {}
    i = 0
    # For each item in the header, mark its index and initialize its column.
    for item in header:
        indices[i] = item
        table[item] = []
        i += 1
    # Walk each line of the table, mapping the columns in the row to their key.
    for line in lines:
        # Split the csv row
        row = line.split(",")
        # Walk each column and map it.
        for i in range(len(row)):
            table[indices[i]].append(row[i].strip("\n"))
    # Return the completed table
    return table
a562be13f2df806cbdd104eacb7dbca35afd2d35
16,924
def get_location_code(padding, indent):
    """ DRY """
    dt_str = ''
    return dt_str
7b3e011da301d49cc23d291b8a6fd95909a9e167
16,925
def _get_cheats(header):
    """Get cheat mode."""
    if header.de is not None:
        return header.de.cheats
    if hasattr(header.replay, 'cheats_enabled'):
        return header.replay.cheats_enabled
    return False
e73cf0d4be55b1080ceed6b3cdfa16aab6c1ed70
16,926
from pathlib import Path
import shutil
import subprocess


def create_new_pack():
    """Creates a new pack named TestUploadFlow under Packs."""
    content_path = Path(__file__).parent.parent.parent
    source_path = Path(__file__).parent / 'TestUploadFlow'
    dest_path = content_path / 'Packs' / 'TestUploadFlow'
    if dest_path.exists():
        shutil.rmtree(dest_path)
    shutil.copytree(source_path, dest_path)
    subprocess.call(['demisto-sdk', 'format', '-i', dest_path], stdout=subprocess.DEVNULL)
    return dest_path
13d4d3086214716ed04c1c7cbb0825da83710ee8
16,927
def updateCharacterName(old_name: str, new_name: str) -> str:
    """Return a query to update a given character's name.

    Note: the names are interpolated directly into the SQL string, so callers
    must not pass untrusted input (SQL injection risk).
    """
    return (f"UPDATE game_character "
            f"SET name='{new_name}' "
            f"WHERE name='{old_name}';"
            )
eb829e6be49393baf1c007c0331fd45a50050af5
16,930
def _readLines(fileName):
    """Return a list containing the lines in fileName, without '\\n' at the end."""
    with open(fileName, 'r') as file:
        lines = file.readlines()
    result = []
    for line in lines:
        result.append(line.replace('\n', ''))
    return result
d86f3f5644c08ab68c612394670b86442d9f4ca3
16,931
import click


def user_callback(_ctx, param, value):
    """Testing callback that transforms a missing value to -1 and otherwise
    only accepts 42."""
    if not value:
        return -1
    if value != 42:
        raise click.BadParameter('invalid integer', param=param)
    return value
f6d2a247f68ff37626a5abb7efc3b6c5967a5202
16,932
import torch


def bow_recon_loss(outputs, targets):
    """
    Note that outputs is the bag-of-words log likelihood predictions;
    targets is the target counts.
    """
    return -torch.sum(targets * outputs)
b15014244395a1979bc06aab6bacbfe41127ff6a
16,935
def np_gather_ijk_index(arr, index):
    """Gather the features of a given index from the feature grid.

    Args:
      arr (numpy array): h*w*d*c, feature grid.
      index (numpy array): nx*3, index of the feature grid

    Returns:
      nx*c, features at given index of the feature grid.
    """
    arr_flat = arr.reshape(-1, arr.shape[-1])
    _, j, k, _ = arr.shape
    index_transform = index[:, 0] * j * k + index[:, 1] * k + index[:, 2]
    return arr_flat[index_transform]
3d4ddadadad1fbd44b060be96829496b3ecc4888
16,937
def shift_and_aggregate(df, shift, aggregate):
    """
    To get a predictor from [lsp(t-3), ..., lsp(t-6)], use shift = 3 and
    aggregate = 3.

    Parameters
    ----------
    shift : int
    aggregate : int
    """
    return df.shift(time=shift).rolling(time=aggregate).sum() / aggregate
691f045bf321f6bbd487ab42fe74b5e1a971deef
16,938
def lcs(x, y):
    """
    Finds the longest common subsequence of two strings.

    :param x: a string input value.
    :param y: a string input value.
    :return: the longest common subsequence of the two strings.
    """
    matrix = [[0 for _ in range(len(y) + 1)] for _ in range(len(x) + 1)]
    for i in range(len(x) + 1):
        for j in range(len(y) + 1):
            if i == 0 or j == 0:
                matrix[i][j] = 0
            elif x[i - 1] == y[j - 1]:
                matrix[i][j] = matrix[i - 1][j - 1] + 1
            else:
                matrix[i][j] = max(matrix[i - 1][j], matrix[i][j - 1])
    char_list = []
    n = len(x)
    m = len(y)
    while n > 0 and m > 0:
        if x[n - 1] == y[m - 1]:
            char_list.append(x[n - 1])
            n -= 1
            m -= 1
        elif matrix[n - 1][m] < matrix[n][m - 1]:
            m -= 1
        else:
            n -= 1
    return "".join(char_list)[::-1]
eba82c940d0fd4726190cc4e77b7d76d6446cb1b
16,939
import os


def current_umask():
    """ Get currently applied umask """
    value = os.umask(0)
    os.umask(value)
    return value
a37be9de91f33d6d08791c498a400b1fc89c052e
16,941
def nested_print(this_name: str, root_dict: dict) -> str:
    """
    Get printable report of the elements of a nested dictionary.

    Parameters:
        this_name (str): nameof(root_dict), where "from varname import nameof".
        root_dict (dict): the dictionary whose elements must be printed.

    Returns:
        output (str): printable report of the elements of a nested dictionary.
    """
    output = ""
    for my_key, my_value in root_dict.items():
        if isinstance(my_key, int):
            my_key_value = f"[{my_key}]"
        elif isinstance(my_key, str):
            my_key_value = f'["{my_key}"]'
        else:
            raise NotImplementedError
        if isinstance(my_value, int):
            my_value_value = f"{my_value}"
        elif isinstance(my_value, str):
            my_value_value = my_value.replace('\n', '<LF>').replace('\r', '<CR>')
            my_value_value = f'"{my_value_value}"'
        else:
            my_value_value = "WTF?"
        if not isinstance(my_value, dict):
            output += f"{this_name}{my_key_value} = {my_value_value}\n"
        else:
            output += f"{this_name}{my_key_value} = dict()\n"
            output += nested_print(this_name + my_key_value, my_value)
    return output
fd409abec98f9c4f3001f17c49a987e594827c0c
16,943
import os


def environ_replace(match_obj):
    """
    Purpose: iterate over the match groups from a regex and return the value
    of the matching environment variable.

    :param match_obj: a regex match object
    """
    for x in match_obj.groups():
        return os.getenv(x)
e4815026f586179fa98fd56591ad3205873480f7
16,944
import sys


def parse_argv():
    """Parses arguments for use with the test launcher.

    Arguments are:
    1. Working directory.
    2. Test runner, `pytest` or `nose`
    3. Rest of the arguments are passed into the test runner.
    """
    return (sys.argv[1], sys.argv[2], sys.argv[3:])
3cc9d4d99bf8e3414df07a87ef1a78fb377aaed7
16,945
def example_encoded_image(example):
    """Gets image field from example as a string."""
    return example.features.feature['image/encoded'].bytes_list.value[0]
42da58881e1e55533206cfa147ff4a9e3e68fa23
16,946
def default_formatter(item):
    """
    Default formatter (%s)

    :param item: The item to save to file
    :return: The item to be saved to file with a newline appended
    """
    return '%s\n' % item
47603ec796f686a36562520492a591918c1d3041
16,947
def loop(iters, loop_suffix="0"):
    """Code containing an inline assembly loop responsible for delay.

    Args:
        * iters (int): The number of loop iterations.

    Kwargs:
        * loop_suffix (str): The suffix for the ASM loop label. Default: "0"

    Returns:
        * str asm_loop: The code containing the ASM delay loop.

    Notes:
        * Please ensure that throughout your .ino code the individual loops
          have unique identifiers (suffixes). The .ino code won't compile
          otherwise!
    """
    # Check if `iters` is int. If not, attempt conversion (if safe).
    if type(iters) is not int:
        try:
            assert int(iters) == iters
            iters = int(iters)
        except (ValueError, AssertionError):
            raise TypeError("Loop iteration count `iters` is not an int.")
    # Check that `iters` is a valid 32-bit unsigned int.
    # Also, forbid zero-iteration loops.
    if not 0 <= iters < 2**32:
        raise ValueError("Iteration count is not a valid 32-bit unsigned int.")
    if iters == 0:
        raise ValueError("Zero-iteration loops are forbidden.")
    # We fill a 32-bit loop counter register in two steps.
    # First goes the lower 16-bit half, then the top 16-bit half.
    # We need to split the number into two 16-bit halves, then convert to hex.
    top, bottom = [*map(hex, divmod(iters, 65536))]
    asm_loop = " asm volatile (\n" \
               f' "MOVW R1, #{bottom}\\n"\n' \
               f' "MOVT R1, #{top}\\n"\n' \
               f' "LOOP{loop_suffix}:\\n\\t"\n' \
               ' "NOP\\n\\t"\n' \
               ' "SUB R1, #1\\n\\t"\n' \
               ' "CMP R1, #0\\n\\t"\n' \
               f' "BNE LOOP{loop_suffix}\\n"\n' \
               " );"
    return asm_loop
91c1b08188331c7a6f5a7e443499514982c63727
16,948
import os


def IsBelowDir(root, other_path):
    """Check whether a path is below a root dir.

    This works around the nasty byte-byte comparison of commonprefix.
    """
    if not (os.path.isabs(root) and os.path.isabs(other_path)):
        raise ValueError("Provided paths '%s' and '%s' are not absolute" %
                         (root, other_path))
    norm_other = os.path.normpath(other_path)
    if norm_other == os.sep:
        # The root directory can never be below another path
        return False
    norm_root = os.path.normpath(root)
    if norm_root == os.sep:
        # This is the root directory, no need to add another slash
        prepared_root = norm_root
    else:
        prepared_root = "%s%s" % (norm_root, os.sep)
    return os.path.commonprefix([prepared_root, norm_other]) == prepared_root
d1ac3c20ed05d53ddd6def7aac3ad79f04c230e3
16,949
def sort_orbitals(element_pdos):
    """Sort the orbitals of an element's projected density of states.

    Sorts the orbitals based on a standard format. E.g. s < p < d.
    Will also sort lm decomposed orbitals. This is useful for plotting/saving.

    Args:
        element_pdos (dict): An element's pdos. Should be formatted as a
            :obj:`dict` of ``{orbital: dos}``. Where dos is a
            :obj:`~pymatgen.electronic_structure.dos.Dos` object. For example::

                {'s': dos, 'px': dos}

    Returns:
        list: The sorted orbitals.
    """
    sorted_orbitals = [
        "s",
        "p", "py", "pz", "px",
        "d", "dxy", "dyz", "dz2", "dxz", "dx2",
        "f", "f_3", "f_2", "f_1", "f0", "f1", "f2", "f3",
    ]
    unsorted_keys = element_pdos.keys()
    sorted_keys = []
    for key in sorted_orbitals:
        if key in unsorted_keys:
            sorted_keys.append(key)
    return sorted_keys
de9b607895bad3c09709dcf9c9f1692fb07d5f63
16,950
import os


def get_datadir():
    """ Path to data directory. """
    return os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data')
e545308a327967f3d916d68d98c8a7cfc20ec32a
16,951
def d_hid(request) -> int:
    """Mock hidden dimension."""
    return request.param
af1f5aa9c391b208fd0f2d34726f982c38431bef
16,952
def calc_scaled_cv(cv_df):
    """
    Scales the coefficient of variation of each attribute by the mean
    coefficient of variation of the same attribute. This allows finding the
    attribute of the cluster that has the lowest variation for the cluster in
    relationship to the variation of the variable in the whole dataset.
    """
    cv_df_scaled = cv_df.copy(deep=True)
    # Get the mean cv for the attributes
    means = cv_df_scaled.mean(axis=1)
    # Scale each cluster's cv by the attribute means
    for cluster in cv_df_scaled.columns:
        cv_df_scaled[cluster] /= means
    return cv_df_scaled
205857f448a2ec63636f0d544157db6d4a44170a
16,953
def crossref_title_parser(title):
    """Function to parse title.

    Returns: str
    """
    return " ".join(title[0].split())
91878f43c063fbbdda3e333d93436ba9ee377270
16,954
import os

if os.name != "nt":
    import resource  # POSIX-only module; unavailable on Windows


def peak_memory_measurement(proc):
    """Measures the peak memory consumption of the software in MiB up until a
    certain point in the runtime.

    :param proc: The current process.
    """
    if os.name == "nt":
        # Works only on Windows systems
        mem = proc.memory_full_info().peak_wset / 2 ** 20  # Original measurement in bytes
    else:
        mem = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024  # Original measurement in KiB
    return mem
5fd11bfc5900972aeefec1886cfe3b99479637a4
16,956
def cached_and_cgi(name, template_func, render):
    """Return 2 functions for testing template in cached and cgi modes."""
    _template = template_func()

    def test_cached():
        # reuse early created template
        render(_template)
    test_cached.__doc__ = "test_%s" % name

    def test_cgi():
        # create new template on each call
        render(template_func())
    test_cgi.__doc__ = "test_%s_cgi" % name

    return test_cached, test_cgi
da616817b7a45cfa0c340f7cbde970e009c35f73
16,957
def get_global_address(address, bank):
    """
    Return the rom address of a local address and bank.

    This accounts for a quirk in mbc3 where 0:4000-7fff resolves to
    1:4000-7fff.
    """
    if address < 0x8000:
        if address >= 0x4000 and bank > 0:
            return address + (bank - 1) * 0x4000
        return address
d72c9022f6c913d9d25f54c48539ea8f68f43b19
16,959
def minibatch(x, batchsize):
    """Group the rows of x into minibatches of length batchsize"""
    return [x[i:(i + batchsize)] for i in range(0, len(x), batchsize)]
026774d1a17454aebe714788eb1bed0ff4d45e3f
16,960
def stringify_dict(datum):
    """ Returns log data with all values as strings """
    return dict((x, str(datum[x])) for x in datum)
b4630c34f2812dc70079676707443486b71d50c3
16,962
import os

import pywintypes
from win32com.shell import shell


def show_files_win32(path, files):
    """Takes a path to a directory and a list of filenames in that directory
    to display.

    Returns True on success.
    """
    assert os.name == "nt"
    # is_fsnative and normalize_path are module-level helpers defined elsewhere.
    assert is_fsnative(path)
    assert all(is_fsnative(f) for f in files)

    # Use a set rather than a one-shot map iterator so that repeated
    # membership checks inside the loop work correctly.
    normalized_files = {normalize_path(f) for f in files}
    try:
        folder_pidl = shell.SHILCreateFromPath(path, 0)[0]
        desktop = shell.SHGetDesktopFolder()
        shell_folder = desktop.BindToObject(
            folder_pidl, None, shell.IID_IShellFolder)
        items = []
        for item in shell_folder:
            name = desktop.GetDisplayNameOf(item, 0)
            if normalize_path(name) in normalized_files:
                items.append(item)
        shell.SHOpenFolderAndSelectItems(folder_pidl, items, 0)
    except pywintypes.com_error:
        return False
    else:
        return True
04e8393f7cd3a8256337a49539d409db5b037d66
16,963
def ni_to_hr(ni, f):
    """Calculate heart rate in beat/min from estimated interval length.

    Args:
        ni (int): estimated inter-beat interval length
        f (float): in Hz; sampling rate of input signal

    Returns:
        float: heart rate in beat/min
    """
    if ni == -1:
        return -1
    return 60. * f / ni
541e6f31d9df2fb4645f7b7c501ad123ad1d9660
16,964
def calc_fuel_emf(plant_emf, heat_rate):
    """
    Helper function that converts the emission factor of a power plant into a
    specific factor for the fuels it burns. It is useful when the fuel factor
    is not reported. It only works with the base plants; don't use it with
    capture plants, as its outputs would not make sense.

    :param plant_emf: Emission factor in kg CO2 per kWh
    :param heat_rate: Heat rate in kJ per kWh
    :return: Emission factor as kg CO2 per kJ of heat produced, or
        ton CO2 per MJ of heat produced
    """
    return plant_emf / heat_rate
869cc32f72d343f2c2e7cbe6924201dbf8ca5e70
16,965
def _count():  # real signature unknown; restored from __doc__
    """
    _count() -> integer

    Return the number of currently running Python threads, excluding the main
    thread. The returned number comprises all threads created through
    `start_new_thread()` as well as `threading.Thread`, and not yet finished.

    This function is meant for internal and specialized purposes only. In
    most applications `threading.enumerate()` should be used instead.
    """
    return 0
ec307d4711140b063029bd28ee6abdfa094cd148
16,966
def check_piece_move(move, board):
    """Check if a move for a piece is valid.

    Args:
        move: Move to check
        board (Board): Board object

    Returns:
        bool: True if move is possible, False if not
    """
    return move in board.gen_legal_moves()
c54ee5863c0b8f01574c98858fd21b14bbe4c666
16,967
def insertion(N, M, i, j):
    """
    example input: N = 10000000000, M = 10011, i = 2, j = 6
    example output: 10001001100
    """
    M_shifted = M << i
    right_mask = (1 << i) - 1     # produces 0...011
    left_mask = -1 << (j + 1)     # produces 1...1000000
    full_mask = right_mask | left_mask
    N_cleared = N & full_mask
    return N_cleared | M_shifted
41e8d80239cbe42c383078d215339438984622f5
16,969
def l_out(l_in: int, padding: int, dilation: int, kernel: int, stride: int) -> int:
    """
    Determine the L_out of a 1d-CNN model given parameters for the 1D CNN.

    :param l_in: length of input
    :param padding: number of units to pad
    :param dilation: dilation for CNN
    :param kernel: kernel size for CNN
    :param stride: stride size for CNN
    :return: the output length L_out
    """
    return (l_in + 2 * padding - dilation * (kernel - 1) - 1) // stride + 1
85e5d94dbcfdce2c7671674b3ddb7dd77f69728e
16,971
def reshape_array(array, new_len):
    """
    array: shape = [M, N]
    new_len: the length of the new array; the reshaped shape will be
        [M // new_len, new_len, N]
    """
    M, N = array.shape
    m = M // new_len
    return array[: m * new_len, :].reshape([m, new_len, N])
38eefb3ec7caa97c15775b06dcef5a8d0bf7b42a
16,972
def junk_np_calc_chisq(data, b, w, coef):
    """
    Calculate chi squared.

    Args:
        data: nim x npix, single-precision numpy.ndarray. Data to be fit by
            the basis images
        b: nvec x npts, double-precision numpy.ndarray. The nvec basis images.
        w: nim x npts, single-precision numpy.ndarray. Weights (inverse
            variances) of the data.
        coef: nvec x npts, double-precision numpy.ndarray. The coefficients
            of the basis image fits.

    Returns:
        chisq, the total chi squared summed over all points and all images
    """
    return "1"
a18f9f2db55216bb0a371b6b64e59086b294e61e
16,973
import os
import torch


def save_checkpoint(model, optimizer, epoch, miou, compression_scheduler, config):
    """Saves the model in a specified directory with a specified name.

    Keyword arguments:
    - model (``nn.Module``): The model to save.
    - optimizer (``torch.optim``): The optimizer state to save.
    - epoch (``int``): The current epoch for the model.
    - miou (``float``): The mean IoU obtained by the model.
    - compression_scheduler: The compression scheduler associated with the model.
    - config: Model config.

    Returns:
        The path to the saved checkpoint.
    """
    name = config.name
    save_dir = config.checkpoint_save_dir

    assert os.path.isdir(
        save_dir), "The directory \"{0}\" doesn't exist.".format(save_dir)

    # Save model
    checkpoint_path = os.path.join(save_dir, name) + "_last.pth"
    checkpoint = {
        'epoch': epoch,
        'miou': miou,
        'state_dict': model.state_dict(),
        'optimizer': optimizer.state_dict(),
        'scheduler': compression_scheduler.state_dict()
    }
    torch.save(checkpoint, checkpoint_path)

    return checkpoint_path
eace204aa4512ae444c6ad07c74564128ff26357
16,974
def get_texts_by_category(texts, categories, choose_category):
    """
    :param texts: list of texts
    :param categories: list of category labels, parallel to texts
    :param choose_category: the category to select
    :return: the texts whose category matches choose_category
    """
    text_chosen = []
    for i in range(len(texts)):
        if categories[i] == choose_category:
            text_chosen += [texts[i]]
    return text_chosen
830a23e003786fc99b141afdd6de2ab10322193e
16,976
def change_list_to_dict(list_to_convert, key):
    """
    Changes a list into a dictionary using the 'key' parameter as the
    dictionary key. Assumes the key is in each dictionary. Assumes the key is
    unique.
    """
    dict_to_export = {}
    for each_obj_in_list in list_to_convert:
        # Check if this object's key value already exists in the dictionary.
        if each_obj_in_list[key] in dict_to_export:
            raise Exception(f"The key {each_obj_in_list[key]!r} is not unique.")
        dict_to_export[each_obj_in_list[key]] = each_obj_in_list
    return dict_to_export
7f624fe85469b0cfddf7723cdb561f1d6c2049ef
16,977
def get_shape_from_value_info(value):
    """Get shape from a value info.

    :param value: the value_info proto
    :return: list of the shape
    """
    return [d.dim_value for d in value.type.tensor_type.shape.dim]
77c3216cffd93900b50bb85ad6dfb43dda31b460
16,978
def zero(x):
    """Return zero."""
    return 0
d01d0d47730e2fbf800c37fcbe835ca3702216e7
16,980
def handleShouldBeList(elem):
    """ Handle if the user sends a value instead of a list """
    if elem:
        elem = elem if isinstance(elem, list) else [elem]
    return elem
36bd7a0adbd02b64b0e62b0fd4759a77770a20b7
16,981
import json


def read_label_file(label_path: str, min_height: float = 1.0, min_width: float = 1.0):
    """
    Reads a label file and returns a path: label dict.

    Args:
        label_path: path to label file (json)
        min_height: minimum AABB height for filtering labels
        min_width: minimum AABB width for filtering labels

    You can't believe how noisy some human annotations are. That single-pixel
    width and height filter is in there for a reason.
    """
    with open(label_path) as lph:
        labels = json.load(lph)
    for key, label in labels.items():
        pop_list = []
        for vehicle_id, vehicle in enumerate(label["vehicles"]):
            aabb = vehicle["AABB"]
            if aabb["x2"] - aabb["x1"] < min_width or aabb["y2"] - aabb["y1"] < min_height:
                pop_list.append(vehicle_id)
        for pop_id in reversed(pop_list):
            del label["vehicles"][pop_id]
    return labels
ad588db65d1cdb76fe92fc15e8d2323c59ba1dbc
16,982
import os
import pickle


def get_private_data(filename: str) -> dict:
    """
    Loads authorization information from a pickle dump.

    :param filename: path to the data dump
    :return: dict with the keys username, client_id, token, channel
    """
    if not os.path.exists(filename):
        private_data = {}
        print("Enter the bot's username: ")
        private_data["username"] = input()
        print("Enter the client id: ")
        private_data["client_id"] = input()
        print("Enter the user token: ")
        private_data["token"] = input()
        print("Enter the channel name: ")
        private_data["channel"] = input()
        with open(filename, "wb") as f:
            pickle.dump(private_data, f)
    with open(filename, "rb") as f:
        return pickle.load(f)
7689803d479fce1ff9f8c749cda3c646feb35108
16,983
def decode(encoded_digits: list[str], mapping: dict) -> int:
    """Decode a number.

    Use the mapping to decode the encoded digits and combine them to a number.

    Args:
        encoded_digits (list[str]): encoded digits
        mapping (dict): mapping that decodes each segment

    Returns:
        (int) decoded number

    Examples:
        >>> decode(["cf", "fc", "acf"], {"a":"a", "c":"c", "f":"f"})
        117
        >>> decode(["cb", "bc", "acb"], {"a":"a", "c":"c", "b":"f"})
        117
        >>> decode(["fcdb", "bc", "acb"], {"a":"a", "b":"f", "c":"c", "d":"d", "f":"b"})
        417
    """
    digits = {
        "abcefg": "0",
        "cf": "1",
        "acdeg": "2",
        "acdfg": "3",
        "bcdf": "4",
        "abdfg": "5",
        "abdefg": "6",
        "acf": "7",
        "abcdefg": "8",
        "abcdfg": "9",
    }
    result = ""
    for digit in encoded_digits:
        decoded_segments = ""
        for segment in digit:
            decoded_segments += mapping[segment]
        decoded_segments = "".join(sorted(decoded_segments))
        result += digits[decoded_segments]
    return int(result)
be4cbd2fbc8be31b1126676a3a115b345b598c8a
16,984
def _is_float(string_inp: str) -> bool:
    """Method to check if the given string input can be parsed as a float"""
    try:
        float(string_inp)
        return True
    except ValueError:
        return False
c9a798b551b5ef0e9460b0d16f48757510237b73
16,985
def __next_power_of_2(x):
    """
    Calculate the next power of 2 after x. If x is a power of 2, then x is
    returned. Uses a bit shift; works only on int types.

    :param x: must be a positive integer
    :return: the next power of 2, or x itself if x is a power of 2
    """
    return 1 << (x - 1).bit_length()
95841e5a3c773502a0fa2a7cb0e8e329d3ee22fc
16,988
def _multi_upsert(cursor, keys, values):
    """
    Takes parallel sequences of keys and values and upserts them as key:value
    pairs using a multiset.

    :param cursor:
    :param keys:
    :param values:
    :return:
    """
    upsert_dict = dict(zip(keys, values))
    return cursor.mset(upsert_dict)
9cbe8355e1d1fef88021471a208feba1e7317439
16,990
def str2bool(x):
    """Converts a string to boolean type.

    If the string is any of ['no', 'false', 'f', '0'], in any capitalization
    (e.g. 'fAlSe'), then returns False. All other strings are True.
    """
    if x is None or x.lower() in ['no', 'false', 'f', '0']:
        return False
    else:
        return True
b3360b999370137ed5b74e3a1a7d8ddaf17be03f
16,992
import math


def coord(angle, center, radius):
    """
    Return the coordinates (x, y) of a point on a circle.

    Coordinate notes: e.g. point(100, 100) is a point 100 pixels from the
    left border and 100 pixels from the top; box(100, 100, 150, 150) is a
    50-pixel-wide square box 100 pixels from the top and left borders.
    """
    # Convert degrees to radians
    rad = math.radians(angle)
    x = int(center[0] + math.cos(rad) * radius)
    y = int(center[1] + math.sin(rad) * radius)
    return x, y
9641eead13814a22073fe6b9839252e2863a59cc
16,993
def allsame(iterable):
    """Return whether all elements of an iterable are the same.

    The test uses `!=` to compare, and short-circuits at the first item that
    is different.

    If `iterable` is empty, the return value is `True` (like for `all`).
    If `iterable` has just one element, the return value is `True`.

    **CAUTION**: Consumes consumable iterables.
    """
    it = iter(iterable)
    try:
        x0 = next(it)
    except StopIteration:
        return True  # like all(()) is True
    for x in it:
        if x != x0:
            return False
    return True
7859dcc19a0385978f7f55ae4b35151a85a303a6
16,994
def moveMargin(img, margin):
    """Crop the border off an image."""
    x, w, y, h = margin
    return img[x:w, y:h]
9c24701205d82cd45108aa64ecff2b18c679eba8
16,995
import re


def isfilepath(value):
    """
    Return whether or not the given value is a Win or Unix file path, and
    return its type.

    If the value is a Win or Unix file path, this function returns
    ``True, Type``, otherwise ``False, Type``.

    Examples::

        >>> isfilepath('c:\\path\\file (x86)\\bar')
        True, 'Win'
        >>> isfilepath('/path')
        True, 'Unix'
        >>> isfilepath('c:/path/file/')
        False, 'Unknown'

    :param value: string to validate as a file path
    """
    win_path = re.compile(r'^[a-zA-Z]:\\(?:[^\\/:*?"<>|\r\n]+\\)*[^\\/:*?"<>|\r\n]*$')
    nix_path = re.compile(r'^(/[^/\x00]*)+/?$')

    if win_path.match(value):
        # check windows path limit, see:
        # http://msdn.microsoft.com/en-us/library/aa365247(VS.85).aspx#maxpath
        if len(value[3:]) > 32767:
            return False, 'Win'
        return True, 'Win'
    elif nix_path.match(value):
        return True, 'Unix'
    return False, 'Unknown'
56a423a3b27df5ad0e66291db0bd2698fef5a8b5
16,996
def _get_result_from_dynamo_query(response: dict) -> dict:
    """
    Dynamo returns a list of db items in the table. The query returns only
    one item, so take the first item from the list.
    """
    item_retrieved_from_db = response["Items"]
    item_retrieved_from_db = item_retrieved_from_db[0]
    return item_retrieved_from_db
da1276bca3412a213b80fff6771bb888b319690b
16,997
import glob
import os


def __get_file_handles(in_dir, ext, recursive=False):
    """
    Get file handles for all delimited files that are to be imported.

    Parameters
    ----------
    in_dir: str
        The directory in which the delimited files are stored.
    ext: str
        The file extension of the delimited files.
    recursive: bool, optional
        If :code:`True`, delimited files are imported for all child
        directories of :code:`directory` (including :code:`directory`).
        If :code:`False`, only files in :code:`directory` are imported.
        Default is :code:`False`.

    Returns
    -------
    in_files: list of str
        The file handles to all delimited files that are to be imported.
    """
    if not recursive:
        in_files = sorted(glob.glob('{}{}*.{}'.format(in_dir, os.sep, ext)))
    else:
        in_files = []
        dir_list = [x[0] + os.sep for x in os.walk(in_dir)]
        for directory in dir_list:
            in_files.extend(sorted(glob.glob('{}*.{}'.format(directory, ext))))
        # in_files = [f.replace(in_dir, '').lstrip(os.sep) for f in in_files]
    return in_files
8338bc5baff835d35888c7a60c3734fa5162485a
16,998
def is_there_a_global(name):
    """
    Simple utility to interrogate the global context and see if something is
    defined yet.

    :param name: Name to check for global definition in this module.
    :returns: Whether the target ``Name`` is defined in module globals and is
        not falsy.
    """
    gl = globals()
    return name in gl and bool(gl[name])
3c7c90dbb20894171162f14b1a4441e072dfa2c2
16,999
def zero_x_encoding(t):
    """0x encoding method.

    >>> zero_x_encoding("A")
    '0x41'
    >>> zero_x_encoding("ABC")
    '0x414243'
    """
    return "0x" + "".join(hex(ord(c))[2:] for c in t)
b3659f372fee1515584a147dec50fb74bb04db94
17,000
import torch


def train_model(train_loader, model, optimizer, criterion, device):
    """
    Note: train_loss and train_acc are accurate only if drop_last=False is set
    in the loader.

    :param train_loader: y: one-hot float tensor
    :param model:
    :param optimizer:
    :param criterion: set reduction='sum'
    :param device:
    :return:
    """
    model.train(mode=True)
    train_loss = 0
    correct = 0
    for batch_idx, (x, y) in enumerate(train_loader):
        x, y = x.to(device), y.to(device)
        global_prob = model(x)[0]
        if isinstance(criterion, torch.nn.CrossEntropyLoss):
            _, yi = y.max(dim=1)
            loss = criterion(global_prob, yi)
        else:
            loss = criterion(global_prob, y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        train_loss += loss.item()
        with torch.no_grad():
            pred = global_prob.max(1, keepdim=True)[1]  # get the index of the max log-probability
            _, y_idx = y.max(dim=1)
            correct += pred.eq(y_idx.view_as(pred)).sum().item()
    train_loss /= len(train_loader.dataset)
    train_acc = correct / len(train_loader.dataset)
    return {'loss': train_loss, 'acc': train_acc}
43621d0a6a0285960ffb2dad8f19ca3a4eebf29d
17,001
import torch


def crop_bounding_boxes(image_t, boundingboxes_t):
    """A differentiable version of bounding box cropping.

    Note that if the number of bounding boxes per image is different, the
    output tensors have different sizes.

    Parameters
    ----------
    image_t : torch.tensor
        Tensor with a batch of images, shaped [N, C, H, W]
    boundingboxes_t : torch.tensor
        Tensor with a batch of bounding box coordinates, shaped
        [N, N_boxes, 4]. First 2 indicate top left corner, last 2 indicate
        bottom right corner (x_top, y_top, x_bottom, y_bottom)
    """
    image_stack = []
    for image, box_coords in zip(image_t, boundingboxes_t):
        crops = []
        for coords in box_coords:
            x_min, y_min, x_max, y_max = coords
            crops.append(image[:, x_min:x_max, y_min:y_max])
        image_stack.append(torch.stack(crops))
    return image_stack
1be12c49489935a5c0514285dfa8b3c1c5620e46
17,002
import pathlib
import os


def _get_config_path(system: str) -> pathlib.Path:
    """Return the path of the config file.

    Parameters:
        system: The OS being used.
    """
    home_dir = pathlib.Path.home()
    if system == 'Windows':
        config_path = (
            pathlib.Path(
                os.getenv('APPDATA', home_dir / pathlib.Path('AppData', 'Roaming')))
            / pathlib.Path('chime', 'chime.ini')
        )
    else:
        config_path = home_dir / pathlib.Path('.config', 'chime', 'chime.conf')
    return config_path.resolve().absolute()
06823fcf962d52623de29708228cb52aa04210ab
17,003
import torch
import math


def softmax(scores: torch.Tensor, base: float = math.e, axis: int = -1) -> torch.Tensor:
    """Returns a softmax array for an array of scores.

    Converts a set of raw scores from a model (logits) into a probability
    distribution via softmax. The probability distribution will be a set of
    real numbers such that each is in the range 0-1.0 and the sum is 1.0.

    Assumes input is a pytorch tensor: tensor([1.0, 4.0, 2.0, 3.0])

    Args:
        scores (pytorch tensor): a pytorch tensor of any positive/negative
            real numbers.
        base (float): the base for the exponential (default e)
        axis (int): the axis along which to normalize (default -1)
    """
    exps = base ** scores.float()  # exponential for each value in array
    sum_exps = torch.sum(exps, dim=axis, keepdim=True)  # sum of all exponentials
    prob_dist = exps / sum_exps  # normalize exponentials
    return prob_dist
f2719094a7de73e362a944e28491e34fc56e67d9
17,004
def CopyRunfiles(ctx, runfiles, copy, symlink, executable, subdir):
    """Copies all runfiles to the same directory and returns new runfiles.

    Args:
      ctx: [DotnetContext](api.md#DotnetContext)
      runfiles: depset(File) to copy to target directory of executable
      copy: target for utility copy tool
      symlink: target for utility symlink tool
      executable: [DotnetLibrary](api.md#DotnetLibrary) whose directory is used as a base dir for the runfiles
      subdir: additional subdirectory to copy files to

    Returns:
      [runfiles](https://docs.bazel.build/versions/master/skylark/lib/runfiles.html)
    """
    copied = {}
    created = []
    nocopy_dir = executable.result.dirname
    for f in runfiles.files.to_list():
        found = copied.get(f.basename)
        if found:
            continue
        copied[f.basename] = True
        if f.basename == "mono" or f.basename == "mono.exe":
            newfile = ctx.actions.declare_file(subdir + f.basename)
            ctx.actions.run(
                outputs = [newfile],
                inputs = [f] + symlink.files.to_list(),
                executable = symlink.files.to_list()[0],
                arguments = [newfile.path, f.path],
                mnemonic = "LinkFile",
            )
            created.append(newfile)
        elif f.dirname != nocopy_dir:
            if f.basename.find("hostfxr") >= 0:
                version = f.path.split("/")
                newfile = ctx.actions.declare_file("{}/host/fxr/{}/{}".format(subdir, version[-2], version[-1]))
            else:
                newfile = ctx.actions.declare_file(subdir + f.basename)
            ctx.actions.run(
                outputs = [newfile],
                inputs = [f] + copy.files.to_list(),
                executable = copy.files.to_list()[0],
                arguments = [newfile.path, f.path],
                mnemonic = "CopyFile",
            )
            created.append(newfile)
        else:
            created.append(f)
    return ctx.runfiles(files = created)
b123a5ced2bd05000e58ff37344586f94415ddc6
17,005
import json
from typing import Optional


def read_json_file(path: str, silent: bool = True) -> Optional[dict]:
    """
    Convenience function to read a json file while catching exceptions.

    :param path: Path to the json file
    :param silent: Whether to ignore exceptions or not
    :return: Optional[dict]
    """
    content = None
    exception = None
    try:
        with open(path, 'r') as json_file:
            content = json.load(json_file)
    except FileNotFoundError as file_not_found_error:
        exception = file_not_found_error
    except json.decoder.JSONDecodeError as json_decode_error:
        exception = json_decode_error
    if not silent and exception:
        raise exception
    return content
0793ebd769a40a64ecfd8d65e0f51100f52c51c9
17,006
import argparse


def grab_args():
    """Grab and return arguments."""
    parser = argparse.ArgumentParser(
        description="Upload results to swift using instructions from zuul"
    )
    parser.add_argument('--no-indexes', action='store_true',
                        help='do not generate any indexes at all')
    parser.add_argument('--no-root-index', action='store_true',
                        help='do not generate a root index')
    parser.add_argument('--no-dir-indexes', action='store_true',
                        help='do not generate indexes inside dirs')
    parser.add_argument('--no-parent-links', action='store_true',
                        help='do not include links back to a parent dir')
    parser.add_argument('-n', '--name', default="logs",
                        help='The instruction-set to use')
    parser.add_argument('files', nargs='+',
                        help='the file(s) to upload')
    return parser.parse_args()
6a033fc41dad94cffe65d20025ed002c555a396c
17,007
def summarize_cag_taxa(cag_id, cag_tax_df, taxa_rank):
    """Helper function to summarize the top hit at a given rank."""
    # If there are no hits at this level, return a placeholder
    if cag_tax_df is None:
        return {
            "CAG": cag_id,
            "name": 'none',
            "label": "No genes assigned at this level"
        }
    # Return the top hit
    return {
        "CAG": cag_id,
        "name": cag_tax_df["name"].values[0],
        "label": "{}<br>{:,} genes assigned".format(
            cag_tax_df["name"].values[0],
            int(cag_tax_df["count"].values[0])
        )
    }
e3b2736bf223490c5f3b9957b1734eeeffa2de07
17,008
def is_localhost(host):
    """Verifies if the connection is local.

    Parameters
    ----------
    host : str
        The requesting host, in general self.request.headers['host']

    Returns
    -------
    bool
        True if local request
    """
    localhost = ('localhost', '127.0.0.1')
    return host.startswith(localhost)
3366fbc02d3e89606a27a2a0622624182d498c2f
17,009
def print_vpd_hex_ascii(vpd_list, display=True):
    """
    Add an ASCII string to the VPD dump list.

    Input:
        ['0000: 02 01 00 00 00 00 00 00 30 39 39 32 38 38 36 2d',
         '0010: 30 38 00 00 00 00 00 00 00 00 00 00 00 00 00 00']

    Returns:
        ['0000: 02 01 00 00 00 00 00 00 30 39 39 32 38 38 36 2d ........0992886-',
         '0010: 30 38 00 00 00 00 00 00 00 00 00 00 00 00 00 00 08..............']
    """
    dump_lines = []
    hex_str = ''
    string = ''
    addr_header = ''
    for i in range(len(vpd_list)):
        if i % 16 == 0:
            addr_header = "%04x" % (i)
        v = vpd_list[i]
        v_d = v_h = v
        if isinstance(v, int):
            v_d = v
            v_h = "%02x" % v
        elif isinstance(v, str):
            v_h = v
            v_d = int(v, 16)
        c = "."
        if v_d >= 32 and v_d <= 126:
            c = chr(v_d)
        hex_str += v_h + ' '
        string += c
        if i % 16 == 15 or i == (len(vpd_list) - 1):
            line = addr_header + ": " + hex_str + " " + string
            hex_str = ''
            string = ''
            dump_lines.append(line)
    return dump_lines
03389ede1d82b98c59a34f6c2f16b6058afba984
17,010
def inside(resource1, resource2):
    """Is ``resource1`` 'inside' ``resource2``? Return ``True`` if so, else
    ``False``.

    ``resource1`` is 'inside' ``resource2`` if ``resource2`` is a
    :term:`lineage` ancestor of ``resource1``. It is a lineage ancestor if
    its parent (or one of its parent's parents, etc.) is an ancestor.
    """
    while resource1 is not None:
        if resource1 is resource2:
            return True
        resource1 = resource1.__parent__
    return False
906a05912bba8b299e42fdb3a3b4547a1b160bb4
17,011
def ask(question, no_input=False):
    """Display a Y/n question prompt, and return a boolean"""
    if no_input:
        return True
    # Re-prompt until a valid selection is entered, so the function always
    # returns a boolean as documented.
    while True:
        input_ = input('%s [Y/n] ' % question)
        input_ = input_.strip().lower()
        if input_ in ('y', 'yes', ''):
            return True
        if input_ in ('n', 'no'):
            return False
        print('Invalid selection')
b7eed52f3fa3eb65ed99d2076cce6520489269a1
17,013
def electrical_mobility_from_D(D, charge, T, constants=None, units=None):
    """
    Calculates the electrical mobility through the Einstein-Smoluchowski
    relation.

    Parameters
    ----------
    D: float with unit
        Diffusion coefficient
    charge: integer
        charge of the species
    T: float with unit
        Absolute temperature
    constants: object (optional, default: None)
        if None: T assumed to be in Kelvin and b0 = 1 mol/kg
        else: see source code for what attributes are used.
        Tip: pass quantities.constants
    units: object (optional, default: None)
        attributes accessed: meter, Kelvin and mol

    Returns
    -------
    Electrical mobility
    """
    if constants is None:
        kB = 1.38064852e-23
        e = 1.60217662e-19
        if units is not None:
            kB *= units.joule / units.kelvin / units.mol
            e *= units.coulomb
    else:
        kB = constants.Boltzmann_constant
        e = constants.elementary_charge
    return D * charge * e / (kB * T)
ec6d60ead515baf0a2faad59661f94067a7f3f7f
17,015
def get_index_by_node_id(data):
    """ Indexes a Dynalist data object by node for easy navigation. """
    index = {}
    for node in data["nodes"]:
        index[node["id"]] = node
    return index
2a630c7468258625c9a3193e6b9906ad23293375
17,016
import yaml


def dump(data):
    """ Dump a YAML file. """
    return yaml.dump(data)
37845ceb70fa0fddcbf6a2fbbec51212bc70c897
17,017
def two_strings(s1: str, s2: str) -> str:
    """
    >>> two_strings("hello", "world")
    'YES'
    >>> two_strings("hi", "world")
    'NO'
    """
    intersection = {*s1} & {*s2}
    return "YES" if intersection else "NO"
72dac0f529f0267ae9a19ef6230aba4cdfb681ce
17,018
def copysign(x, y):
    """Return *x* with the sign of *y*.

    On a platform that supports signed zeros, ``copysign(1.0, -0.0)`` returns
    *-1.0*.
    """
    return 0.0
9cd6630cd103347e13f98d415a8574567ba4d683
17,019
def mat_mul(model, blob_in, blob_out, **kwargs):
    """Matrix multiplication"""
    return model.net.MatMul(blob_in, blob_out, **kwargs)
c77a0f66f3fa29ea9aa7f0afc20f29fa48e31fe4
17,020
def subset_on_taxonomy(dataframe, taxa_level, name):
    """
    Return only rows of the dataframe where the value in column taxa_level
    matches the specified name.

    :param dataframe: Pandas DataFrame with columns like 'Kingdom', 'Phylum',
        'Class', ...
    :param taxa_level: a taxonomic label such as "Genus" or "Order"
    :param name: taxa_level name to match
    :return: subset of Pandas DataFrame matching the selection
    """
    # print(dataframe.columns)
    return dataframe[dataframe[taxa_level] == name]
a1d72a96b277791d677e2bf81073ed7f4daa423f
17,021
def clean_movie_title(movie_title: str) -> str:
    """ Cleans up Movie Titles for Search """
    split_title = movie_title.split()
    return " ".join(title.capitalize() for title in split_title).strip()
157ef34ed8fae279a210cba2059db464ae77764e
17,023
from datetime import datetime
from typing import Optional, Union


def fix_time_retrieved(s: str) -> Optional[Union[str, datetime]]:
    """
    Fixes timestamps of the format: 15-Jul-2021 (22:29:25.643316)
    """
    if not s or s == "None":
        return s
    return datetime.strptime(s, "%d-%b-%Y (%H:%M:%S.%f)")
6d6a163490bbfe312c4ca1a4e2508ba1f71f096d
17,027