Columns: content (string, 35-416k chars), sha1 (string, 40 chars), id (int64, 0-710k)
def deny_all(*args, **kwargs):
    """Return a permission that always denies access.

    :returns: An object instance with a ``can()`` method.
    """
    return type('Deny', (), {'can': lambda self: False})()
74898e52aba03a57267c6209b434a1c8843a39b7
20,728
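A quick usage sketch for the `deny_all` record above; the call arguments are arbitrary since the function ignores them:

perm = deny_all('user', action='write')  # args and kwargs are ignored
print(perm.can())  # -> False, regardless of input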
def get_chosen_df(processed_cycler_run, diag_pos):
    """
    This function narrows your data down to a dataframe that contains only
    the diagnostic cycle number you are interested in.

    Args:
        processed_cycler_run (beep.structure.ProcessedCyclerRun)
        diag_pos (int): diagnostic cycle occurrence for a specific
            <diagnostic_cycle_type>. e.g. if rpt_0.2C occurs at
            cycle_index = [2, 37, 142, 244 ...], <diag_pos>=0 would
            correspond to cycle_index 2.

    Returns:
        a dataframe that only has the diagnostic cycle you are interested
        in, with a column called 'diagnostic_time' (in hours) starting
        from 0.
    """
    data = processed_cycler_run.diagnostic_interpolated
    hppc_cycle = data.loc[data.cycle_type == "hppc"]
    hppc_cycle = hppc_cycle.loc[hppc_cycle.current.notna()]
    cycles = hppc_cycle.cycle_index.unique()
    diag_num = cycles[diag_pos]
    selected_diag_df = hppc_cycle.loc[hppc_cycle.cycle_index == diag_num]
    selected_diag_df = selected_diag_df.sort_values(by="test_time")
    selected_diag_df["diagnostic_time"] = (
        selected_diag_df.test_time - selected_diag_df.test_time.min()) / 3600
    return selected_diag_df
b09abfdc3a9b1fa7836f548d6c40ca7845321418
20,729
def hms_to_sec(hms):
    """
    Converts a given half-min-sec iterable to a unique second value.

    Parameters
    ----------
    hms : Iterable (tuple, list, array, ...)
        A pack of half-min-sec values. This may be a tuple (10, 5, 2),
        list [10, 5, 2], ndarray, and so on.

    Returns
    -------
    out : scalar (int, float, ...) depending on the input values.
        Unique second value.
    """
    h, m, s = hms
    return 3000 * h + 60 * m + s
9da83a9487bfe855890d5ffd4429914439f4b28b
20,733
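A worked example of the arithmetic in `hms_to_sec` above, with one "half" counting as 3000 seconds:

print(hms_to_sec((1, 2, 3)))  # 3000*1 + 60*2 + 3 -> 3123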
import os


def get_path_to_file_from_here(filename, subdirs=None):
    """
    Returns the whole path to a file that is in the same directory or
    subdirectory as the file this function is called from.

    Parameters
    ----------
    filename : str
        The name of the file
    subdirs : list of strs, optional
        A list of strings containing any subdirectory names. Default: None

    Returns
    -------
    str
        The whole path to the file
    """
    if subdirs is None:
        path = os.path.join(os.path.dirname(__file__), filename)
    elif isinstance(subdirs, list):
        path = os.path.join(os.path.dirname(__file__), *subdirs, filename)
    else:
        msg = ("subdirs must have type list. "
               "If you want a single subdirectory, use subdirs=['data']")
        raise ValueError(msg)
    return path
f6f89355986c3ca42749f5a3c982aad0f79e1e39
20,734
def tag_clusters(docs, cluster_assignment, cluster_name="cluster_covid"):
    """Tag clusters with an assignment"""
    docs_copy = docs.copy()
    docs_copy[cluster_name] = docs_copy["project_id"].map(cluster_assignment)
    docs_copy[cluster_name] = docs_copy[cluster_name].fillna("non_covid")
    return docs_copy
dd48b43dc54483e248cbc149de7e3e968c943836
20,735
def easeInOutQuad(t, b, c, d):
    """Robert Penner easing function, examples at: http://gizma.com/easing/

    t = current time in frames or whatever unit
    b = beginning/start value
    c = change in value
    d = duration
    """
    t /= d / 2
    if t < 1:
        return c / 2 * t * t + b
    t -= 1
    return -c / 2 * (t * (t - 2) - 1) + b
a4ae2cc0b2c03a499bee456a08bdada023570361
20,736
def defaultToSelfText(fn):
    """
    A fun little decorator that makes it so we can default to the text
    stored on a class instance, but also let people just instantiate and
    re-use calls while supplying new text. Whee.
    """
    def wrapper(self, text=None):
        if text is None:
            text = self.text
        return fn(self, text=text)
    return wrapper
add3fbc6247b6959a2acc9e33580acc2e3d07a8b
20,739
def to_str(data, **kwargs):
    """
    Reference Name ``to_str``

    Function to transform Python structure to string without applying any
    formatting

    :param data: (structure) Python structure to transform
    :param kwargs: (dict) ignored
    :return: string
    """
    return str(data)
e1972d06e5f3328ad63d880b5e71cd5fc5c17005
20,740
def a_1d_worse_less_corr(a_1d):
    """similar but not perfectly correlated a_1d."""
    a_1d_worse = a_1d.copy()
    step = 3
    a_1d_worse[::step] = a_1d[::step].values + 0.1
    return a_1d_worse
f7c0120968f0772095a63b3497b5fdf08d67ce74
20,741
def higlight_row(string):
    """
    When hovering over a label, highlight the corresponding row in the
    table, using the label column.
    """
    index = string["points"][0]["customdata"]
    return [
        {
            "if": {"filter_query": "{label} eq %d" % index},
            "backgroundColor": "#3D9970",
            "color": "white",
        }
    ]
c4473cf2d41b4ef7df6d08048dd6f9fe8f5d4099
20,742
def enforce_order(fmt: bytes) -> bytes:
    """Enforce consistent ordering in a ``struct`` format"""
    if fmt[:1] == b'>':
        return fmt
    elif fmt[:1] not in b'@=<!':
        return b'>' + fmt
    else:
        return b'>' + fmt[1:]
d1b74bcf9641f232c0aea10f9bea77456dbc1637
20,743
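A small sketch of how `enforce_order` above behaves on the three kinds of `struct` format strings:

print(enforce_order(b'>IH'))  # b'>IH'  (already big-endian, returned as-is)
print(enforce_order(b'IH'))   # b'>IH'  (no byte-order prefix, one is added)
print(enforce_order(b'<IH'))  # b'>IH'  (existing prefix replaced)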
def get_dict_value(key, data):
    """Return data[key] with improved KeyError."""
    try:
        return data[key]
    except (KeyError, TypeError):
        raise KeyError("No key [%s] in [%s]" % (key, data))
166656c226bb7a846c7c63f6d8d07ab7ee1a81f9
20,744
from typing import Union
import re


def _expand_deb_query_value(key: str, value: str) -> Union[str, list]:
    """Expands a value gained from deb-query --status if necessary.

    Some keys like 'Depends' are a list of other packages, which might
    contain package version information too. For further ease of
    computation we break them into a list of dicts if necessary.

    :param key: the key as gained by deb-query
    :param value: the value of this key
    :return:
    """
    if key in ['replaces', 'depends', 'breaks', 'recommends', 'conflicts',
               'suggests', 'pre-depends']:
        package_version_list = []
        for pkg in value.split(','):
            pkg = pkg.strip()
            m = re.match(r'(^.*).\((.*)\)', pkg)
            if m:
                package_version_list.append({'package': m.group(1),
                                             'version': m.group(2)})
            else:
                package_version_list.append({'package': pkg})
        return package_version_list
    return value
d5af88395912ad098ddaab07bf44c93d19af44a3
20,745
def list_to_string(s, separator='-'):
    """
    Convert a list of numeric types to a string with the same information.

    Arguments
    ---------
    s : list of integers
    separator : a placeholder between strings

    Returns
    -------
    string

    Example
    -------
    >>> list_to_string([0, 1], "-")
    '0-1'
    """
    return separator.join(str(ele) for ele in s)
bf926cbc87895fe820d5de8c206f499f9558eefd
20,746
import functools


def cached_method(cache_size=100):
    """
    Decorator to cache the output of a class method by its positional
    arguments. Up to 'cache_size' values are cached per method, and
    entries are removed in a FIFO order.
    """
    def decorator(fn):
        @functools.wraps(fn)
        def wrapper(self, *args, **kwargs):
            _dict = self._cached_methods[fn.__name__]
            if args in _dict:
                return _dict[args]
            res = fn(self, *args, **kwargs)
            if len(_dict) >= cache_size:
                _dict.popitem(False)  # pop the oldest entry (FIFO)
            _dict[args] = res
            return res
        return wrapper
    return decorator
3e580dc984373d3a19fd37a7c195ee3c4842d8b3
20,748
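A usage sketch for `cached_method` above. The decorator assumes the instance carries a `_cached_methods` mapping of method name to OrderedDict, which the class itself must provide; the `Squarer` class here is invented for illustration:

from collections import OrderedDict, defaultdict

class Squarer:
    def __init__(self):
        # one OrderedDict per decorated method, created on demand
        self._cached_methods = defaultdict(OrderedDict)

    @cached_method(cache_size=2)
    def square(self, x):
        print('computing', x)
        return x * x

s = Squarer()
s.square(3)  # prints 'computing 3' and returns 9
s.square(3)  # cache hit: returns 9 without recomputing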
def portion_dedusted(total, fraction):
    """
    Compute the amount of an asset to use, given that you have total and
    you don't want to leave behind dust.
    """
    if total - (fraction * total) <= 1:
        return total
    else:
        return fraction * total
473ee713986b869d6dc193410ef76f54a04f6499
20,749
def line_is_heading(line):
    """Check whether a line of output generated by :func:`report_metrics()`
    should be highlighted as a heading."""
    return line.endswith(':')
e0696c15188de89cd81caf84caf854aa2f15e11e
20,751
def stringify_json_data(data):
    """Convert each leaf value of a JSON object to string."""
    if isinstance(data, list):
        for i, entry in enumerate(data):
            data[i] = stringify_json_data(entry)
    elif isinstance(data, dict):
        for key in data:
            data[key] = stringify_json_data(data[key])
    elif not isinstance(data, str):
        return str(data)
    return data
fb40ba20b0c7fc66f7f7c3c4fd9daa1a2f7a3428
20,752
def sort_to_buckets(buclist, data_iter, keyfunc=None):
    """Sorts items in data_iter into len(buclist)+1 buckets, with the
    buclist (presumably sorted) providing the dividing points.

    Items are put into a bucket if they are <= the corresponding buclist
    item, with items greater than buclist[-1] put into the final bucket.

    Items will be compared directly with buclist items unless a keyfunc
    is provided, then buclist[x] >= keyfunc(item) will be performed."""
    if not keyfunc:
        keyfunc = lambda x: x
    res = [[] for x in range(len(buclist) + 1)]
    for d in data_iter:
        for i, b in enumerate(buclist):
            if b >= keyfunc(d):  # <= contract from the docstring
                res[i].append(d)
                break
        else:
            res[-1].append(d)
    return res
73c6545ed8ed11758a7e7828d20eec1945e5ae12
20,753
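A small example of `sort_to_buckets` above, with two dividers producing three buckets:

print(sort_to_buckets([10, 20], [5, 10, 15, 25]))
# -> [[5, 10], [15], [25]]: values <= 10, values <= 20, and the rest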
import re


def __unzip_labels(label):
    """unzips a string with more than one word

    Parameters
    ----------
    label : tuple
        tuple or list of the text labels from the folder name

    Returns
    -------
    list
        split labels as a list of strings
    """
    labels = re.split('(?=[A-Z])', label)
    label = [label.lower() for label in labels]
    return label
c442eaadf18c51fdb7d9519bc00658b97657a225
20,754
import pathlib


def get_requirements():
    """Read requirements.txt and return a list of requirements."""
    here = pathlib.Path(__file__).absolute().parent
    requirements = []
    filename = here.joinpath('requirements.txt')
    with open(filename, 'r') as fileh:
        for lines in fileh:
            requirements.append(lines.strip())
    return requirements
70cae7cba123f0bf738c5ca1c36315bb5534cb02
20,755
def check_user(user, event):
    """
    Checks whether the ``ReactionAddEvent``, ``ReactionDeleteEvent``'s
    user is the same as the given one.

    Parameters
    ----------
    user : ``ClientUserBase``
        The user who should be matched.
    event : ``ReactionAddEvent``, ``ReactionDeleteEvent``
        The reaction addition or deletion event.
    """
    return (event.user is user)
37c5e280ced5a3470ac9691e6cb0dcadf3e3c550
20,756
def _encode_to_utf8(s):
    """ Required because h5py does not support python3 strings

    converts a string to utf-8 encoded bytes
    """
    return s.encode('utf-8')
05be6da8d53dd8c29527d5822c27623a9eb49d18
20,757
from typing import List


def countValidArrangements(adapters: List[int]) -> int:
    """
    Counts valid arrangements of the adapters (sorted list)
    """
    paths: List[int] = [0] * (adapters[-1] + 1)
    paths[0] = 1
    for index in range(1, adapters[-1] + 1):
        for i in range(1, 4):
            if index - i in adapters:
                paths[index] += paths[index - i]
    return paths[-1]
fe296424a31a3e59c0af2516c67c284f463d2747
20,760
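A usage sketch for `countValidArrangements` above. Since `paths[0] = 1` seeds the 0-jolt outlet, the sorted input list is assumed to include 0:

print(countValidArrangements([0, 1, 2, 3]))  # -> 4
# the four chains: 0-3, 0-1-3, 0-2-3, 0-1-2-3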
import time


def sync_zones(conn, namespace, network, zones):
    """Synchronize a local set of zones with one in broadstreet.

    `namespace` should be something very unique, a UUID or identifier of a
    product. It will be pre-pended to the alias of all zones with a dot (.).

    `zones` is a dictionary keyed by the zone alias. The values are
    dictionaries of the zone attributes.

    `network` integer id of the network to modify.

    `conn` is a broadstreet API connection.
    """
    def backoff():
        # sleep for 50 milliseconds after making a WRITE request so as to
        # not bombard the broadstreet API
        time.sleep(0.05)

    created = []
    fixed = []
    deleted = []
    unchanged = []
    ignored = []
    have_zones = {}
    seen = set([])
    for zone in conn.get_zones(network):
        alias = zone.get('alias')
        if not alias or not alias.startswith(namespace + '.'):
            # only consider zones in our namespace
            ignored.append(zone['id'])
            continue
        ign, alias = alias.split(namespace + '.', 1)
        assert not ign, ign
        if alias in seen:
            # DUPLICATE, let's delete to remove any ambiguities
            deleted.append(zone)
            conn.delete_zone(network, zone['id'])
            backoff()
            continue
        seen.add(alias)
        have_zones[alias] = zone
        wanted = zones.get(alias, None)
        if wanted is None:
            deleted.append(zone)
            conn.delete_zone(network, zone['id'])
            backoff()
        else:
            if wanted['name'] != zone['name']:
                conn.update_zone(network, zone['id'], name=wanted['name'])
                fixed.append(zone['id'])
                backoff()
            else:
                unchanged.append(zone['id'])
    for alias, wanted in zones.items():
        if alias in have_zones:
            continue
        ns_alias = namespace + '.' + alias
        created.append(ns_alias)
        conn.create_zone(network, wanted['name'], alias=ns_alias)
        backoff()
    return dict(
        created=created,
        unchanged=unchanged,
        deleted=deleted,
        fixed=fixed,
        ignored=ignored)
9e9d896216f73c038760420c1fad0158b53be036
20,761
def action_description(text):
    """Decorator for adding a description to a command action.

    To display help text on an action call instead of the common category
    help text, the action function can be decorated.

    `command <category> <action> -h` will show the description and
    arguments.
    """
    def _decorator(func):
        func.description = text
        return func
    return _decorator
633f2f840a49b4239248dd48e32e58ae6b749f10
20,762
def unsquish(selected, unadapted_data):
    """Transform our selected squished text back to the original text

    selected: the selected sentences
    unadapted_data: the list of original unadapted sentences

    returns: the selected sentences, with clear text
    """
    for i, line in enumerate(selected):
        items = line.split('\t')
        items[8] = unadapted_data[int(items[5]) - 1]
        selected[i] = '\t'.join(items)
    return selected
3853a24d1644fd68a7635cc9400cbc5354e99ace
20,763
import torch


def sample_idxs_from_loader(idxs, data_loader, label):
    """Returns data id's from a dataloader."""
    if label == 1:
        dataset = data_loader.dataset.dataset
    else:
        dataset = data_loader.dataset.dataset
    return torch.stack([dataset[idx.item()][0] for idx in idxs])
bef007d08b4dd2efe94a19525c166ffe859e67bd
20,764
def change_root(devices_map, new_root_border_id):
    """This function sets the specified device as a root and corrects all
    hierarchy taking the new root into account
    """
    def go_down_and_increase_depth(board_id, initial_depth):
        """This subfunction walks down the tree and increases the "depths"
        values for children. This keeps the hierarchy, just resets
        "depths" to new values
        """
        # set new depth for the device
        devices_map[board_id]["depths"] = initial_depth
        for child_board_id in devices_map[board_id]["children"]:
            # call itself for each child with the increased depth
            go_down_and_increase_depth(child_board_id, initial_depth + 1)

    def find_father(some_board_id):
        """This subfunction returns the ancestor for the specified device
        (if available)
        """
        for board_id in devices_map.keys():  # all devices
            for child_board_id in devices_map[board_id]["children"]:  # their children
                if child_board_id == some_board_id:  # if the device is in the list
                    return board_id  # we found the ancestor
        return None  # or there is no ancestor (the specified device is already the root)

    # something the script cannot do immediately because it can break the algorithm
    postponed_relation_changes = []
    b_id = new_root_border_id
    depth = 0
    while True:
        father_b_id = find_father(b_id)
        if father_b_id:
            # cut the tree above the current device - remove itself from
            # the list of children at the ancestor
            devices_map[father_b_id]["children"].remove(b_id)
            # set up the ancestor as a child later
            postponed_relation_changes.append({"father": b_id, "child": father_b_id})
        # resets depth values in tree below the current device
        go_down_and_increase_depth(b_id, depth)
        if not father_b_id:
            break  # we have reached the old root, branches already treated, so we can stop
        b_id = father_b_id
        depth += 1
    for new_relation in postponed_relation_changes:
        # setup new hierarchy downwards the tree
        devices_map[new_relation["father"]]["children"].append(new_relation["child"])
a34414b288f95756ff5b14f320b056f4a6b902ac
20,765
def test_cards():
    """ Discover test cards """
    return [
        # Auth.Net test cards
        "6011000000000012",
        # Braintree test cards
        "6011111111111117",
        "6011000990139424",
        # Stripe test cards
        "6011111111111117",
        "6011000990139424",
        # WorldPay test cards
        "6011000400000000",
    ]
6d781b413ed4f7288274b78605ac77b454ca5b1f
20,766
import math


def _close_descent(a, b, rtol, atol, equal_nan, flatten=True):
    """
    Returns a boolean or sequence comparing two inputs element-wise within
    a tolerance.

    This is a recursive function intended to implement `allclose` and
    `isclose`. Which one it implements depends on the value of `flatten`.
    If `flatten` is True, it returns a boolean. Otherwise it returns a
    value of the same shape as the inputs.

    This method uses coded exceptions to abort if the inputs are invalid.

    :param a: Input to compare
    :type a: number or sequence
    :param b: Input sequence to compare
    :type b: number or sequence
    :param rtol: The relative tolerance parameter (Optional).
    :type rtol: ``float``
    :param atol: The absolute tolerance parameter (Optional).
    :type atol: ``float``
    :param equal_nan: Whether to compare NaN's as equal (Optional).
    :type equal_nan: ``bool``
    :param flatten: Whether to flatten the final answer (Optional)
    :type flatten: ``bool``
    :return: a boolean or sequence comparing two inputs element-wise
    :rtype: ``bool`` or sequence
    """
    if type(a) in [float, int]:
        if not type(b) in [float, int]:
            try:
                test = b[0]
            except:
                raise ValueError()  # Shape mismatch
            raise TypeError(2)  # Content mismatch
        elif math.isinf(a) or math.isinf(b):
            return math.isinf(a) and math.isinf(b)
        elif not math.isnan(a) and not math.isnan(b):
            return abs(a - b) <= atol + rtol * abs(b)
        elif equal_nan:
            return math.isnan(a) and math.isnan(b)
        else:
            return False
    elif type(b) in [float, int]:
        try:
            test = a[0]
        except:
            raise ValueError()  # Shape mismatch
        raise TypeError(1)  # Content mismatch

    try:
        test = a[0]
    except:
        raise TypeError(1)  # Content mismatch
    try:
        test = b[0]
    except:
        raise TypeError(2)  # Content mismatch

    if len(a) != len(b):
        raise ValueError(6)

    if flatten:
        result = True
        for pos in range(len(a)):
            result = result and _close_descent(a[pos], b[pos], rtol, atol,
                                               equal_nan, flatten)
    else:
        result = []
        for pos in range(len(a)):
            result.append(_close_descent(a[pos], b[pos], rtol, atol,
                                         equal_nan, flatten))
    return result
d26f23110bea7261f0d7fda1cb20356da670a42e
20,767
def to_r_camel(s):
    """
    Turns fields from python snake_case to the PCR frontend's rCamelCase.
    """
    return "r" + "".join([x.title() for x in s.split("_")])
2380807ba449c65962657721842ef910d569aa03
20,769
def load_events(filename):
    """
    load all events from a text file (W,E,X,Y,Z,WX,WY,WZ)
    """
    events = []
    with open(filename) as f:
        for line in f:
            # if line.find("G4W") != -1:
            #     continue
            # if line.find("GGG") != -1:
            #     continue
            line = line.strip()
            s = line.split()
            e = []
            for n in s:
                e.append(float(n))
            if len(e) == 8:
                events.append(e)
    if len(events) == 0:
        return None
    return events
c64c28ba050cbf06508e600ea92bf3ab1a00d825
20,770
import os


def read_EMCEE_options(config, check_files=True):
    """
    parses EMCEE options

    Parameters:
        config - configparser.ConfigParser instance
        check_files - *bool* - should we check if files exist?

    Returns:
        starting - dict - specifies PDFs for starting values of parameters
        parameters_to_fit - list - corresponding names of parameters
        min_values - dict - prior minimum values
        max_values - dict - prior maximum values
        emcee_settings - dict - a few parameters needed for EMCEE
        priors_gauss - dict - gaussian priors
        priors_tabulated - dict - priors in the form of a tabulated
            function (here defined by a text file with a histogram
            [bin centers in the first column])
    """
    # mean and sigma for start
    section = 'EMCEE_starting_mean_sigma'
    parameters_to_fit = [var for var in config[section]]
    starting = {}
    for param in parameters_to_fit:
        words = config.get(section, param).split()
        if len(words) < 2:
            msg = 'Wrong input in cfg file:\n{:}'
            raise ValueError(msg.format(config.get(section, param)))
        if len(words) == 2:
            words.append('gauss')
        starting[param] = [float(words[0]), float(words[1])] + words[2:]

    # prior min and max values
    min_values = {}
    section = 'EMCEE_min_values'
    if section in config.sections():
        for var in config[section]:
            min_values[var] = config.getfloat(section, var)
    max_values = {}
    section = 'EMCEE_max_values'
    if section in config.sections():
        for var in config[section]:
            max_values[var] = config.getfloat(section, var)

    # EMCEE settings
    emcee_settings = {
        'n_walkers': 4 * len(parameters_to_fit),
        'n_steps': 1000,
        'n_burn': 50,
        'n_temps': 1}
    section = 'EMCEE_settings'
    files = ['file_acceptance_fractions', 'file_posterior', 'file_corner',
             'file_trace']
    if section in config.sections():
        for var in config[section]:
            if var in files:
                emcee_settings[var] = config.get(section, var)
                if check_files and os.path.isfile(emcee_settings[var]):
                    raise FileExistsError(emcee_settings[var])
            else:
                emcee_settings[var] = config.getint(section, var)
    if emcee_settings['n_steps'] < emcee_settings['n_burn']:
        msg = "This doesn't make sense:\nn_steps = {:}\nn_burn = {:}"
        raise ValueError(msg.format(
            emcee_settings['n_steps'], emcee_settings['n_burn']))
    emcee_settings['PTSampler'] = False
    if emcee_settings['n_temps'] > 1:
        emcee_settings['PTSampler'] = True

    # gaussian priors
    priors_gauss = dict()
    section = "priors_gauss"
    if section in config.sections():
        for key in config[section]:
            words = config.get(section, key).split()
            priors_gauss[key] = [float(words[0]), float(words[1])]

    # tabulated priors
    priors_tabulated = dict()
    section = "priors_tabulated"
    if section in config.sections():
        for key in config[section]:
            priors_tabulated[key] = config.get(section, key)

    out = (starting, parameters_to_fit, min_values, max_values,
           emcee_settings, priors_gauss, priors_tabulated)
    return out
56ca3d26c2a78d3f54edd729425d3de3a12ec0dd
20,771
def is_theme(labels, zen_issue):
    """Check If Issue Is a Release Theme.

    Use the input Github Issue object and Zenhub Issue object to check:
    * if issue is an Epic (Zenhub)
    * if issue contains a `theme` label
    """
    if zen_issue['is_epic']:
        if 'theme' in labels:
            return True
380dbe8d4c9105a49a8ae211f55dcbfd35c38d3c
20,775
from typing import Dict
from typing import Any
import pprint


def describe_item(data: Dict[str, Any], name: str) -> str:
    """
    Function that takes in a data set (e.g. CREATURES, EQUIPMENT, etc.)
    and the name of an item in that data set and returns detailed
    information about that item in the form of a string that is ready to
    be sent to Discord
    """
    item = data.get(name)
    if item is None:
        return "I'm sorry. I don't know about that thing"
    s = pprint.pformat(item, indent=2, sort_dicts=True)
    return s
4df9e99f713effc00714342f54e2bd135b580675
20,777
def mini(a, b):
    """
    Minimal value

    >>> mini(3, 4)
    3
    """
    if a < b:
        return a
    return b
2d49ce51bd239dd5ceb57396d7ed107f1309c203
20,778
def measures_to_notes(measures):
    """
    measures: List[List[List[str, float, float]]] -> List[List[str, float, float]]

    Breaks up a list of measures of notes into a list of notes.
    """
    notes = []
    for i in range(len(measures)):
        notes.extend(measures[i])
    return notes
56e33cd543d5d503b0894c5d6f9e84a61f358313
20,780
def prod(iterable):
    """Computes the product of all items in iterable."""
    result = 1
    for item in iterable:
        result *= item
    return result
f5ddea44c14a6d1dc77a3049723609358c1e8525
20,781
def areinstance(tokens, classes):
    """
    >>> tokens = (TimeToken(15), TimeToken(16))
    >>> areinstance(tokens, TimeToken)
    True
    >>> tokens = (TimeToken(15), DayToken(7, 5, 2018))
    >>> areinstance(tokens, TimeToken)
    False
    >>> areinstance(tokens, (TimeToken, DayToken))
    True
    """
    assert isinstance(classes, type) or isinstance(classes, tuple), \
        "Classes must either be a tuple or a type."
    if isinstance(classes, type):
        classes = (classes,)
    return all([
        any([isinstance(token, cls) for cls in classes])
        for token in tokens])
27cf41a668dfcd52975becd0ae96c81d2c9ab385
20,782
import json


def aws_credentials_from_file(path):
    """
    Get the AWS S3 Id and Secret credentials, from a json file in the
    format: {"AWS_ID": "AWS_SECRET"}
    """
    with open(path, 'r') as f:
        key_dict = json.load(f)
    for key in key_dict:
        aws_id = key
        aws_secret = key_dict[key]
    return aws_id, aws_secret
68d04f3cdb223a9ccaa497981a8a0e3bc9a23378
20,783
def do_decomposition(gene_trail, selection):
    """Given a list of lists gene_trail and indexes for every item to be
    selected in every list in gene_trail, returns a list representing the
    corresponding decomposition.

    For example, if gene_trail is [['a', 'b'], ['c'], ['d', 'e']] and the
    index for the first list (['a', 'b']) is 0, the index for the second
    list (['c']) is 0, and the index for the third list (['d', 'e']) is 1,
    then the corresponding decomposition of gene_trail is ['a', 'c', 'e'].

    :param gene_trail: list of lists where list items are identifiers of
        genes involved in reactions of a CoMetGeNe trail
    :param selection: dict storing the currently selected item for every
        list in gene_trail
    :return: list representing the decomposition of gene_trail given by
        indexes stored in 'selection'
    """
    for i in range(len(gene_trail)):
        assert i in selection.keys()

    for list_index in selection:
        assert 0 <= selection[list_index] < len(gene_trail[list_index])

    decomposition = list()
    for i in range(len(gene_trail)):
        decomposition.append(gene_trail[i][selection[i]])

    return decomposition
cbe3a942db5fb447f9e7ffe8f0866419b04f81f1
20,784
def processEnum(enum, tableExists=True, join=True):
    """
    Take an Enum object generated by the PyDBML library and use it to
    generate SQLite DDL for creating an enum table for "full" enum
    emulation mode only.

    Parameters:
    enum (Enum): Enum object generated by PyDBML library representing an
        SQL enum.
    tableExists (bool): Default is True. If True, all generated
        `CREATE TABLE` SQLite statements will have `IF NOT EXISTS`
        language included.
    join (bool): Default is True. If True, function will `join` the
        result list of string segments with an empty string and return
        the resulting string to you. Otherwise, the one-dimensional list
        of string segments will be returned to you directly.

    Returns:
    str or list of str: SQLite DDL for creating a table to emulate SQL
        enum functionality.
    """
    segments = []
    segments.append(
        f'CREATE TABLE {"IF NOT EXISTS" if tableExists else ""} {enum.name} (\n'
        ' id INTEGER PRIMARY KEY,\n type TEXT NOT NULL UNIQUE,\n'
        ' seq INTEGER NOT NULL UNIQUE\n);\n')
    for i, v in enumerate(enum.items):
        segments.append(
            f'INSERT INTO {enum.name}(type, seq) '
            f'VALUES (\'{v.name}\', {i + 1});\n')
    if join:
        segments = "".join(segments)
    return segments
832ca77c1287790ea33b26baacdeee5b3f611785
20,785
from typing import OrderedDict


def _get_file_metadata(help_topic_dict):
    """
    Parse a dictionary of help topics from the help index and return an
    ordered dictionary associating help file names with their titles.

    Assumes the help dictionary has already been validated.
    """
    retVal = OrderedDict()
    for file in sorted(help_topic_dict.keys()):
        retVal[file] = help_topic_dict[file][0]
    return retVal
6e03564f28ebc9ab8dc8ce12d314d18c35cf3518
20,786
def teens_to_text(num):
    """
    >>> teens_to_text(11)
    'eleven'
    >>> teens_to_text(15)
    'fifteen'
    """
    if num == 10:
        return 'ten'
    elif num == 11:
        return 'eleven'
    elif num == 12:
        return 'twelve'
    elif num == 13:
        return 'thirteen'
    elif num == 14:
        return 'fourteen'
    elif num == 15:
        return 'fifteen'
    elif num == 16:
        return 'sixteen'
    elif num == 17:
        return 'seventeen'
    elif num == 18:
        return 'eighteen'
    elif num == 19:
        return 'nineteen'
1c3288b9a152fe5b341c77b4bac66797b1aaf616
20,789
def is_no_overlap(*set_list):
    """ Test if there's no common item in several sets. """
    return (sum([len(s) for s in set_list])
            == len(set.union(*[set(s) for s in set_list])))
300253d499c22bb398837c9766a9f5d8f7b5c720
20,790
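A quick check of `is_no_overlap` above; it also accepts lists, since every argument is coerced with `set()`:

print(is_no_overlap({1, 2}, [3, 4]))  # True: no shared items
print(is_no_overlap({1, 2}, {2, 3}))  # False: 2 appears in both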
from warnings import warn


def _format_1(raw):
    """
    Format data with protocol 1.

    :param raw: returned by _load_raw
    :return: formatted data
    """
    data, metadata = raw[0]['data'], raw[0]['metadata']
    # Check for more content
    if len(raw) > 1:
        base_key = 'extra'
        key = base_key
        count = 0
        while key in metadata:
            key = base_key + f'_{count}'
            count += 1
        metadata[key] = raw[1:]
        warn('File contains extra information which will be returned in '
             f'metadata[{key}].')
    return data, metadata
0939ffb4242828a7857f0f145e4adeb90ec6cff1
20,791
def get_abyss_rank_mi18n(rank: int, tier: int) -> str:
    """Turn the rank returned by the API into the respective rank name
    displayed in-game."""
    if tier == 4:
        mod = ("1", "2_1", "2_2", "2_3", "3_1", "3_2", "3_3", "4", "5")[rank - 1]
    else:
        mod = str(rank)
    return f"bbs/level{mod}"
869946e701918c4d43af88bcca693ab9361b9608
20,792
import requests


def post_request(headers, json_data, url):
    """
    Posts a request to the given url with the given payload.

    args
    ----------
    headers : dict
        Holds authentication data for the request
    json_data : json
        The request payload
    url : str
        The api url
    """
    return requests.request(method="POST", url=url, headers=headers,
                            data=json_data)
6a8a69ca7b5d13be02e037e8e283f9a44d69b54b
20,793
def first(iterable):
    """
    Gets the first element from an iterable. If there are no items or
    there are not enough items, then None will be returned.

    :param iterable: Iterable
    :return: First element from the iterable
    """
    if iterable is None or len(iterable) == 0:
        return None
    return iterable[0]
246ca69493a601343e1b1da201c68e2d8a3adc55
20,794
def distribute_conc_by_cover(data, total_conc):
    """
    Divides the total concentration of the primers proportionally to the
    number of sequences detected by each primer.

    Arguments:
        - data: a pandas dataframe with sequences detected by the oligo as
          returned by remove_redundant_after_inosine_addition
        - total_conc: total primer concentration in nM
    """
    detected = data.sum(axis=1) > 0
    data = data.loc[detected, :]
    effec_conc = total_conc / float(len(data.index))
    ind_conc = (float(1) / data.sum(axis=1)) * effec_conc
    conc = data.apply(lambda x: x * ind_conc, axis=0).sum()
    return dict(conc)
e08f8168f6019689b717769264cc8a02c1444917
20,795
import re


def allsplitext(path):
    """Split all the pathname extensions, so that "a/b.c.d" -> "a/b", ".c.d" """
    match = re.search(r'((.*/)*[^.]*)([^/]*)', path)
    if not match:
        return path, ""
    else:
        return match.group(1), match.group(3)
06baf4d1a58c9f550bd2351171db57a8fc7620d2
20,796
def update_static_alloc_plan_and_get_size(self):
    """update static memory allocation plan without actual allocation

    :return: a dict that maps from comp node to size of allocation
        in bytes
    """
    return {k: v for k, v in self._update_static_alloc_plan_and_get_size()}
7b6838d4272b3a5f631f2220c537a7f805548bce
20,797
def is_prepositional_tag(nltk_pos_tag):
    """
    Returns True iff the given nltk tag is a preposition
    """
    return nltk_pos_tag == "IN"
746a1439613a2203e6247924fe480792e171eb7b
20,798
import re


def clean_data(d):
    """Replace newline, tab, and whitespace with single space."""
    return re.sub(r"\s{2}", " ", re.sub(r"(\t|\n|\s)+", " ", d.strip()))
3c6b9032588d0cb2ca359ff27d727ade7011048a
20,801
def sample_to_sum(N, df, col, weights):
    """Sample a number of records from a dataset, then return the smallest
    set of rows at the front of the dataset where the weight sums to more
    than N"""
    t = df.sample(n=N, weights=weights, replace=True)
    # Get the number of records that sum to N.
    arg = t[col].cumsum().sub(N).abs().astype(int).argmin()
    return t.iloc[:arg + 1]
bd4ac2d5885c6de02d61fc0acb03f6f314d1c3d4
20,803
def calc_alpha_init(alpha, decay):
    """
    Calculate the numerator such that at t=0, a/(decay+t) = alpha
    """
    if not decay or decay <= 0:
        return alpha
    else:
        return float(alpha * decay)
41725753868a01b89f0a1ba0bc0b37b0ac2499c2
20,807
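A worked example of the schedule behind `calc_alpha_init` above, where the rate a/(decay+t) starts at alpha and halves after `decay` steps:

a = calc_alpha_init(0.5, 100)  # -> 50.0
print(a / (100 + 0))           # 0.5, equals alpha at t=0
print(a / (100 + 100))         # 0.25, halved at t=decay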
import requests


def get_geolocation(all_the_ip_address):
    """
    Given a list of lists from `get_addresses()`, this function returns an
    updated list of lists containing the geolocation.
    """
    print("Getting geo information...")
    updated_addresses = []
    counter = 1
    # update header
    header_row = all_the_ip_address.pop(0)
    header_row.extend(['Country', 'City'])
    # get geolocation
    for line in all_the_ip_address:
        print("Grabbing geo info for row # {0}".format(counter))
        r = requests.get('https://freegeoip.net/json/{0}'.format(line[0]))
        line.extend([str(r.json()['country_name']), str(r.json()['city'])])
        updated_addresses.append(line)
        counter += 1
    updated_addresses.insert(0, header_row)
    return updated_addresses
8c4d92a5a19caf663f635760c1c490abbe40868f
20,808
def tuple_to_list(t):
    """
    Convert a markup tuple: (text, [(color, pos), ...])
    to a markup list: [(color, textpart), ...]

    This is the opposite to urwid.util.decompose_tagmarkup
    """
    (text, attrs) = t
    pc = 0
    l = []
    for (attr, pos) in attrs:
        if attr is None:
            l.append(text[pc:pc + pos])
        else:
            l.append((attr, text[pc:pc + pos]))
        pc += pos
    return l
466767cd7d1d43665141908661ca75b291b4db50
20,811
def getMPlugs(mFn, mPlugList):
    """
    get MPlugs from a list

    :param mFn: mFn
    :param mPlugList: list
    :return: list
    """
    if type(mPlugList) == list:
        returnList = list()
        for plug in mPlugList:
            mPlug = mFn.findPlug(plug, False)
            if mPlug.isArray or plug == "worldMatrix":
                returnList.append(mPlug.elementByLogicalIndex(0))
            else:
                returnList.append(mPlug)
        return returnList
    elif type(mPlugList) == tuple:
        returnTuple = list()
        for inputList in mPlugList:
            returnTuple.append([mFn.findPlug(mPlug, False) for mPlug in inputList])
        return tuple(returnTuple)
195cfd07e8771eaa4707e722742de68c0feed9df
20,812
from typing import Iterable
from typing import Tuple
import networkx


def build_graph(data: Iterable[Tuple[dict, str, dict]]):
    """
    Builds a NetworkX DiGraph object from (Child, Edge, Parent) triples.

    Each triple is represented as a directed edge from Child to Parent
    in the DiGraph.

    Child and Parent must be dictionaries containing all hashable values
    and a 'name' key (this is the name of the node in the DiGraph).

    Edge must be a string representing an edge label from Child to Parent.

    :param data: Iterable of (Child, Edge, Parent) triples.
    :rtype: networkx.DiGraph
    """
    g = networkx.DiGraph()
    for child_attrs, edge, parent_attrs in data:
        if 'name' not in child_attrs or 'name' not in parent_attrs:
            raise ValueError(
                "Both child and parent dicts must contain a 'name' key.\n"
                "Provided Child data: {}\n"
                "Provided Parent data: {}\n".format(child_attrs, parent_attrs)
            )
        # Copy dicts so popping 'name' doesn't affect the underlying data
        child_attrs, parent_attrs = child_attrs.copy(), parent_attrs.copy()
        child_name, parent_name = child_attrs.pop('name'), parent_attrs.pop('name')
        # Update node attributes only if the updated version has strictly
        # more data than the previous version
        if child_name not in g or set(child_attrs).issuperset(g.nodes[child_name]):
            g.add_node(child_name, **child_attrs)
        if parent_name not in g or set(parent_attrs).issuperset(g.nodes[parent_name]):
            g.add_node(parent_name, **parent_attrs)
        g.add_edge(child_name, parent_name, label=edge)
    return g
6d43f3d2b9698eaac54cfcee38fd005e6762eaf6
20,813
from xml.sax.saxutils import escape


def esc(str):
    """XML escape, but forward slashes are also converted to entity
    references and whitespace control characters are converted to spaces"""
    return escape(
        str,
        {"\n": " ", "\t": " ", "\b": " ", "\r": " ", "\f": " ", "/": "&#47;"}
    )
0a235e21f0bd911da0bfe62d7cf70b5b2e8b63e7
20,814
def macthing_templates(templates_peptide, templates_minipept):
    """
    The function searches for atoms of peptide bonds in a small peptide
    pattern.

    Parameters
    ----------
    templates_peptide : list
        List of atoms of peptide bonds.
    templates_minipept : list
        List of atoms of part of peptide bonds.

    Returns
    -------
    templates_minipept_cop : list
        Corrected list of atoms of part of peptide bonds.
    """
    templates_minipept_cop = templates_minipept.copy()
    for i in templates_minipept:
        check = 0
        for tp in templates_peptide:
            if check == len(i):
                continue
            check = 0
            for si in i:
                if si in tp:
                    check += 1
        if check != len(i):
            templates_minipept_cop.remove(i)
    return templates_minipept_cop
a5f42305ab57648e4243c3f4cac51aa347dd8c60
20,815
def divides(a, b):
    """
    Return True if a goes into b.

    >>> divides(3, 6)  # Tests!
    True
    >>> divides(3, 7)
    False
    """
    return b % a == 0
c80223323997d940e35b1b05f16d031d1ae769aa
20,816
def compare_versions(actual, expected):
    """Compare Python versions, return the exit code."""
    if actual < expected:
        print("Unsupported version {}.{}".format(actual[0], actual[1]))
        return 1
    else:
        m = "OK: actual Python version {}.{} conforms to expected version {}.{}"
        print(m.format(actual[0], actual[1], expected[0], expected[1]))
        return 0
1ad44078c13f8710df5681ac2c1bd10f8f547990
20,817
import torch


def to_gpu(x, on_cpu=False, gpu_id=None):
    """Tensor => Variable"""
    if torch.cuda.is_available() and not on_cpu:
        x = x.cuda(gpu_id)
    return x
49c8c91535f4ba93f682f6f28ed32c976eb56211
20,818
def find_first_feature(tree):
    """Finds the first feature in a tree, which we then use in the split
    condition. It doesn't matter which feature we use, as both of the
    leaves will add the same value.

    Parameters
    ----------
    tree : dict
        parsed model
    """
    if 'split' in tree.keys():
        return tree['split']
    elif 'children' in tree.keys():
        return find_first_feature(tree['children'][0])
    else:
        raise Exception("Unable to find any features")
d57d29c3aadb0269d39fa73d27cd56416cc3e456
20,819
def sum_product(array: list) -> dict:
    """
    BIG-O Notation: This will take O(N) time. The fact that we iterate
    through the array twice doesn't matter.

    :param array: list of numbers
    :return:
    """
    total_sum = 0
    total_product = 1
    for i in array:
        total_sum += i
    for i in array:
        total_product *= i
    return {"sum": total_sum, "product": total_product}
53a8ac6000f9c6f722ef7159e31d1dc5f069fa9b
20,821
import six
import ipaddress


def IPv4ToID(x):
    """
    Convert IPv4 dotted-quad address to INT.
    """
    # try:
    #     if six.PY2:
    #         id = int(ipaddress.IPv4Address(x.decode('utf-8')))
    #     else:
    #         id = int(ipaddress.IPv4Address(x))
    # except ipaddress.AddressValueError as err:
    #     if 'Expected 4 octets' in err.str:
    #         logger.info(str(err))
    # return id
    if six.PY2:
        id = int(ipaddress.IPv4Address(x.decode('utf-8')))
    else:
        id = int(ipaddress.IPv4Address(x))
    return id
f73feab66969ed8adc683a1ebf36cef4de5d5825
20,823
from pathlib import Path


def get_file_extension(filepath: str) -> str:
    """
    Returns the extension for a given filepath

    Examples:
        get_file_extension("myfile.txt") == "txt"
        get_file_extension("myfile.tar.gz") == "tar.gz"
        get_file_extension("myfile") == ""
    """
    extension_with_dot = "".join(Path(filepath).suffixes).lower()
    if extension_with_dot:
        return extension_with_dot[1:]
    return ""
d90dd071298c1ee429d913abd831030a60086b1b
20,826
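A few sample calls against `get_file_extension` above, showing the multi-suffix handling and lower-casing:

print(get_file_extension('myfile.TXT'))     # 'txt'
print(get_file_extension('backup.tar.gz'))  # 'tar.gz'
print(get_file_extension('Makefile'))       # ''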
def unescape(s):
    """The inverse of cgi.escape()."""
    s = s.replace('&quot;', '"').replace('&gt;', '>').replace('&lt;', '<')
    return s.replace('&amp;', '&')
3bad6bc3679405dd0d223ea8ab6362a996067ea5
20,827
from itertools import accumulate
from collections import Counter


def ZeroSumRange(a, unhashable=False):
    """
    Number of non-empty contiguous subsequences whose sum is the zero
    element.
    """
    *a, = accumulate(a)
    e = a[0] - a[0]
    if unhashable:
        a = [tuple(b) for b in a]
        e = tuple(e)
    C = Counter(a)
    C[e] += 1
    return sum(v * (v - 1) // 2 for v in C.values())
463789ce4938ac4b05eec0c850e1adab588d9480
20,828
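A worked example for `ZeroSumRange` above; it counts pairs of equal prefix sums, so for [1, -1, 1, -1] the four zero-sum runs are [1, -1] (twice), [-1, 1], and the whole list:

print(ZeroSumRange([1, -1, 1, -1]))  # -> 4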
def pick_inputs(workspace):
    """
    Figure out which inputs don't yet have fragments. This is useful when
    some of your fragment generation jobs fail and you need to rerun them.
    """
    frags_present = set()
    frags_absent = set()

    for path in workspace.input_paths:
        if workspace.fragments_missing(path):
            frags_absent.add(path)
        else:
            frags_present.add(path)

    # If no fragments have been generated yet, just return the directory to
    # make the resulting 'klab_generate_fragments' command a little simpler.
    if not frags_present:
        return [workspace.input_dir]

    print('{0} of {1} inputs are missing fragments.'.format(
        len(frags_absent), len(workspace.input_paths)))

    return sorted(frags_absent)
35e9ca4a83fdc25f870cdb8e4f094881569787c1
20,830
import json


def load_entity_uri_to_name(path):
    """
    :param path:
    :return: json with uri, label
    """
    with open(path) as f:
        return json.load(f)
37454284fa07da204dc4fc59029d470fd66e4b26
20,831
import random


def seq_permutation(seq: str, k: int = 1) -> str:
    """Shuffle a genomic sequence

    Args:
        seq (str): Sequence to be shuffled.
        k (int): For `k==1`, we shuffle individual characters. For `k>1`,
            we shuffle k-mers.

    Returns:
        Shuffled sequence.
    """
    if k == 1:
        return ''.join(random.sample(seq, len(seq)))
    else:
        kmers = [seq[i:(i + k)] for i in range(0, len(seq), k)]
        return ''.join(random.sample(kmers, len(kmers)))
a789c2a5cb00e2e5fd140ba889510e6b7fd556d3
20,832
import math


def get_normalized_meter_value(t):
    """
    Expects a value between 0 and 1 (0 -> 00:00:00 || 1 -> 24:00:00) and
    returns the normalized simulated household consumption at that time,
    according to this graph:
    http://blog.abodit.com/images/uploads/2010/05/HomeEnergyConsumption.png

    The formula can be examined on https://www.desmos.com/calculator

    The formula used is:
        f(x) = (sin(x) + (x / 2.5) * (-e^(x / 12) + 2.85) + 1) / 5

    It tries to mimic the graph as normalized values.
    f(0) = power consumption at 00:00:00
    f(PI*4) = power consumption at 24:00:00
    """
    x = t * math.pi * 4
    meter_value = math.sin(x) + (x / 2.5) * (-math.pow(math.e, x / 12.0) + 2.85) + 1
    normalized_meter_value = meter_value / 5.0
    return normalized_meter_value
03f40967f05de4d23d7600286bfc2927fea544d6
20,835
def is_quality_snv(var_to_test, AC_cutoff=None):
    """
    high quality variants will have FILTER == None
    AND we are ignoring insertions and deletions here
    """
    if AC_cutoff is not None:
        try:
            AC_cutoff = int(AC_cutoff)
            return (var_to_test.FILTER is None
                    and var_to_test.INFO.get('variant_type') == 'snv'
                    and var_to_test.INFO.get('AC') <= AC_cutoff)
        except ValueError:
            AC_cutoff = None
    return (var_to_test.FILTER is None
            and var_to_test.INFO.get('variant_type') == 'snv')
d474583126df19deb03b607f85880f79f31fdd1d
20,836
def delete_nth(order, max_e):
    """create a new list that contains each number of lst at most N times
    without reordering.
    """
    answer_dict = {}
    answer_list = []
    for number in order:
        if number not in answer_dict:
            answer_dict[number] = 1
        else:
            answer_dict[number] += 1
        if answer_dict[number] <= max_e:
            answer_list.append(number)
    return answer_list
8f057438b6778bc00563ca2d9752cd2f7c8bfff7
20,837
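A quick example of `delete_nth` above keeping at most two occurrences of each value while preserving order:

print(delete_nth([1, 1, 2, 1, 2, 2, 2], 2))  # -> [1, 1, 2, 2]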
def squared_dist(x1, x2):
    """Computes squared Euclidean distance between coordinate x1 and
    coordinate x2"""
    return sum([(i1 - i2)**2 for i1, i2 in zip(x1, x2)])
c4ae86e54eb1630c20546a5f0563d9db24eebd3a
20,838
def S_find_square_floors_values(_data_list):
    """ Returns locations of values which do not change """
    s_data = []
    ds = len(_data_list)
    pd = _data_list[0]
    start = end = -1
    for i in range(1, ds):
        if pd == _data_list[i]:
            if start == -1:
                start = i - 1
            if start != -1 and i == ds - 1:
                s_data.append((start, ds - 1))
        else:
            if start != -1:
                end = i - 1
                s_data.append((start, end))
                start = end = -1
        pd = _data_list[i]
    return s_data
09f875bdcbd05dcc889dfc1b39f186812f2cd27c
20,839
def itensity_rescaling_one_volume(volume):
    """
    Rescaling the intensity of an nd volume based on max and min value

    inputs:
        volume: the input nd volume
    outputs:
        out: the normalized nd volume
    """
    out = (volume + 100) / 340
    return out
58ee7f00c54b063fac23eeb166df5f6d6adb2941
20,840
import os
import stat


def make_job_file(job_dir, job_name, cores, mem, gpus, gpu_mem, low_prio,
                  requirements, rank, name=None, venv=None, conda=None,
                  conda_name=None, commands_fn=None, command=None,
                  queue_count=1, job_num=0, interactive=False):
    """
    Creates the condor_submit file and shell script wrapper for the job.

    Returns:
        file_name: The string path to the created condor_submit file.
    """
    # Set up job file
    job_path = job_dir + "/" + str(job_num)
    job_fn = job_path + ".job"
    job_file = open(job_fn, "w")

    # Condor Settings
    if job_name is not None:
        job_file.write("batch_name = \"" + job_name.strip() + "\"\n")
    job_file.write("request_cpus = " + str(cores) + "\n")
    job_file.write("request_memory = " + str(mem) + " GB\n")
    job_file.write("request_gpus = " + str(gpus) + "\n")
    if low_prio:
        job_file.write("priority = -10\n")
    if gpus > 0:
        job_file.write("requirements = (CUDAGlobalMemoryMb >= "
                       + str(gpu_mem * 1000) + ")\n")
    if requirements != "":
        job_file.write("requirements = " + requirements + "\n")
    if rank != "":
        job_file.write("rank = " + rank + "\n")

    # Log Files
    job_file.write("output = " + job_path + "_$(Process).out\n")
    job_file.write("error = " + job_path + "_$(Process).err\n")
    job_file.write("log = " + job_path + "_$(Process).log\n")

    # Shell Script Wrapper
    shell_wrapper = os.open(job_path + ".sh",
                            flags=os.O_WRONLY | os.O_CREAT | os.O_TRUNC,
                            mode=stat.S_IRWXU)
    shell_wrapper_text = "#!/usr/bin/env bash\n"
    if venv is not None:
        shell_wrapper_text = shell_wrapper_text + "source $PYENV/bin/activate\n"
        job_file.write("environment=\"PYENV=" + venv.strip() + "\"\n")
    elif conda is not None:
        shell_wrapper_text = (shell_wrapper_text + "source "
                              + os.path.join(conda.strip(), "etc/profile.d/conda.sh")
                              + "\n")
        if conda_name is not None:
            shell_wrapper_text = (shell_wrapper_text + "conda activate "
                                  + conda_name.strip() + "\n")
        else:
            shell_wrapper_text = shell_wrapper_text + "conda activate" + "\n"
    shell_wrapper_text += "exec \"$@\"\n"
    os.write(shell_wrapper, bytes(shell_wrapper_text, encoding="utf-8"))
    os.close(shell_wrapper)

    # Assign Executable, Arguments, and Queue Commands
    job_file.write("executable = " + job_path + ".sh\n")
    if commands_fn is not None:
        job_file.write("arguments = $(command)\n")
        job_file.write("queue command from " + commands_fn.strip())
    elif command is not None:
        job_file.write("arguments = " + command.strip() + "\n")
        if queue_count > 1:
            job_file.write("queue " + str(queue_count))
        elif queue_count == 1:
            job_file.write("queue")
    elif interactive:
        job_file.write("queue")
        job_fn = f"-i {job_fn}"
    job_file.close()
    return job_fn
fcdc416a0db825da633921ae1a8f7e920728ef1c
20,841
def kniferism(words):
    """Convert a list of words formatted with the kniferism technique.

    Mid: f[o]o b[a]r => fao bor

    :param words (list) - The list of words to operate on
    :rtype words (list) - The updated list of words

    >>> kniferism(['foo', 'bar'])
    ['fao bor']
    """
    if len(words) < 2:
        raise ValueError('Need more than one word to combine')
    new_words = []
    for k, word in enumerate(words):
        try:
            middle_second = int(len(words[k + 1]) / 2)
            middle_first = int(len(word) / 2)
            new_words.append('{}{}{} {}{}{}'.format(
                word[:middle_first],
                words[k + 1][middle_second],
                word[middle_first + 1:],
                words[k + 1][:middle_second],
                word[middle_first],
                words[k + 1][middle_second + 1:]))
        except IndexError:
            continue
    return new_words
b36f5eda1f0df44c7d2f772505fbd83af6d69913
20,842
def bed2gtf(infile, outfile):
    """Convert BED to GTF

    chrom chromStart chromEnd name score strand
    """
    with open(infile) as r, open(outfile, 'wt') as w:
        for line in r:
            fields = line.strip().split('\t')
            start = int(fields[1]) + 1
            w.write('\t'.join([
                fields[0],
                'BED_file',
                'gene',
                str(start),
                fields[2],
                '.',
                fields[5],
                '.',
                'gene_id "{}"; gene_name "{}"'.format(fields[3], fields[3])
            ]) + '\n')
    return outfile
92892e73e56851d3cb4054d3bdf42f029223e0a6
20,843
import math


def ucb(board, tree):
    """Given board, return the move with the optimal UCB depending on player"""
    t = tree[str(board)][1]  # node's total visits
    best_ucb = None
    best_move = None
    ucb = 0
    is_white = board.turn
    if is_white:
        best_ucb = -float('inf')
    else:
        best_ucb = float('inf')
    for move in board.legal_moves:
        board.push(move)
        r_j = tree[str(board)][0] / tree[str(board)][1]  # child node's win percentage
        n_j = tree[str(board)][1]  # child node's total visits
        if is_white:
            ucb = r_j + math.sqrt((2 * math.log(t)) / n_j)
            if ucb >= best_ucb:
                best_ucb = ucb
                best_move = move
        else:
            ucb = r_j - math.sqrt((2 * math.log(t)) / n_j)
            if ucb <= best_ucb:
                best_ucb = ucb
                best_move = move
        board.pop()
    return best_move
274f1bbfe069ab31a008384715a18e2cad0bf986
20,844
import time


def timetostr(timestamp, format="%Y-%m-%d %H:%M:%S"):
    """Convert a timestamp to a time string.

    :param timestamp: the timestamp
    :param format: the time format
    """
    timestr = time.strftime(format, time.localtime(timestamp))
    return timestr
16a7e6d6bed3b7cfb15827d0a8472982d6451c46
20,845
import re


def strip_quotes(text):
    """
    the pipeline we use does really weird stuff to quotes. just going to
    remove them for now or forever
    """
    text = re.sub(r"``", r"''", text)
    text = re.sub(r'"', r"'", text)
    return text
0d30cf02026c75471a48692920718faf45866172
20,846
def convert_to_interval(id_array):
    """
    Convert consecutive timestamps' ids to intervals for easier retrieval.

    :param id_array: a list containing all the camera ids of outliers
    :return: a list of strings representing the abnormal camera id ranges
    """
    interval = []
    current_interval = [id_array[0]]
    digit = id_array[0] // 100
    for i in range(1, len(id_array)):
        now = id_array[i]
        if now // 100 == digit:
            current_interval.append(now)
        elif now // 100 == digit + 1:
            current_interval.append(now)
            digit += 1
        else:
            if current_interval[-1] - current_interval[0] >= 5:
                interval.append(str(current_interval[0]) + "-" + str(current_interval[-1]))
            current_interval = [now]
            digit = now // 100
    if current_interval[-1] - current_interval[0] >= 5:
        interval.append(str(current_interval[0]) + "-" + str(current_interval[-1]))
    return interval
dd9f25ecee49d0b7dc41015bd9ee2ebb3475a879
20,847
import re


def addhead(re_check, head):
    """
    Add a file header.

    :param re_check:
    :param head:
    :return:
    """
    re_check_o = re.compile(re_check)

    def _(file, code, config):
        if not re_check_o.match(file) is None:
            return file, head + code
        return file, code

    return _
a513aac065a389811410d1bf600d350218668ef4
20,848
def de_bruijn_strings(k: int, n: int):
    """
    De Bruijn sequence for alphabet size k (0,1,2...k-1) and subsequences
    of length n. Modified from wikipedia Sep 22 2013 to use strings.
    """
    global sequence
    global a
    a = "0" * k * n
    sequence = ""

    def db(t, p):
        global sequence
        global a
        if t > n:
            if n % p == 0:
                for j in range(1, p + 1):
                    sequence = sequence + a[j]
        else:
            a = a[:t] + a[t - p] + a[t + 1:]
            db(t + 1, p)
            for j in range(int(a[t - p]) + 1, k):
                a = a[:t] + str(j) + a[t + 1:]
                db(t + 1, t)
        return sequence

    db(1, 1)
    return sequence
af52574fc89f215cec4918405b0099e616f565bb
20,849
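A usage note for `de_bruijn_strings` above: it communicates through the module-level globals `sequence` and `a`, so it is not re-entrant, but a single call works as expected:

print(de_bruijn_strings(2, 3))  # -> '00010111', length 2**3
# every 3-bit pattern appears exactly once when read cyclically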
def AU():
    """
    The `AU` function returns the Astronomical unit according to the IERS
    numerical standards (2010).
    """
    return 1.49597870700e11
602f1528f896a948c07b970640cc43dc08e84f18
20,850
import re


def clean_str(text, language='english'):
    """
    Cleans an input string and prepares it for tokenization.

    Method to pre-process text for training word embeddings. This follows
    the preprocessing script by Sebastian Ruder:
    https://s3.amazonaws.com/aylien-main/data/multilingual-embeddings/preprocess.py
    and is used in this paper: https://arxiv.org/pdf/1609.02745.pdf

    :type text: unicode
    :param text: input text
    :return the cleaned input string
    """
    text = text.lower()
    # replace all numbers with 0
    text = re.sub(r"[-+]?[-/.\d]*[\d]+[:,.\d]*", ' 0 ', text)
    if language == 'english':
        # English-specific pre-processing
        text = re.sub(r"\'s", " \'s", text)
        text = re.sub(r"\'ve", " \'ve", text)
        text = re.sub(r"n\'t", " n\'t", text)
        text = re.sub(r"\'re", " \'re", text)
        text = re.sub(r"\'d", " \'d", text)
        text = re.sub(r"\'ll", " \'ll", text)
    elif language == 'french':
        # French-specific pre-processing
        text = re.sub(r"c\'", " c\' ", text)
        text = re.sub(r"l\'", " l\' ", text)
        text = re.sub(r"j\'", " j\' ", text)
        text = re.sub(r"d\'", " d\' ", text)
        text = re.sub(r"s\'", " s\' ", text)
        text = re.sub(r"n\'", " n\' ", text)
        text = re.sub(r"m\'", " m\' ", text)
        text = re.sub(r"qu\'", " qu\' ", text)
    elif language == 'spanish':
        # Spanish-specific pre-processing
        text = re.sub(r"¡", " ", text)
    elif language == 'chinese':
        # Chinese text needs to be segmented before pre-processing
        pass
    # remove commas, colons, semicolons, periods, brackets, hyphens,
    # <, >, and quotation marks
    text = re.sub(r'[,:;\.\(\)-/"<>]', " ", text)
    # separate exclamation marks and question marks
    text = re.sub(r"!+", " ! ", text)
    text = re.sub(r"\?+", " ? ", text)
    text = re.sub(r"\s+", " ", text)
    return text.strip()
292626f6feafbf408ff04f763300cb288c135cee
20,851
def default_func(entity, attribute, value):
    """
    Look in the entity for an attribute with the provided value. First
    proper attributes are checked, then extended fields. If neither of
    these are present, return False.
    """
    try:
        return getattr(entity, attribute) == value
    except AttributeError:
        try:
            return entity.fields[attribute] == value
        except (AttributeError, KeyError):
            return False
0ec6636c20d3f5eedb7a7c1f2457f72ecddb7545
20,852
def get_last_file(paths):
    """Returns last modified file from list of paths"""
    if paths:
        return sorted(paths, key=lambda f: f.stat().st_mtime)[-1]
    return ""
51978ae6dbb3686de40eebda156825904895a854
20,853
from inspect import signature


def check_callable(input_func, min_num_args=2):
    """Ensures the input func 1) is callable, and 2) can accept a min # of args"""
    if not callable(input_func):
        raise TypeError('Input function must be callable!')
    # would not work for C/builtin functions such as numpy.dot
    func_signature = signature(input_func)
    if len(func_signature.parameters) < min_num_args:
        raise TypeError('Input func must accept at least {} inputs'.format(min_num_args))
    return input_func
7358d0685fd6f04f6a2ecf75aee66288a25a5e08
20,854
def _sql_gen_intermediate_pi_aggregate(model, table_name="df_e"):
    """
    This intermediate step is calculated for efficiency purposes.

    In the maximisation step, to compute the new pi probability
    distributions, we need to perform a variety of calculations that can
    all be derived from this intermediate table. Without this intermediate
    table, we'd be repeating these calculations multiple times.
    """
    ccs = model.current_settings_obj.comparison_columns_list
    gamma_cols_expr = ", ".join([cc.gamma_name for cc in ccs])

    sql = f"""
    select {gamma_cols_expr},
    sum(match_probability) as expected_num_matches,
    sum(1 - match_probability) as expected_num_non_matches,
    count(*) as num_rows
    from {table_name}
    group by {gamma_cols_expr}
    """
    return sql
fc6c812dbf0eee9bc5a78590125e547c24f8cc9b
20,857
def evaluate_svm(test_data, train_data, classifier, logfile=None):
    """
    Evaluates svm, writes output to logfile in tsv format with columns:
    - svm description
    - accuracy on test set
    - accuracy on train set
    """
    train_x, train_y = train_data
    classifier.fit(train_x, train_y)

    test_x, test_y = test_data
    train_accuracy = classifier.score(train_x, train_y)
    test_accuracy = classifier.score(test_x, test_y)

    out_msg = '\t'.join((str(classifier.C), str(classifier.kernel),
                         str(test_accuracy), str(train_accuracy)))
    print(out_msg)
    if logfile is not None:
        with open(logfile, 'a+') as lf:
            lf.writelines([out_msg])
    return test_accuracy, train_accuracy
21550a91227eca49a05db2f3216bc72c50a74d1e
20,861
def flatten_results(results: dict, derivation_config: dict) -> dict:
    """Flatten and simplify the results dict into <metric>:<result> format.

    Args:
        results (dict): The benchmark results dict, containing all info
            such as reduction types too
        derivation_config (dict): The configuration defining how metrics
            are derived from logs

    Returns:
        flat_results (dict): The flattened dict of all results in
            <metric>:<result> format
    """
    flat_results = {}
    for metric, results_dict in results.items():
        key = derivation_config.get(metric, {}).get("reduction_type", "mean")
        flat_results[metric] = results_dict[key]
    return flat_results
045c1e1d856c20b37d2d78d8dd6c3fd76cb91777
20,862
def path_string(obj, **kwargs):
    """return physical path as a string"""
    return "/".join(obj.getPhysicalPath())
bae683d392b519f8c43f5e9d1415abb8cf8de636
20,865
def mock_list_tags_response():
    """Create mock for lambda list tags"""
    response = {
        "Tags": {
            "SvcOwner": "Cyber",
            "name": "lambda-function",
            "Environment": "test",
            "Service": "cyber-service",
            "SvcCodeURL": "https://github.com/alphagov/my-madeup-repo",
            "DeployedUsing": "Terraform",
            "Name": "lambda-function",
        }
    }
    return response
ade45167fe2c96ba99aa80fc3381e7ddd7290c53
20,866