content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
def indent(string, indent):
    """Prefix every line of *string* with *indent*.

    A trailing newline on the input is preserved on the output.
    """
    prefixed = [indent + line for line in string.splitlines()]
    result = "\n".join(prefixed)
    if string.endswith("\n"):
        result += "\n"
    return result
7a6ee5f7a4a98a0f43957b82a926140b95f3568b
65,901
import pathlib
import gzip


def get_content_linenum_gzip(thepath: str, linenum: int) -> str:
    """Return line *linenum* (1-based) of a gzipped text file.

    Fixes: previously the whole file was read into memory via readlines(),
    and a linenum <= 0 silently wrapped around via negative indexing; now
    the file is streamed and invalid line numbers raise explicitly.

    Args:
        thepath: Path of the gzipped file.
        linenum: 1-based line number to fetch.

    Returns:
        str: The requested line, including its line terminator.

    Raises:
        ValueError: If linenum < 1.
        IndexError: If the file has fewer than linenum lines.
    """
    if linenum < 1:
        raise ValueError("linenum must be >= 1, got {}".format(linenum))
    path_obj = pathlib.Path(thepath).resolve()
    # gzip.open accepts a Path object; "rt" decodes to text.
    with gzip.open(path_obj, "rt") as f:
        for current, line in enumerate(f, start=1):
            if current == linenum:
                return line
    raise IndexError("{} has fewer than {} lines".format(thepath, linenum))
d7ff6cade52ea140a3895bd8459ea91156e8a53a
65,902
def assign(value, index, arr):
    """Functional-style numpy assignment: set arr[index] = value, return arr.

    Useful in function pipelines where an expression is required. Note the
    input array is mutated in place — the return value is the SAME array.

    >>> a = np.arange(6).reshape((2, 3))
    >>> assign(1, (slice(None), 2), a)
    array([[0, 1, 1],
           [3, 4, 1]])
    """
    arr[index] = value
    return arr
446e918ee621c58c5213b185310ce5036a2f6e0a
65,904
import re


def scrub_words(text):
    """Basic cleaning of texts.

    Adapted from kavgan/nlp-in-practice (Text Preprocessing Examples).
    Strips HTML markup, replaces non-word characters and digits with
    spaces, then collapses repeated spaces.
    """
    without_markup = re.sub("(<.*?>)", "", text)
    words_only = re.sub("(\\W|\\d)", " ", without_markup)
    # Collapse the runs of spaces introduced above (stackoverflow.com/q/2077897).
    return re.sub(' +', ' ', words_only).strip()
ed7224d56b02e10fc683a742f1c1313b2b243c26
65,911
def read_file(path):
    """Return the entire text content of the file at *path*.

    Improvement: the manual open/try/finally/close dance is replaced with a
    with-statement, which guarantees the file is closed on any exit path.

    :param path: path of the file to read
    :return: the file's text content as a string
    """
    with open(path) as fd:
        return fd.read()
d7917f960ab7474f80feb02da9ee3fb0c15ccf74
65,913
from pathlib import Path def __overwrite_file(output_path: Path, overwrite: bool) -> bool: """ If not automatically overwriting, ask the user if they want to continue Either delete the file and continue, or return that choice. :param output_path: Path :param overwrite: bool :return: bool """ if not overwrite: print(f'File \'{output_path.as_posix()}\' exists!') do_del = input('Delete/overwrite? [y/N] ') if do_del == 'y': output_path.unlink() output_path.touch() else: print('Not continuing.') return False else: output_path.unlink() output_path.touch() return True
ff964f81802816c4c30ecd8f36466ccb00d6a159
65,915
def stations_level_over_threshold(stations, tol):
    """Return (station, relative_water_level) tuples above a threshold.

    For Task 2B: stations whose relative water level exceeds *tol*,
    sorted by level in descending order. Stations reporting None
    (no consistent data) are skipped.

    Fixes: compared against None with `==` instead of `is`, and used a
    redundant `pass` branch.
    """
    over_threshold = []
    for station in stations:
        level = station.relative_water_level()
        # None means the station has no usable level data.
        if level is not None and level > tol:
            over_threshold.append((station, level))
    return sorted(over_threshold, key=lambda pair: pair[1], reverse=True)
c0b37d3e026e26dc1d5b15099b7d28997fbbca7d
65,917
def get_distribution_code_payload(client: str = '100'):
    """Build a test distribution-code payload dict."""
    payload = dict(
        client=client,
        name='Test Memo Line',
        projectCode='1111111',
        responsibilityCentre='22222',
        stob='9000',
        serviceLine='20244',
        startDate='2020-07-29',
    )
    return payload
2e5933ad7d88dc93aed9fea2ed0313190d070fd6
65,923
import re


def bad_date(date):
    """Return True if *date* (str) is NOT exactly in %Y-%m format.

    Fixes: the previous ``re.match`` only anchored the start of the string,
    so values with trailing garbage (e.g. '2020-0712') were accepted as
    valid; ``re.fullmatch`` anchors both ends.
    """
    return re.fullmatch(r'[12]\d{3}-(?:0[1-9]|1[0-2])', date) is None
2cbc2b2efad94e9f1995de925b02dc66187668b3
65,924
def join_int(fst, snd):
    """Concatenate the decimal digits of two non-negative ints.

    - 1, 2   -> 12
    - 12, 34 -> 1234

    :param fst: First number (must be >= 0)
    :param snd: Second number (must be >= 0)
    """
    assert fst >= 0
    assert snd >= 0
    return int(f"{fst}{snd}")
595031380879cdad2b15e481a6f9325f7ac3eca7
65,925
def handle_cputime_metric(common_props, sample):
    """Create sample objects for a 'cputime' metric sample.

    Fixes: used Python-2-only dict.itervalues()/iteritems(), which raise
    AttributeError on Python 3 (the rest of this file uses f-strings, so
    the codebase targets py3); replaced with .values()/.items().

    :param common_props: Common properties shared with all other samples.
    :param sample: Original sample to extract values from.
    :return list: The created samples.
    """
    cputime_samples = []
    wallclock_key = u'-- WALL --'
    for data in sample['value'].values():
        wall_time = data[wallclock_key]
        for process, value in data.items():
            if process != wallclock_key:
                # Copy the shared props so each sample is independent.
                cputime_sample = dict(common_props)
                cputime_sample['process'] = process
                cputime_sample['value'] = value
                cputime_sample['wallclock'] = wall_time
                cputime_samples.append(cputime_sample)
    return cputime_samples
738417030e702b636183cc649e191b2df0491b6a
65,926
def getBasics(header):
    """Return (bias, gain, readnoise) from a FITS-style header mapping.

    :Note: useful for ACS WFC polarimetry data — reads the CCDOFSTB,
        CCDGAIN and READNSEB keywords.

    :return: bias, gain, readnoise
    :rtype: tuple
    """
    return header['CCDOFSTB'], header['CCDGAIN'], header['READNSEB']
f7ef5d060a816d73e7748d77084af19b91313958
65,931
def make_code_block(text: str, language: str = "gdscript") -> str:
    """Format *text* as a reStructuredText ``code-block`` directive."""
    indented = "\n".join("    " + line for line in text.split("\n"))
    return ".. code-block:: {}\n\n{}\n".format(language, indented)
881e0159b56ea767c5bb8c880b1c8584182b38f8
65,933
def convert_list_to_xyz_str(atoms: list):
    """Convert a nested atom list into an xyz-format string.

    Args:
        atoms (list): Atom list like ``[['H', 0.0, 0.0, 0.0], ...]``.

    Returns:
        str: xyz-string (count line, empty comment line, one line per atom).
    """
    # First line holds the atom count; the xyz comment line stays empty.
    lines = [str(len(atoms)), ""]
    for atom in atoms:
        lines.append("{:} {:.10f} {:.10f} {:.10f}".format(*atom))
    return "\n".join(lines)
9df4efbf4de59c51009df6452d177c3071963699
65,937
def is_valid_argument_model(argument):
    """Validate a CLI-model argument dict.

    Every argument must provide at least 'name' and 'help'. Any further
    custom validation of arguments belongs here.

    Parameters
    ----------
    argument(dict): Dictionary of argument parameters
        (full list: gdk.common.consts.arg_parameters).

    Returns
    -------
    (bool): True when the argument is valid, else False.
    """
    required = ("name", "help")
    return all(key in argument for key in required)
1d2bb1396b8b117bf1cd3c9aaaacf908c202d517
65,938
import re def _count_infix_segments(string: str) -> int: """Counts total number of infix segments, ignores @-strings.""" if "[" not in string: return 0 if "@" in string: return 0 nosuffix = re.sub(r"\][^\]]*$", "]", string) noprefix = re.sub(r"^[^\[]*\[", "[", nosuffix) nobrackets = re.sub(r"\[[^\]]*\]", "", noprefix) return len(nobrackets)
6b11cb8279b7e13c793660064537d7322c3d664b
65,939
def _separate_dirs_files(models): """ Split an iterable of models into a list of file paths and a list of directory paths. """ dirs = [] files = [] for model in models: if model['type'] == 'directory': dirs.append(model['path']) else: files.append(model['path']) return dirs, files
4c16305e4ede2b6a49486f30c980d123b7a4dfed
65,940
def Mapping(metric: dict, pattern: str = "/", http_method: str = "GET", delta: int = 1, last: str = "false"):
    """Builder of parameters to create Mapping

    Args:
        :param metric: Metric to be mapped
        :param pattern: URL pattern to map; default: /
        :param http_method: Method to map; default: GET
        :param delta: Incremental unit; default: 1
        :param last: If true, no other rules will be processed after matching this one; default: false"""
    metric_id = metric["id"]  # pylint: disable=possibly-unused-variable
    # Only the metric's id belongs in the payload, not the full metric dict;
    # deleting `metric` here makes locals() below contain exactly the API
    # fields: pattern, http_method, delta, last and metric_id.
    del metric
    return locals()
161abc4833d52f6df403de8e8d49fae57ee15fd9
65,942
def parse_line_str(line_str):
    """Split a comma-separated line into fields, stripping the line terminator."""
    fields = line_str.split(",")
    # Only the last field can carry the terminator; strip '\n' then '\r'.
    fields[-1] = fields[-1].strip('\n').strip('\r')
    return fields
24857eba3e5a25f13d2a4be4edf05473aa66e417
65,943
def common_denominator(number_one, number_two, range_one, range_two):
    """Find the largest value in (range_one, range_two] dividing both numbers.

    Fixes: the original recursive formulation hit Python's recursion limit
    for wide ranges, and descended forever when range_two < range_one; an
    iterative scan is bounded and equivalent.

    Parameters
    ----------
    number_one : int
        First number to find a common denominator for.
    number_two : int
        Second number to find a common denominator for.
    range_one : int
        Lowest integer of the candidate range (returned if nothing divides).
    range_two : int
        Highest integer of the candidate range.

    Returns
    -------
    Integer
        The largest candidate dividing both numbers, or range_one when
        no candidate in the range divides them.
    """
    for candidate in range(range_two, range_one, -1):
        if number_one % candidate == 0 and number_two % candidate == 0:
            return candidate
    return range_one
def9834161a8e9c46551681329d4c77c19244968
65,946
def receptive_field_conv(kernel, stride, n0=1, n_lyrs=1):
    """Compute the receptive field after stacked convolution layers.

    Parameters
    ----------
    kernel : kernel size
    stride : stride size
    n0 : receptive field inherited from the previous layer
    n_lyrs : number of layers to stack

    Returns
    -------
    Receptive field size after n_lyrs layers.
    """
    field = n0
    for _ in range(n_lyrs):
        field = kernel + (field - 1) * stride
    return field
fc99c29ca735b7d11db60b0c53b3f59bfa1b6977
65,949
def sanitize_smiles_file_name(file_name):
    """Sanitize a file name containing SMILES patterns.

    '/' is legal in SMILES but not in file paths, so every '/' becomes '_'.

    Parameters
    ----------
    file_name: str
        The file name to sanitize.

    Returns
    -------
    The sanitized file name.
    """
    return "_".join(file_name.split("/"))
7b9922403fd598492b419db1d796bae0de8e920c
65,951
def _dot_one(version): """Returns the version with an appended '.1-signed' on it.""" return u'{0}.1-signed'.format(version)
eb51343ad8e8cdb993cdf4683525dcad9c4c8e99
65,953
def set_exceptions(list_of_santas):
    """Interactively collect gifting exclusions for each santa.

    Each santa always excludes themself; further exclusions are read from
    stdin until an empty line is entered.

    Fixes: removed a dead initial loop that filled the dict with empty
    lists only to have every entry immediately overwritten.

    :param list_of_santas: names participating in the draw
    :return: dict mapping each name to the names they may not draw
    """
    print("Please set any matchup exclusions\n")
    exception_dict = {}
    for name in list_of_santas:
        # A santa can never draw themself.
        exception_dict[name] = [name]
        print("Who can %s not draw?" %name)
        name_in = input()
        while 1:
            if name_in == "":
                break
            if name_in not in list_of_santas or name_in == name:
                print("Please enter a valid name;\nThey must be a Santa, and cannot be themself")
            else:
                exception_dict[name].append(name_in)
            print("Who else can %s not draw?" %name)
            name_in = input()
    return exception_dict
8c47d6760554628d690a8517fe009981317d2fc9
65,957
import functools


def map_wrap(f):
    """Wrap *f* so it passes easily into 'map'-style processing.

    The wrapper forwards all arguments unchanged; functools.wraps keeps
    f's metadata intact (name, docstring) for introspection.
    """
    @functools.wraps(f)
    def forwarder(*args, **kwargs):
        return f(*args, **kwargs)

    return forwarder
83e2d9cd2bd36e993dd168a64236095db3608acc
65,960
import pickle


def write_pickle(grid, filename, protocol=-1, **kwargs):
    """Serialize a GridData object to disk with the pickle module.

    :param grid: grid to be saved
    :type grid: ~uquake.core.data.grid.GridData
    :param filename: full path to file with extension
    :type filename: str
    :param protocol: pickling protocol level (-1 = highest available)
    :type protocol: int
    :return: True on success
    """
    with open(filename, 'wb') as out_file:
        pickle.dump(grid, out_file, protocol=protocol)
    return True
f96ecf25a53df8b3b0fad55e0f03e0310c8085c4
65,961
def rem_var(var, subs):
    """Return a copy of *subs* with any substitutions of *var* removed.

    Both the constant and the variable form of *var* are dropped; the
    original mapping is left untouched.
    """
    newsubs = subs.copy()
    # pop(..., None) is the concise equivalent of try/del/except KeyError.
    newsubs.pop(var.constant(), None)
    newsubs.pop(var.variable(), None)
    return newsubs
161c62470ad648e2d062d021b528ece4e1555509
65,962
def lookupToString(lookup_dict):
    """Render a lookup dict as "k=v" pairs joined by commas.

    e.g. {"field1": "a", "field2": "b"} -> "field1=a,field2=b"
    """
    pairs = ["%s=%s" % (key, value) for key, value in lookup_dict.items()]
    return ",".join(pairs)
204bf2b29a5861ed354bcc49a442c032f756d595
65,963
import requests


def post_kalliope_poststuk_in(path, session, params):
    """
    Perform the API-call to send a new poststuk to Kalliope.

    :param path: url of the api endpoint that we want to send to
    :param session: a Kalliope session, as returned by open_kalliope_api_session()
    :param params: dict of multipart/form-data parameters for the api call
    :returns: parsed JSON response dict
    :raises requests.exceptions.HTTPError: when the API answers non-200
    """
    # files= sends the payload as multipart/form-data, which this endpoint expects.
    r = session.post(path, files=params)
    if r.status_code == requests.codes.ok:
        return r.json()
    else:
        try:
            # Prefer the structured JSON error body when the server returns one.
            errorDescription = r.json()
        except Exception as e:
            # Body wasn't JSON; fall back to the raw response object.
            errorDescription = r
        raise requests.exceptions.HTTPError('Failed to post Kalliope poststuk-in (statuscode {}): {}'.format(r.status_code, errorDescription))
5e48d49c8ae0faeec4a4f3514651cacedc22db82
65,974
def match_string(match):
    """Render a match dict as "match(<words>, <times>)"."""
    words, times = match['words'], match['times']
    return f"match({words}, {times})"
809ff5175b96847706b04b915ec9e42da9bf598a
65,979
def _jinja_finalize(output): """ Provide a finalize function for Jinja that suppresses the output of `None` Returns: `output` or empty string (but not None) """ return output if output else ''
1af6e7d2d15aa794174f85d9ac8a3ecc0778d981
65,980
def _ral_contains_ ( self , i ) : """Check the presence of element or index in the list """ if isinstance ( i , int ) : return 0<= i < len(self) return 0 <= self.index ( i )
54ec86b464405e8a5334641e598a95173b64ee74
65,985
def merge_dicts(groups):
    """Merge a sequence of dicts into one dict; the FIRST dict wins.

    Fixes: used Python-2-only dict.iteritems(), which raises
    AttributeError on Python 3; replaced with .items().
    """
    if not isinstance(groups, list):
        groups = list(groups)
    # Build in reverse so earlier dicts overwrite later ones (first wins).
    return dict(pair for g in reversed(groups) for pair in g.items())
5111825fe84635238f99c4cfc8c92eb27320bd95
65,987
def create_independent_disk(client, vdc, name, size, description):
    """Helper method to create an independent disk in a given orgVDC.

    :param pyvcloud.vcd.client.Client client: a client that would be used
        to make ReST calls to vCD.
    :param pyvcloud.vcd.vdc.VDC vdc: the vdc in which the disk will be
        created.
    :param str name: name of the disk to be created.
    :param str size: size of the disk to be created in bytes.
    :param str description: description of the disk to be created.

    :return: id of the created independent disk.

    :rtype: str
    """
    disk_sparse = vdc.create_disk(
        name=name, size=size, description=description)
    # Disk creation is asynchronous in vCD: block on the first task of the
    # returned sparse disk object until it succeeds.
    client.get_task_monitor().wait_for_success(disk_sparse.Tasks.Task[0])
    # clip the 16-char 'urn:vcloud:disk:' prefix from the id returned by vCD.
    return disk_sparse.get('id')[16:]
ba2766d0875662af57450bd1aaae4c69ff43d16c
65,992
from typing import Dict


def generate_policy_block(title: str, summary: str, message: str) -> Dict:
    """Build a Slack Block Kit message payload (header, divider, two sections)."""
    header = {
        "type": "header",
        "text": {"type": "plain_text", "text": summary},
    }
    title_section = {
        "type": "section",
        "text": {"type": "mrkdwn", "text": f"*{title}*"},
    }
    body_section = {
        "type": "section",
        "text": {"type": "mrkdwn", "text": f"```{message}```"},
    }
    return {
        "text": summary,
        "blocks": [header, {"type": "divider"}, title_section, body_section],
    }
2fb8b4d90e693417f678c7f77d0960e7448f3bad
65,994
import torch def _static(data: torch.Tensor, current_thresh: float, **kwargs) -> float: """ Passes through the specified input ``current_threshold``. Parameters ---------- data: torch.Tensor Pytorch tensor containing the data. current_thresh: float The threshold value. Returns ------- float The threshold value. """ return current_thresh
5a702d16b8761e5c9710803c1c777200269c9a85
65,998
def is_abstract_method(attr):
    """Return the truthy __isabstractmethod__ flag of *attr*, or False.

    Improvement: the hasattr-then-getattr double lookup collapses into a
    single getattr with a default — identical results, one lookup.
    """
    return getattr(attr, "__isabstractmethod__", False)
bfbe6a94c020e15fa728b9bcb108b2e665fc86ed
66,000
def get_otool_path(otool_line):
    """Parse the leading path from one line of ``otool -L`` output.

    The format '\t{PATH} (compatibility ...)' is **assumed**, not checked.

    Args:
        otool_line (str): A dependency (or ``install_name``) line.

    Returns:
        str: The ``PATH`` token at the start of the line.
    """
    tokens = otool_line.split()
    return tokens[0]
865bd1190e6c325687c73bbae98af59c32526f3b
66,004
def decode(obj):
    """Decode bytes to str (utf-8 default); pass other objects through."""
    return obj.decode() if isinstance(obj, bytes) else obj
649fc6b46b5b9609acbb7cd99fa66180411af055
66,013
from typing import Union
import json


def http_response(data: dict, status_code: Union[str, int] = '200') -> dict:
    """Build an API-Gateway-style HTTP response dict.

    Args:
        data: object to serialize into the JSON body.
        status_code: HTTP status code. Defaults to '200'.
    """
    headers = {
        'Content-Type': 'application/json',
        'Access-Control-Allow-Origin': 'papertesla.com',
    }
    return {
        'statusCode': str(status_code),
        'headers': headers,
        'body': json.dumps(data),
    }
4331f3f967e591b66197c4c92ea2054cb8ddde99
66,019
def model_cls_path(model):
    """Return the dotted module path of a class."""
    return f"{model.__module__}.{model.__qualname__}"
e95baa9b4c87b22c379899c8d88efb1d0025b52a
66,021
def items(json_struct):
    """Return all (key, value) items of the json_struct.

    NOTE(review): relies on ``json_struct._data`` being a dict-like
    private attribute of the project's JSON wrapper type — confirm
    against that class before reuse.
    """
    return json_struct._data.items()
eeada3b813dab22185068de8390f2ffa02dec065
66,022
def graphite_ocp_Enertech_Ai2020_function(sto):
    """Graphite Open Circuit Potential (OCP) as a function of stoichiometry.

    Ninth-order polynomial fit from the Enertech cell [1]; only accurate
    for 0.0065 < sto < 0.84.

    References
    ----------
    .. [1] Ai, W., Kraft, L., Sturm, J., Jossen, A., & Wu, B. (2020).
       Electrochemical Thermal-Mechanical Modelling of Stress Inhomogeneity
       in Lithium-Ion Pouch Cells. Journal of The Electrochemical Society,
       167(1), 013512. DOI: 10.1149/2.0122001JES

    Parameters
    ----------
    sto: double
        Stoichiometry of material (li-fraction)

    Returns
    -------
    :class:`pybamm.Symbol`
        OCP [V]
    """
    # Coefficients for sto**9 down to the constant term.
    coeffs = (-2058.29865, 10040.08960, -20824.86740, 23911.86578,
              -16576.3692, 7098.09151, -1845.43634, 275.31114,
              -21.20097, 0.84498)
    # Accumulate left-to-right in the same order as the original sum so
    # floating-point results are bit-identical.
    u_eq = coeffs[0] * sto ** 9
    for i, c in enumerate(coeffs[1:-1], start=1):
        u_eq = u_eq + c * sto ** (9 - i)
    u_eq = u_eq + coeffs[-1]
    return u_eq
e9b4712d679b9a6683c3ffacecd5bfb7e8e1718a
66,027
def add(a, b):
    """Return the sum of the two operands."""
    return a + b
11e025985d20270b4cb8c6158516b69dfc808048
66,028
def get_node_attribute_value(G, node1, node_attribute=None):
    """Get a node attribute as a float; 0.0 when missing or unconvertible.

    Fixes: a bare ``except`` silently swallowed every error (including
    KeyboardInterrupt); narrowed to the failures this lookup can produce.

    Input
    -----
    G - networkx graph (old-style ``G.node`` attribute access)
    node1 - node
    node_attribute=None - attribute of node required

    Return
    ------
    node attribute as a float, or 0.0 on any lookup/conversion failure
    """
    try:
        return float(G.node[node1][node_attribute])
    except (AttributeError, KeyError, TypeError, ValueError):
        # Graph lacks .node, node/attribute missing, or value not numeric.
        return 0.0
e1befcab677d93c1d8a3a90d9c3e51bb2df61c30
66,029
from typing import Callable
from typing import Union


def wait_until_valid_input(
    prompt: str, check: Callable[[str], str], sanitize: Union[Callable[[str], str], None]
) -> str:
    """Repeat *prompt* until an input passes *check*.

    Parameters
    ----------
    prompt: str
        message to display
    check: Callable[[str], str]
        called with the (sanitized) input; returns an error message when
        the input is invalid, and something False-like when it is valid.
    sanitize: Callable[[str], str], optional
        attempts to sanitize the user input (e.g. auto-complete).

    Returns
    -------
    the first valid input
    """
    while True:
        answer = input(prompt)
        if sanitize:
            answer = sanitize(answer)
        problem = check(answer)
        if not problem:
            return answer
        print(problem, end="\n\n")
2bc8042137823f439a861abf014c0d0574308f3f
66,030
import unicodedata, re


def make_file_name(s):
    # adapted from django.utils.text.slugify
    """Slugify a string for use as a file name.

    Normalizes unicode to ASCII, lowercases, removes non-alphanumeric
    characters, and converts runs of spaces/hyphens to single hyphens.
    """
    ascii_text = unicodedata.normalize('NFKD', s).encode('ascii', 'ignore').decode()
    cleaned = re.sub(r'[^\w\s-]', '', ascii_text).strip().lower()
    return re.sub(r'[-\s]+', '-', cleaned)
0afd3cf8021c08647f9ff10cb752ca5b6799e2ca
66,031
import re def _re_escape(s): """Escape regex control characters.""" escaped = re.sub(r"[(){}\[\].*?|^$\\+-]", r"\\\g<0>", s) return escaped
1e751e05e2feab0b6ff5e3d3cdb1dc16becb8dcf
66,036
from pathlib import Path


def find_rom(program):
    """Find a ROM: try *program* as a path, then the bundled roms/ directory.

    Returns the first existing Path, or None when neither exists.
    """
    candidates = (
        Path(program),
        Path(__file__).parent / "roms" / program,
    )
    for candidate in candidates:
        if candidate.is_file():
            return candidate
    return None
f7aab8b0bd839da84138ea61dc5bd75484eda9d9
66,037
import logging def _find_internal_ip_interface(iface_list): """Find an IP address that is internal from GCE. Args: iface_list: a list of GCE networkInterface. Returns: IP address string or None. """ logger = logging.getLogger(__name__) for iface in iface_list: try: ip_addr = iface['networkIP'] if ip_addr != '127.0.0.1': return ip_addr except KeyError: logger.warning('Network Interface has no "networkIP": %s', iface) logger.warning('Could not find internal IP interface in %s:', iface_list) return None
32cac2db32ec98ef736861c7178710855612bcfa
66,041
def ensure_binary(s, encoding="utf-8", errors="strict"):
    """
    Coerce ``s`` to bytes.

    * `str` -> encoded to `bytes`
    * `bytes` -> `bytes`

    :param s: the string to convert
    :type s: str | bytes
    :param encoding: the encoding to apply, defaults to "utf-8"
    :type encoding: str
    :param errors: set a different error handling scheme; other possible
        values are `ignore`, `replace`, and `xmlcharrefreplace` as well as
        any other name registered with :func:`codecs.register_error`.
        Defaults to "strict".
    :type errors: str
    :raises TypeError: if ``s`` is not str or bytes type
    :return: the converted bytes
    :rtype: bytes
    """
    if isinstance(s, str):
        return s.encode(encoding, errors)
    elif isinstance(s, bytes):
        return s
    else:
        raise TypeError("not expecting type '%s'" % type(s))
df9f6fce8675b2ead04a5a25f4248ea41fe1e2b3
66,044
def postorder_DFT(tree, nodelist):
    """Post-order traversal of a binary RST tree.

    :type tree: SpanNode instance
    :param tree: a binary RST tree
    :type nodelist: list
    :param nodelist: accumulator; nodes are appended in post order
    :return: the same nodelist, for chaining
    """
    for child in (tree.lnode, tree.rnode):
        if child is not None:
            postorder_DFT(child, nodelist)
    nodelist.append(tree)
    return nodelist
b3b65ce98ceb227ee743b3b40e0ee5f21ed1e56c
66,050
import functools
import warnings


def deprecated_func(func):
    """Decorator: emit a DeprecationWarning on the first call to *func*.

    Modernization: the original used a one-element list to work around
    Python 2's lack of ``nonlocal``; this file is Python 3 (it uses
    f-strings elsewhere), so a plain ``nonlocal`` flag is clearer.
    """
    first_usage = True

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        nonlocal first_usage
        if first_usage:
            warnings.warn(
                "Call to deprecated function {}.".format(func.__name__),
                DeprecationWarning,
            )
            first_usage = False
        return func(*args, **kwargs)

    return wrapper
c99064c3320ab50b17fe8e939b5309141188fa73
66,051
def describe_image(pred_df, fname):
    """A convenience function; writes a string that describes the tiles in a
    full-sized image.

    Designed to work with a prediction dataframe with columns:
    filename, tile_row, tile_col, empty, category_1, ..., category_n,
    where filename is the original full-size image filename (stem only,
    no extension or path) and the category values are 1/0.

    NOTE(review): the per-row positional indexing below (cv[1], cv[2],
    cv[3:], range(4, ...)) assumes exactly that column order — confirm
    against the dataframe producer.
    """
    ostring = ''  # the description string we will build
    # All tiles belonging to this image.
    tiles_mask = pred_df[pred_df['filename'] == fname].index
    tilerows = pred_df.loc[tiles_mask]
    ntiles = len(tilerows)
    nempty = sum(pred_df.loc[tiles_mask]['empty'])
    if(ntiles == nempty):
        # Every tile flagged empty -> the whole image is empty.
        ostring = 'empty'
    else:
        non_empty_mask = pred_df[(pred_df['filename'] == fname) & (pred_df['empty'] == 0)].index
        nonemptyrows = pred_df.loc[non_empty_mask]
        for i in range(len(nonemptyrows)):
            cn = nonemptyrows.iloc[i].index   # index of the row = the column names.
            cv = nonemptyrows.iloc[i].values  # values in this row
            rowcol = '[' + str(cv[1]) + ']' + '[' + str(cv[2]) + ']'  # row, col written as '[0][5]' etc.
            sumvals = sum(cv[3:])
            obj_str = rowcol + ':'
            # If none of the categories are 1 (including the 'empty' one),
            # the model is uncertain about this tile.
            if sumvals == 0:
                obj_str += 'uncertain'
            # else compile a string of whatever categories are in the tile.
            else:
                for j in range(4, len(cn)):
                    if cv[j] > 0:
                        obj_str += cn[j] + ' '
            if len(obj_str) > 0:
                ostring += ' ' + obj_str
    return ostring
4563fd859432b9f5f1be38a20aa641dcfcd67955
66,054
def calc_first_assists(data): """ Calculate the total traditional and weighted first assist for all players, grouped by player id. Author: Rasmus Säfvenberg Parameters ---------- data : pandas.DataFrame A data frame as retrieved by weighted.get_data() Returns ------- weighted_first_assists : pandas.DataFrame A data frame with total and weighted first assists per player. """ # Get required columns first_assists = data[["FirstAssistId", "reward"]].copy() # Convert from wide to long and have each assist (first only) as a row first_assists = first_assists.rename(columns={"FirstAssistId": "AssistId"}).\ melt(id_vars="reward").\ rename(columns={"value": "PlayerId"}).drop("variable", axis=1) # Intialize new columns that means 1 assist per event. first_assists["AssistedGoals"] = 1 # Calculate number of assists and weighted assists per player weighted_first_assists = first_assists.groupby("PlayerId")[["AssistedGoals", "reward"]].\ sum().reset_index().rename(columns={"AssistedGoals": "First_Assists", "reward": "WeightedFirst_Assists"}).\ sort_values("WeightedFirst_Assists", ascending=False) return weighted_first_assists
2772fa3b2ebe68c408695482d5ea5d9615ecf994
66,055
def quote_apostrophes(value):
    """Escape single quotes for SQLite by doubling them.

    See:
    * http://www.sqlite.org/lang_expr.html
    * http://stackoverflow.com/questions/603572/

    Improvement: a fixed single-character substitution doesn't need the
    regex engine — str.replace is simpler and faster.
    """
    return value.replace("'", "''")
7037c09460fc0e67b20abd5b2b7d89505527a7a0
66,056
def find_overlap(genotypes, df, q=0.1, col='code'):
    """Given genotypes, a df and a q-value, find DE genes common to all.

    A gene counts only when it is significant (qval < q) in every one of
    the requested genotypes.
    """
    # Restrict to significant rows of the requested genotypes.
    significant = df[df[col].isin(genotypes) & (df.qval < q)]
    wanted = len(genotypes)
    shared = []
    for target, group in significant.groupby('target_id'):
        # Keep the gene only if every genotype is represented.
        if group[col].nunique() == wanted:
            shared.append(target)
    return shared
fca075cd10f0c45de78f92bced71674a13f2a3be
66,062
def aws_event_tense(event_name):
    """Convert an AWS CloudTrail eventName into past tense for alert titles.

    E.g. 'StartInstance' -> 'started', used in titles like
    'The EC2 instance my-instance was started'.

    Args:
        event_name (str): The CloudTrail eventName

    Returns:
        str: The tensed verb, or the original name when no prefix matches.
    """
    tenses = {
        "Create": "created",
        "Delete": "deleted",
        "Start": "started",
        "Stop": "stopped",
        "Update": "updated",
    }
    return next(
        (past for prefix, past in tenses.items() if event_name.startswith(prefix)),
        event_name,
    )
5e0e21ac84cc5af2681def93bb1fd014c0700113
66,064
def generate_label(path, hexsha):
    """Build the 'path@hexsha' label field."""
    return "{}@{}".format(path, hexsha)
c813cc220d13891b17735034f59b7a3704086437
66,065
def evidence_from_inversion_terms(
    chi_squared,
    regularization_term,
    log_curvature_regularization_term,
    log_regularization_term,
    noise_normalization,
):
    """Compute the evidence of an inversion's fit to the data.

    Likelihood = -0.5 * [chi^2 + reg + log(curvature reg) - log(reg matrix)
                         + noise term]

    Parameters
    ----------
    chi_squared : float
        Chi-squared term of the inversion's fit to the observed data.
    regularization_term : float
        Sum of reconstructed-flux differences times the regularization
        coefficient.
    log_curvature_regularization_term : float
        Log determinant of the curvature + regularization matrices.
    log_regularization_term : float
        Log determinant of the regularization matrix.
    noise_normalization : float
        Noise-map normalization term of the observed data.
    """
    total = (
        chi_squared
        + regularization_term
        + log_curvature_regularization_term
        - log_regularization_term
        + noise_normalization
    )
    return -0.5 * total
6e832d7c1aba5e5004df9ce9e250122ed113569e
66,072
def insert(value: int, in_list: list) -> list:
    """Insert a value into an ordered list, returning a NEW list.

    Improvement: the linear scan is replaced by a binary search
    (bisect_left), keeping the same semantics — the value goes before
    the first element >= it, and the input list is never mutated.

    :param value: value to insert
    :param in_list: ascending list to insert into (left unmodified)
    :return: the new list
    """
    from bisect import bisect_left  # local import keeps module deps unchanged
    idx = bisect_left(in_list, value)
    return in_list[:idx] + [value] + in_list[idx:]
4e3b11de46d077c7e9eb4b130f5e2edeefd1c7b8
66,073
def left(index: int) -> int:
    """Return the left descendant's index.

    NOTE(review): 2*index is the left child under 1-based heap indexing
    (root at 1); with 0-based storage it would be 2*index + 1 — confirm
    the heap layout used by callers.
    """
    return index * 2
c324db352c8d876c69ebdf80c2e764378b6e0dd1
66,077
def loadDset_str(dset):
    """Return the scalar string stored in a dataset.

    ``dset[()]`` reads the full (scalar) value of an h5py-style dataset;
    it is then coerced to a plain str.
    """
    return str(dset[()])
e406c37ff66fb6c458d6b5c0bc14ba8751c772d4
66,078
import socket


def get_random_unopen_port(local_addr: str) -> int:
    """Ask the OS for a currently-free port on *local_addr*.

    Binding to port 0 makes the kernel pick an unused ephemeral port;
    the socket is closed again before returning, so the port is free
    (though not reserved) for the caller.

    :param local_addr: ip to bind, e.g. 127.0.0.1
    :return: an unopened port number
    """
    with socket.socket() as sock:
        sock.bind((local_addr, 0))
        return sock.getsockname()[1]
e5428b1edb969ec6e82d65f2e14d400233de396b
66,081
def timestamp_in(new, initial_boundary, end_boundary) -> bool:
    """Check whether *new*'s timestamp lies within [initial, end].

    Improvement: the ``if cond: return True else: return False``
    anti-idiom is replaced by returning the chained comparison directly.

    :param new: new message to consider
    :param initial_boundary: trace msg that defines the initial boundary
    :param end_boundary: trace msg that defines the end boundary
    :return: bool
    """
    init_ns = initial_boundary.default_clock_snapshot.ns_from_origin
    end_ns = end_boundary.default_clock_snapshot.ns_from_origin
    new_ns = new.default_clock_snapshot.ns_from_origin
    return init_ns <= new_ns <= end_ns
c29a87f7f5a01251759c4f335b920009b1aa2857
66,084
import torch


def load_checkpoint(checkpoint_file, model, model_opt):
    """
    Loads a checkpoint including model state and running loss for
    continued training.

    :param checkpoint_file: path to a torch checkpoint, or None to start fresh
    :param model: model whose state_dict will be restored (mutated in place)
    :param model_opt: optimizer whose state_dict will be restored (mutated)
    :return: (start_iter, running_loss) — (1, 0) when no checkpoint given
    """
    if checkpoint_file is not None:
        checkpoint = torch.load(checkpoint_file)
        state_dict = checkpoint["state_dict"]
        start_iter = checkpoint['iter']
        running_loss = checkpoint['running_loss']
        opt_state_dict = checkpoint['optimizer']
        model_opt.load_state_dict(opt_state_dict)
        # Optimizer tensors load on CPU; move them back to GPU so they match
        # the model parameters. NOTE(review): assumes CUDA is available —
        # this will fail on a CPU-only machine, confirm intended targets.
        for state in model_opt.state.values():
            for key, value in state.items():
                if isinstance(value, torch.Tensor):
                    state[key] = value.cuda()
        model.load_state_dict(state_dict)
    else:
        # No checkpoint: begin at iteration 1 with zero accumulated loss.
        start_iter = 1
        running_loss = 0
    return start_iter, running_loss
ea9239646bb74cf5b9ff9a2ffa39e2ab96d9a2ed
66,087
def clean_latex_name(label):
    """Convert a possible latex expression into a valid variable name.

    Drops '\\', '{' and '}', and maps '^' and ',' to '_'. Non-string
    input is first converted with str().
    """
    if not isinstance(label, str):
        label = str(label)
    # One-pass character translation instead of chained .replace() calls.
    table = str.maketrans({'\\': '', '{': '', '}': '', '^': '_', ',': '_'})
    return label.translate(table)
f6994f3ad6e03498ed5e83cc0d626695fa638d07
66,088
def rec_deps(services, service_name, start_point=None):
    """ return all dependencies of service_name recursively """
    if not start_point:
        start_point = service_name
    # NOTE: `deps` is the live set stored in services[service_name]["_deps"];
    # deps.update() below mutates it IN PLACE, so results are effectively
    # memoized into `services` across calls.
    deps = services[service_name]["_deps"]
    for dep_name in deps.copy():
        # avoid A depends on A
        if dep_name == service_name:
            continue
        dep_srv = services.get(dep_name, None)
        if not dep_srv:
            continue
        # NOTE: avoid creating loops, A->B->A
        if start_point and start_point in dep_srv["_deps"]:
            continue
        new_deps = rec_deps(services, dep_name, start_point)
        deps.update(new_deps)
    return deps
b394b32dbc713ec9b73ea8ef225dc41e6d8813bc
66,091
import re


def contain_words(text, words):
    """Check if a string contains any of the given words as whole words.

    Parameters
    ----------
    text : str
        query string
    words : list of str
        target words

    Returns
    -------
    bool
        whether string contains at least one of given words
    """
    # Wrap the alternation in a non-capturing group: without it the word
    # boundaries bind only to the first and last alternative
    # (r'\bfoo|bar\b'), so inner words could match as substrings
    # (e.g. "rebar" would match ["foo", "bar"]). re.escape guards against
    # regex metacharacters inside the words themselves.
    pattern = r'\b(?:{})\b'.format('|'.join(map(re.escape, words)))
    return re.search(pattern, text, re.IGNORECASE) is not None
ba9ce607ba328b53fcac9d64279fdb0354db98ff
66,096
def bdev_opal_get_info(client, bdev_name, password):
    """Get opal locking range info.

    Args:
        bdev_name: name of opal vbdev to get info
        password: admin password

    Returns:
        Locking range info.
    """
    # Forward the RPC with its two required parameters.
    return client.call('bdev_opal_get_info', {
        'bdev_name': bdev_name,
        'password': password,
    })
095e6739a65c07d5edd52fc3e0354112ad1a9a5d
66,098
from typing import Dict from typing import List from typing import Any from functools import reduce import operator def _get_by_path( obj: Dict, key_sequence: List[str] ) -> Any: """Access a nested dictionary by sequence of keys. Args: obj: (Nested) dictionary. key_sequence: Sequence of keys, to be applied from outside to inside, pointing to the key (and descendants) to retrieve. Returns: Value of innermost key. """ return reduce(operator.getitem, key_sequence, obj)
15fecb203eaa82ad77759aae44c9e4c3cd091a06
66,099
import torch


def PositionEncoder(x):
    """Positional Encoding layer with fixed encoding vector based on sin and
    cos, as in http://nlp.seas.harvard.edu/2018/04/03/attention.html#position-wise-feed-forward-networks

    Implemented as a function instead of a module because the shape of x
    (in particular, the text_len dimension) may not be known beforehand.
    Returns just the fixed PE vector instead of the sum x + PE, to avoid
    recomputing PE in repeated encoder blocks.

    Arguments:
        x: input tensor of shape (batch_size, text_len, input_dim)

    Output:
        pe: tensor of shape (text_len, input_dim)
            pe[position, 2i]   = sin( position * 10000^(- 2i / input_dim) )
            pe[position, 2i+1] = cos( position * 10000^(- 2i / input_dim) )
    """
    _, text_len, input_dim = x.size()
    # Positions 0..text_len-1, shape (text_len,)
    position = torch.arange(text_len, dtype=torch.float, device=x.device)
    # Inverse frequencies 10000^(-2i/input_dim), shape (input_dim//2,)
    div_term = torch.arange(0, input_dim, 2, dtype=torch.float, device=x.device)
    div_term = torch.pow(10000, -div_term / input_dim)
    # Outer product -> angles[position, i] = position * 10000^(-2i/input_dim).
    # torch.outer replaces the deprecated torch.ger.
    angles = torch.outer(position, div_term)  # (text_len, input_dim//2)
    # Interleave sin(angles) and cos(angles) along the last dimension,
    # yielding shape (text_len, input_dim).
    pe = torch.stack(
        (torch.sin(angles), torch.cos(angles)), dim=2
    ).view(text_len, input_dim)
    return pe
0f2e0a8d6af8894561fe0abdf104c46135cd8dd3
66,101
def _parse_display(display): """Parse an X11 display value""" try: host, dpynum = display.rsplit(':', 1) if host.startswith('[') and host.endswith(']'): host = host[1:-1] idx = dpynum.find('.') if idx >= 0: screen = int(dpynum[idx+1:]) dpynum = dpynum[:idx] else: screen = 0 except (ValueError, UnicodeEncodeError): raise ValueError('Invalid X11 display') from None return host, dpynum, screen
b9306cf93e207d6f699114b583558ce089d100b5
66,103
def convert_dot_notation(key, val):
    """
    Take provided key/value pair and convert it into a nested dict if the
    key uses dot notation.

    "a.b.c", 1 -> ("a", {"b": {"c": 1}}); a plain key is returned as-is.

    :param key: possibly dotted key string
    :param val: value to nest under the innermost key
    :return: (outermost_key, nested_value)
    """
    parts = key.split('.')
    if len(parts) == 1:  # no dot notation found
        return key, val
    # Wrap val from the innermost component outward; the first component
    # is returned as the key. (The previous implementation compared each
    # component against the first one by value and therefore returned
    # early when the first key name repeated, e.g. "a.b.a".)
    newval = val
    for part in reversed(parts[1:]):
        newval = {part: newval}
    return parts[0], newval
4b5b2ec1c8c50e72bad192a9924d7b6ec516d20d
66,105
def _set_star_value(star_code, number_stars): """ Internal function that is used for update the number of active stars (that define notebook difficulty level) ---------- Parameters ---------- star_code : str String with the HTML code to be changed. number_stars : int Number of stars that will be active. Returns ------- out : str It is returned a string with the HTML code after updating the number of active stars. """ for star in range(1, 6): if star <= number_stars: star_code = star_code.replace("fa fa-star " + str(star), "fa fa-star " "checked") else: star_code = star_code.replace("fa fa-star " + str(star), "fa fa-star") return star_code
52d793a1c4a38baa28adc52a055fc2199b31932b
66,106
import random


def perturbed(v, jitter):
    """Return `v` shifted by a uniform random offset in [-jitter, jitter)."""
    offset = 2 * jitter * random.random() - jitter
    return v + offset
16b5cbcbe8c63efdca3a7d28250c27067618016a
66,107
from typing import Any


def tuple_gen(new_val: Any, ori_tuple: tuple, axis: int) -> tuple:
    """Return a copy of ``ori_tuple`` with ``new_val`` placed at index
    ``axis`` (negative indices are supported)."""
    items = list(ori_tuple)
    items[axis] = new_val
    return tuple(items)
1d4e21c501af4380a58e1fe33a11663a8cae0976
66,113
import struct


def mac2str(mac):
    """Converts mac address to string.

    Args:
        mac: 6 bytes mac address

    Returns:
        readable string ("aa:bb:cc:dd:ee:ff", lowercase hex)
    """
    octets = struct.unpack('6B', mac)
    return ':'.join('%02x' % octet for octet in octets)
01a51ef8a787d9a4e2ee0262698ef481bcadb6ff
66,114
def filter_by_labels(element, non_rated_threshold=10):
    """Filters notes by their labeling status.

    Rated notes are kept when they carry at least one label; nonrated
    notes are kept only when the regex parser found more labels than the
    given threshold.

    Args:
        element: (key, APData) pair; the APData carries the labels.
        non_rated_threshold: Total number of labels required for
            nonrated notes.

    Returns:
        bool, should this note be kept.
    """
    values = element[1]
    num_labels = len(values.labeled_char_spans)
    rated_with_labels = values.is_rated and num_labels > 0
    return rated_with_labels or num_labels > non_rated_threshold
310b5f1158d83d87151b1a9b05ccc72aedf0c6f6
66,117
import base64
import json


def cache_key(working_directory, arguments, configure_kwargs):
    """Compute a `TensorBoardInfo.cache_key` field.

    The returned format is opaque: clients may only compare results of
    this function for equality. Two (prospective or actual) TensorBoard
    invocations with the same cache key may safely be used in place of
    one another; the converse is not guaranteed.

    Args:
      working_directory: The directory from which TensorBoard was
        launched and relative to which paths like `--logdir` and `--db`
        are resolved.
      arguments: The command-line args to TensorBoard, as `sys.argv[1:]`.
        Must be a list or tuple, not an unparsed string (use
        `shlex.split` first if needed).
      configure_kwargs: A dictionary of additional argument values that
        override the textual `arguments`; may be empty.

    Returns:
      An opaque cache-key string.
    """
    if not isinstance(arguments, (list, tuple)):
        raise TypeError(
            "'arguments' should be a list of arguments, but found: %r "
            "(use `shlex.split` if given a string)" % (arguments,)
        )
    datum = {
        "working_directory": working_directory,
        "arguments": arguments,
        "configure_kwargs": configure_kwargs,
    }
    # Canonical JSON (sorted keys, compact separators) keeps the key stable.
    serialized = json.dumps(datum, sort_keys=True, separators=(",", ":"))
    # base64 of the UTF-8 bytes, returned as an ASCII str.
    return base64.b64encode(serialized.encode("utf-8")).decode("ascii")
38985c62d0b3161163daa0fa4dccad80afa7d28b
66,122
def read_menu() -> int:
    """
    Reads number and selects a menu.

    :return: integer representing the chosen menu.
    """
    # Reads one line from stdin; raises ValueError on non-numeric input.
    return int(input())
28eaaca1c5ca7cc31a64c10c3204e94c00f7877a
66,127
import codecs


def read_html_from_file(path_to_file):
    """Reads UTF-8 HTML file from disk and returns it as String.

    Args:
        path_to_file (str): Path to the file

    Returns:
        str: content of file as string
    """
    # Context manager guarantees the file handle is closed even if
    # read() raises; builtin open() with an explicit encoding replaces
    # the legacy codecs.open().
    with open(path_to_file, 'r', encoding='utf-8') as f:
        print("reading file {}...".format(path_to_file))
        return f.read()
c18cfa5734d6802c3d490c40e02ac33af91c5c09
66,128
def _extract_keys_to_unique_list(lists_of_dictionaries): """ Extract the keys for a list of dictionaries and merge them into a unique list. :param lists_of_dictionaries: List of dictionaries to pull unique keys from. :type lists_of_dictionaries: list(dict) :return: Merged list of keys into a unique list. :rtype: list """ merged_list = list(lists_of_dictionaries[0].keys()) for idx, d in enumerate(lists_of_dictionaries): if idx != 0: merged_list = merged_list + list(set(list(d.keys())) - set(merged_list)) return merged_list
9f94d46cec063a46fd4b506cc08e5cf20b3b98ac
66,129
def _find(item, command): """ Return true if any of the item's keywords is in the command string. """ return any(keyword in command for keyword in item["keywords"])
6909a621e0a41ddd692ae0517965957e470af705
66,130
def build_attr_dict(attr_triples):
    """ build attribute dictionary (dict[entity][attr] = value) """
    result = {}
    for entity, attr, value in attr_triples:
        # Later triples for the same (entity, attr) overwrite earlier ones.
        result.setdefault(entity, {})[attr] = value
    return result
83c38666db9984ff32a5fb1cece124fb4a277bfc
66,131
import requests
from typing import Any


def tid_repo_prompt(image_data_url: str):
    """
    Prompt for TID repo image selection

    :param image_data_url: base url of the TID repo images (JSON index)
    :return: download link of the image, or None on any failure
    """
    # Convert the user's 1-based choice to a 0-based list index.
    entry_number: int = int(input("Please enter image number: ")) - 1
    try:
        response: requests.Response = requests.get(image_data_url)
    except Exception as error:
        # Network failure: report and fall through, implicitly returning None.
        # NOTE(review): the caught exception is never inspected or logged.
        print("[red]Perhaps you are not connected to the internet. Mind checking it again?")
    else:
        if not response.ok:
            print(f"[red]Oops, something went wrong {response.status_code}")
            return
        image_data: Any = response.json()
        if entry_number >= len(image_data):
            # NOTE(review): an out-of-range choice falls back to index 1
            # (the SECOND image), not 0 — confirm this is intentional.
            entry_number: int = 1
        file_name = image_data[entry_number]["image_name"]
        file_type = image_data[entry_number]["file_type"]
        return f"https://raw.githubusercontent.com/Muhimen123/TID/main/images/{file_name}.{file_type}"
049bd987d8f10bee0e01a7c660bd2bf949846de7
66,134
def _validate_subset(of, allow_none=False): """ Create validator to check that an attribute is a subset of ``of``. Parameters ---------- of: str Attribute name that the subject under validation should be a subset of. Returns ------- validator: Callable Validator that can be used for ``attr.ib``. """ def _v(instance, attribute, value): if allow_none and value is None: return other_set = set(getattr(instance, of)) if isinstance(value, str): my_set = {value} else: my_set = set(value) too_much = my_set - other_set if too_much: raise ValueError( "{attribute} must be a subset of {of}, but it has additional values: {too_much}".format( attribute=attribute.name, of=of, too_much=", ".join(sorted(too_much)), ) ) return _v
5a22e950e57ca18dc597f64e1296048001a3739a
66,137
import copy


def image_filter_range(json_dict, key, min_value=None, max_value=None, return_copy=True):
    """Return a version of `json_dict` where only `io` list items (images)
    with `key`'s value in range `[min_value ; max_value]` are kept.

    Items lacking `key` are dropped whenever a bound is given. With
    return_copy=True (default) the input dict is left untouched;
    otherwise it is filtered in place.
    """
    result = copy.deepcopy(json_dict) if return_copy else json_dict
    if min_value is not None:
        result["io"] = [img for img in result["io"]
                        if key in img and min_value <= img[key]]
    if max_value is not None:
        result["io"] = [img for img in result["io"]
                        if key in img and img[key] <= max_value]
    return result
6f136179c4b7d7d7ab8db4e5c22d52e48cb844ea
66,141
def read_requirements_file(filename):
    """Read requirements file and extract a set of library requirements.

    Parameters
    ----------
    filename : `str`
        Filename of requirements file.

    Returns
    -------
    requirements : array_like(`str`, ndim=1)
        Extracted set of library requirements (stripped, blank lines
        removed).
    """
    with open(filename, 'r') as file_obj:
        return [line.strip() for line in file_obj if line.strip()]
003473365f7625ef5de1c1fde4a0ae52e2cc1682
66,142
def element_base_url(element):
    """Return the URL associated with a lxml document.

    Equivalent to the HtmlElement.base_url property, but without
    requiring HtmlElement.
    """
    tree = element.getroottree()
    return tree.docinfo.URL
25889f821a57b3f70f15130574fb0576d5d9c589
66,143
import math


def calculate_number_of_bins_sturges(data):
    """Calculate the number of bins using Sturges' formula:
    ceil(log2(n) + 1), where n is the number of samples.

    :param data: sized collection of samples (must be non-empty)
    :return: int number of bins
    """
    # math.log2 is exact for powers of two, unlike the quotient-based
    # math.log(n, 2), which can undershoot (e.g. 2.9999...) and change
    # the ceil result by one.
    return math.ceil(math.log2(len(data)) + 1)
a7898a207414c2532813f6a27a879cb130ff4cea
66,144
import operator


def normal_slice(slice: slice) -> bool:
    """is the object a conventional slice item with integer values."""
    # start/stop/step must each be an int or None.
    parts = (slice.start, slice.stop, slice.step)
    return all(isinstance(part, (int, type(None))) for part in parts)
66163a72726ef9390ee837588fa793fa20fe7c1e
66,145
def display_word(word):
    """
    Creates dashed display ex: _ _ _ _ _
    (one "_ " per character of word)
    """
    return '_ ' * len(word)
5b62792bd351fdf09d85e5183679abed0a5ff966
66,147
def is_sublist(smallList, bigList):
    """ Checks if smallList appears as a contiguous sublist of bigList """
    size = len(smallList)
    # Compare against every window of len(smallList) in bigList; an
    # empty smallList matches trivially.
    return any(
        bigList[start:start + size] == smallList
        for start in range(len(bigList) - size + 1)
    )
3dd582acef24c604a2ff8f1da52a2877c45a5a20
66,148
import pickle


def read_encoded_ctx_file(encoded_ctx_file: str):
    """
    Returns dictionary containing the encoded passages and their vector embeddings
    :param encoded_ctx_file: path to a pickle of (passage_id, embedding) pairs
    :return: dict mapping passage id -> embedding, for fast lookup
    """
    with open(encoded_ctx_file, mode="rb") as f:
        passages = pickle.load(f)
    # Build the id -> embedding lookup table in one pass.
    return {passage[0]: passage[1] for passage in passages}
23bbf7e76d53c3b3ea8228b9e8b2d53d932bae33
66,149
def create_futures_regex(input_symbol: str) -> str:
    """Create a regular expression pattern matching standard futures symbology.

    In the ICE consolidated feed, standard futures contracts are
    identified by the root symbol prefixed with the type and optional
    session indicator, a backslash, and a delivery date formatted as
    MYYdd (month code, 2-digit year, and a 2-digit day only where the
    day of the month is needed to identify the security).

    Parameters
    ----------
    input_symbol: str
        Either a fully specified contract (e.g. ``F:FDAX\\H21``), which
        yields a pattern matching literally that contract only, or a
        prefixed root symbol followed by a backslash and the ``*``
        wildcard (e.g. ``F:FDAX\\*``), which yields a pattern matching
        every combination of month code, 2-digit year, and optional
        2-digit day for that root.

    Returns
    -------
    str
        The regular expression pattern.
    """
    if input_symbol.endswith('*'):
        # Wildcard form: one month letter plus 2-digit year and an
        # optional 2-digit day after the escaped backslash.
        root = input_symbol.split("\\")[0]
        return rf"{root}\\[A-Z][0-9]{{2,4}}"
    # Fully specified contract: rebuild the literal pattern with an
    # escaped backslash between root and maturity.
    parts = input_symbol.split("\\")
    return rf"{parts[0]}\\{parts[1]}"
3fb6da6dd48d426cff093ea7af5e387c5204e4f6
66,150
def GetReturnRates(price, days=1):
    """ Returns percent return rate for last N days. """
    # Compare the price N entries back with the most recent one.
    start = price[-1 - days]
    end = price[-1]
    return (end - start) * 100 / start
d9410064a69e48c9b1324b47d5498aff17f7b988
66,157
def encode_entry(entry):
    """Convert from an entry to a JSON-representation:
    [key, settings, constraints-as-json]."""
    key = entry.key
    settings = entry.settings
    constraints = entry.constraints.to_json()
    return [key, settings, constraints]
3ee292c11a55d08af5080138d877f42baa6a320c
66,162
import json


def json_formatter(result, _verbose):
    """Format result as pretty-printed JSON (4-space indent, sorted keys).

    The _verbose flag is accepted for interface compatibility and ignored.
    """
    return json.dumps(result, sort_keys=True, indent=4)
61d191c5f40006d3dad88bf2000b67ff0735bc6f
66,164
def remap_response(recog_response):
    """
    Remap recognition responses to high (1), medium (2), and low (3)
    levels: 6 -> 1, 5 -> 2, 4 -> 3.

    Mutates ``recog_response`` in place via boolean-mask assignment and
    returns the same object.
    """
    # Compute all masks up front so the remapping steps cannot interact.
    high = recog_response == 6
    medium = recog_response == 5
    low = recog_response == 4
    recog_response[high] = 1
    recog_response[medium] = 2
    recog_response[low] = 3
    return recog_response
12a5e1a6c05175a17f220a0651fa8427083e52c8
66,165