def normalize(data):
    """
    Normalize data set to have zero mean and unit variance.

    Args
        data: A numpy array of arrays containing input or target data.

    Returns
        A normalized numpy array of arrays.
    """
    # Dividing by the variance (as originally written) would not give
    # unit variance; the standard deviation is the correct scale factor.
    return (data - data.mean(axis=0)) / data.std(axis=0)
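A quick sanity check of the normalized output (a hedged sketch; the sample data is illustrative and assumes the std-based fix above):

import numpy as np

data = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
out = normalize(data)
print(out.mean(axis=0))  # approximately [0. 0.]
print(out.std(axis=0))   # approximately [1. 1.]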
d09f7bc81c5f6c5e1c593836bd758a3553f243ca
10,809
def coord(x_coordinate=0, y_coordinate=0):
    """function to form a coordinate string from x and y integers"""
    return '(' + str(x_coordinate) + ',' + str(y_coordinate) + ')'
65bb18ffefdaaa13b30d5237b9061b053a9e8a9d
10,811
def ListToDict(args):
    """
    change a list to dict

    :param args: a list
    :type args: list
    :return: dict

    .. code-block:: python

        >>> a = [1, 2, 3, 4]
        >>> print(ListToDict(a))
        {1: {2: {3: 4}}}
    """
    if not isinstance(args, list):
        return None
    if len(args) == 1:
        return args[0]
    else:
        return {args[0]: ListToDict(args[1:])}
23f3ed1f47adbc842906a06d704150962b3f5a76
10,814
def score_filter(predictions, min_score):
    """Remove prediction bounding boxes with probability under a threshold

    Parameters
    ----------
    predictions : dict
        all predictions
    min_score : int
        threshold score

    Returns
    -------
    dict
        filtered predictions
    """
    new_pred = {}
    new_pred['type'] = predictions['type']
    new_pred['features'] = []
    for feature in predictions['features']:
        if feature['properties']['score'] >= min_score:
            new_pred['features'].append(feature)
    return new_pred
28f8c0604f3dabc76ffbda911357d0bdd5bd5331
10,815
def consolidate(*levels):
    """
    Arranges list of object dictionaries levels into a hierarchical data
    structure, based upon the attribute "parent".

    Must pass arguments in ascending order
    (e.g. children, parents, grandparents)
    """
    # consolidated = list(levels)  # modify copy
    consolidated = levels
    for i, lvl in enumerate(levels):
        try:
            parents = consolidated[i + 1]
        except IndexError:
            return consolidated[i]
        for name, obj in lvl.items():
            p = obj.parent
            if p in parents:
                parents[p].children.append(obj)
                obj.grandparent = parents[p].parent
    return consolidated
7cb064d3651767732c979ba34952a0a8c8962eb0
10,816
import struct

def decode_option(packet, offset):
    """Decode a navdata option."""
    id_nr, size = struct.unpack_from("HH", packet, offset)
    end_offset = offset + size
    data = packet[offset + struct.calcsize("HH"):end_offset]
    return id_nr, data, end_offset
c0ada56ba1f227a9db74b6d090496b3cfdbb1041
10,817
import math

def getsteps(tspan, h):
    """
    Given a timespan and a timestep h, return the number of steps.
    """
    t1, t2 = tspan
    return int(math.floor((t2 - t1) / h))
a10e0a4302352be6eafb9fad8e00005b9c79dd27
10,818
from typing import Union

def boolean(value: Union[bool, str, int, float] = False) -> bool:
    """
    Return boolean from string.
    """
    # The annotation includes int because a plain-int branch exists below.
    if isinstance(value, bool):
        return value
    elif isinstance(value, str):
        value = value.lower().strip()[0]
        if value in ["y", "t"]:
            return True
        if value in ["n", "f"]:
            return False
        raise ValueError("Cannot convert %r to Boolean" % value)
    elif isinstance(value, int):
        return value != 0
    elif isinstance(value, float):
        if abs(value) < 0.01:
            return False
        else:
            return True
    else:
        raise ValueError("Cannot convert %r to Boolean" % value)
4f1db3e1224e0633c5d663e6ae85a8ae19c51624
10,819
import sys
import os

def get_ancestor_paths(path):
    """Returns a list of ancestors for a given path"""
    ancestors = []
    stop_val = "/"
    if "darwin" in sys.platform:
        stop_val = "/"
    elif "win" in sys.platform:
        stop_val = ":\\"
    stop_len = len(stop_val) * -1
    if os.path.exists(path):
        ancestors.append(path)
        path = os.path.dirname(path)
        if path[stop_len:] != stop_val:
            ancestors.extend(get_ancestor_paths(path))
    return ancestors
b125fcc0b776ea2a272666f7a272f19ec116f394
10,820
def strip_noise_from_key_signature(key):
    """Removes any unnecessary characters (7, 9, 11, m, M etc...)"""
    # Change this to a map or something
    key = key.replace('9', '')
    key = key.replace('7', '')
    key = key.replace('5', '')
    key = key.replace('m', '')
    key = key.replace('M', '')
    return key
5aa910f96dce275f9e4b6af7926179e73a2c313d
10,821
def derived_something():
    """
    A docstring template to describe the function.
    This function is part of the unit test template.

    Returns
    -------
    A string with the word output
    """
    print("This message shows that you have successfully imported "
          "the derived_something() function from the preprocessing module")
    output_value = 'output'
    return output_value
97586600645d87c557a1ebeb83e0f9e3a5c1381f
10,822
from typing import List
import random
import string

def get_addresses(n=50) -> List[dict]:
    """
    Generate random addresses
    """
    def _get_address() -> dict:
        return {
            "name": "John Doe",
            "companyName": "Test Co",
            "streetAddress": "{} Test St".format(random.randint(10, 100)),
            "postCode": str(random.randrange(10**4, 10**5)),
            "city": "Test City",
            "state": "Test State",
            "country": "".join(random.choices(string.ascii_uppercase, k=2)),
            "phoneNumber": "+{}".format(random.randrange(10**9, 10**10)),
        }

    return [_get_address() for _ in range(n)]
e125abb419cfeb9e06c2c0930f5e3082baa01b72
10,823
import re

def _prepare_date(from_date):
    """
    Private function to prepare from_date by converting it to
    YYYY-MM-DD format.
    """
    # check if from_date was provided and if it was provided in the right
    # format
    from_date_str = None
    if from_date is not None:
        if not isinstance(from_date, str):
            try:
                from_date_str = from_date.strftime('%Y-%m-%d')
            except AttributeError:
                raise ValueError(
                    "from_date must be a string in the format YYYY-MM-DD "
                    f"or datetime. String provided: {from_date}"
                )
        else:
            # regex template for YYYY-MM-DD
            pattern = re.compile("\\d{4}-\\d{2}-\\d{2}")
            match = pattern.match(from_date)
            if match is None:
                raise ValueError(
                    "from_date must be a string in the format YYYY-MM-DD "
                    f"or datetime. String provided: {from_date}"
                )
            from_date_str = from_date[0:10]
    return from_date_str
2766138027a1a2cc89e66370d792e2a317f6aa21
10,825
import copy
import torch

def get_loss(cfg_loss):
    """
    Build the loss with the proper parameters and return it.

    Parameters
    ----------
    cfg_loss : dict
        Dictionary containing the name of the loss to use and its
        specific configs.

    Returns
    -------
    loss_function : function
        The loss function.
    """
    loss_args = copy.deepcopy(cfg_loss)

    # Import proper loss class
    if loss_args['name'] == 'BinaryFocalLoss':
        exec(f"from utils.loss import {loss_args['name']}")
    else:
        exec(f"from torch.nn import {loss_args['name']}")
    loss_class = eval(loss_args['name'])
    del loss_args['name']

    # Convert to torch.tensor some arguments that require it.
    for arg_name in ['pos_weight', 'weight']:
        if arg_name in loss_args:
            loss_args[arg_name] = torch.tensor(loss_args[arg_name])

    loss_function = loss_class(**loss_args)
    return loss_function
88bacc521eee93c2a8c8d73a829a57f5d02bfdbd
10,827
def _fix_params(params):
    """For v1 api -- True is True but False is a string"""
    for key, val in params.items():
        if val is False or str(val).lower() == 'false':
            params[key] = 'False'
        elif str(val).lower() == 'true':
            params[key] = True
    return params
1629cf618153c10169e2b963c99f06ef2e453075
10,828
def split_nav_dataframe(nav_table, split_date):
    """
    Split NAV pandas DataFrame into a training and a testing DataFrame
    according to a split_date, such that split_date becomes the last
    date of the training DataFrame.

    Args:
        - split_date (datetime.datetime)

    Returns:
        - train: the training DataFrame
        - test: the testing DataFrame
    """
    assert split_date in nav_table.index.tolist()
    split_index = nav_table.index.tolist().index(split_date)
    train = nav_table.iloc[:split_index + 1]
    test = nav_table.iloc[split_index + 1:]
    return train, test
78c6ae901641b3508d2a9f9c790be6d576cf8458
10,829
def get_episodes_count(episode_list, max_val):
    """
    Count the values in episode_list that are bigger than max_val.

    :param episode_list:
    :param max_val:
    :return:
    """
    return len([x for x in episode_list if x > max_val])
fc49154c644c1807e818e0b598c680878e342767
10,830
def find_groups_in_cluster(clustervs, elementgroupList):
    """A utility function to find vertices with the same cluster memberships.

    :param igraph.vertex clustervs: an igraph vertex instance
    :param list elementgroupList: a list containing the vertices to group
    :returns: a list-of-lists containing the groupings of the vertices
    """
    clustervertex = set([v for v in clustervs])
    return [vg for vg in elementgroupList
            if len(set(vg) & clustervertex) > 0]
7cc941d086d7be7c7395e21f6bde1cd4e5611851
10,831
import csv

def readbalfour(fname):
    """returns list of lists representing balfour csv"""
    with open(fname, "r") as f:
        reader = csv.reader(f, delimiter="\t", quotechar="\"")
        return list(reader)
d48071ff01bee67c13c38a6ca0ed22064b6bdca6
10,832
from typing import List
import os

def get_packages(path: str) -> List[str]:
    """Get list of packages under a path."""
    packages = []
    for file_or_dir in os.listdir(path):
        file_or_dir_path = os.path.join(path, file_or_dir)
        if os.path.isdir(file_or_dir_path):
            dir_name = file_or_dir
            dir_path = file_or_dir_path
            if (
                not dir_name.startswith('.')
                and os.path.exists(os.path.join(dir_path, 'package.xml'))
            ):
                packages.append(dir_name)
    return packages
0bb6397cd80e3549ca6f0f74007367a40ba5d2c1
10,833
def unparse_vs(tup):
    """version list to string"""
    return '.'.join(map(str, tup))
4c4f4d8fb02bffd676b2a4d87118eaf55cb0afe8
10,834
import base64

def encryptstring(text, password):
    """
    Encrypt a string according to a specific password.

    :type text: string
    :param text: The text to encrypt.
    :type password: string
    :param password: The password to encrypt the text with.
    """
    enc = []
    for i in enumerate(text):
        key_c = password[i[0] % len(password)]
        enc_c = chr((ord(i[1]) + ord(key_c)) % 256)
        enc.append(enc_c)
    return base64.urlsafe_b64encode("".join(enc).encode()).decode()
46142bc54828058bf131834fa13276da78e90842
10,835
import os

def commonprefix(path1, path2):
    """Determine common prefix path for path1 and path2

    Use this in preference to os.path.commonprefix as the version in
    os.path compares the two paths in a character-wise fashion and so can
    give counter-intuitive matches; this version compares path components
    which seems more sensible.

    For example: for two paths /mnt/dir1/file and /mnt/dir2/file,
    os.path.commonprefix will return /mnt/dir, whereas this function will
    return /mnt.

    Arguments:
      path1: first path in comparison
      path2: second path in comparison

    Returns:
      Leading part of path which is common to both input paths.
    """
    path1_components = str(path1).split(os.sep)
    path2_components = str(path2).split(os.sep)
    common_components = []
    ncomponents = min(len(path1_components), len(path2_components))
    for i in range(ncomponents):
        if path1_components[i] == path2_components[i]:
            common_components.append(path1_components[i])
        else:
            break
    commonprefix = "%s" % os.sep.join(common_components)
    return commonprefix
9122b8339ae53e4a646af7783dba388e83a41756
10,836
import torch

def top_pool(x):  # from right to left
    """
    :param x: feature map x, a Tensor
    :return: feature map with the same size as x
    """
    x_p = torch.zeros_like(x)
    x_p[:, :, :, -1] = x[:, :, :, -1]
    _, _, h, w = x.size()
    for col in range(w - 1, -1, -1):
        # running maximum over all columns to the right of (and including) col
        x_p[:, :, :, col] = x[:, :, :, col:].max(-1)[0]
    return x_p
7222d018a59270daf4041109e7979718ff4c68a6
10,838
def listofdict_topn_sorter(raw_listofdict, attr_key, reverse=True):
    """
    Sort a list-of-dict data structure.

    :param raw_listofdict:
    :param attr_key: the attribute to sort by, e.g. score or cost.
    :param reverse: True sorts in descending order (largest first).
    :return: 1) the raw data is left unchanged; 2) the original indexes in
             sorted order; 3) the requested attribute values in sorted order.
    """
    sorted_res = sorted(enumerate(raw_listofdict),
                        key=lambda x: x[1][attr_key], reverse=reverse)
    sorted_ids = [x[0] for x in sorted_res]
    sorted_attrs = [x[1][attr_key] for x in sorted_res]
    return sorted_ids, sorted_attrs
8dfb7a60dd7e904d99a3896908cd213d34e1a922
10,839
def check_padding(query):
    """
    Check for missing padding in base64 encoding and fill it up with "=".

    :param query:
    :return: query
    """
    missing_padding = len(query) % 4
    if missing_padding:
        query += "=" * (4 - missing_padding)
    return query
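For example (a small illustrative check; the input string is made up), a base64 string missing two characters of padding decodes cleanly after repair:

import base64

padded = check_padding("YWJjZA")
print(padded)                            # YWJjZA==
print(base64.urlsafe_b64decode(padded))  # b'abcd'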
d8ad3c96074d311dbd5ba17bb93d7ca7a8b5ccab
10,841
import inspect

def _get_method_classname(m):
    """Get class name for method, assuming method is bound and class has __dict__."""
    for k, v in inspect.getmembers(m):
        if k == "__qualname__":
            return v
    return "<unknown>"
88edafc4e3319ad32e2a77feed4dfb7e14f1bd4c
10,842
import subprocess

def get_git_hash():
    """Get short git hash, with "+" suffix if local files modified"""
    h = subprocess.check_output(
        ["git", "rev-parse", "--short", "HEAD"]).strip().decode("utf-8")
    # Add '+' suffix if local files are modified
    exitcode, _ = subprocess.getstatusoutput("git diff-index --quiet HEAD")
    if exitcode != 0:
        h += "+"
    return "git" + h
ff05a71e4dd3001335e29de77c43df21a1698120
10,843
import sys
import os
import tempfile

def can_symlink():
    """Return whether running process can create symlinks."""
    if sys.platform != 'win32':
        # Platforms other than Windows should allow symlinks without issues.
        return True
    if not hasattr(os, 'symlink'):
        # Older Python versions do not have `os.symlink` on Windows.
        return False
    test_source = tempfile.mkdtemp()
    test_target = test_source + 'can_symlink'
    try:
        os.symlink(test_source, test_target)
    except OSError:
        return False
    return True
531de5c7fe92ba5dbd6fb149500e143034189098
10,844
def align(l, alignto=4):
    """Round l up to the nearest multiple of alignto (a power of two)."""
    return (l + alignto - 1) & ~(alignto - 1)
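A few illustrative calls showing the round-up behavior (note the bit trick only works when alignto is a power of two):

print(align(5))               # 8
print(align(8))               # 8
print(align(13, alignto=16))  # 16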
2bef7f1c3486c9e633138178d80216bf750ff2ed
10,845
def table_exists(db_conn, table_name):
    """
    Checks if a table matching table_name exists in the database
    """
    cur = db_conn.cursor()
    tn = (table_name,)
    cur.execute("select name from sqlite_master where type='table' and name=?", tn)
    result = cur.fetchone()
    if result:
        return True
    else:
        return False
ffe60c445a03530910084d01a7a488e7229bda0b
10,847
def calculate_loss_of_life(List_V, t):
    """
    For list of V values, calculate loss of life in hours

    t = Time Interval (min)
    """
    L = 0
    for V in List_V:
        L += (V * t)  # Sum loss of life in minutes for each interval
    LoL = L / 60  # Calculate loss of life in hours
    return LoL
ee2499af737cca764aad0a2f13794a925a172b9e
10,849
def to_hex(value, bit_count):
    """Converts an integer to a hexadecimal value with bit_count bits."""
    return hex((value + (1 << bit_count)) % (1 << bit_count))
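Some illustrative calls showing the two's-complement wrapping for negative inputs:

print(to_hex(255, 8))   # '0xff'
print(to_hex(-1, 8))    # '0xff'
print(to_hex(-128, 8))  # '0x80'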
e7eaf89f7b5b43e6814a3d7faa5b2ef26320ac7d
10,850
def SplitPath(projects, path):
    """Common method to split SVN path into branch and filename sections.

    Since we have multiple projects, we announce project name as a branch
    so that schedulers can be configured to kick off builds based on
    project names.

    Args:
      projects: array containing modules we are interested in. It should
        be mapping to first directory of the change file.
      path: Base SVN path we will be polling. More details can be found at:
        http://buildbot.net/repos/release/docs/buildbot.html#SVNPoller.
    """
    pieces = path.split('/')
    if pieces[0] in projects:
        # announce project name as a branch
        branch = pieces.pop(0)
        return (branch, '/'.join(pieces))
    # not in projects, ignore
    return None
660d24107f643ed4103c2923451a6b08804afb1d
10,852
def assert_keys_in_dict(allowable_keys, d):
    """
    Checks that all keys in d are in allowable_keys

    Args:
        allowable_keys: Set or List of allowable keys
        d: Dict

    Returns:
        (True, None) if all keys are allowable, otherwise
        (False, offending_key) for the first key not in allowable_keys
    """
    for k in d:
        if k not in allowable_keys:
            return False, k
    return True, None
017fc447d22b755d8b8447f51f636e666ed72309
10,853
from pathlib import Path

def read(fname: str) -> str:
    """Read a file, starting from the package root directory."""
    with (Path(__file__).resolve().parent / fname).open() as f:
        return f.read()
340d814777f1f0ef6d5b97d430b3313db5e0a5ca
10,855
def _unique_names(item):
    """
    Compute the unique key for the given (namespaceless) item within a
    single collection.
    """
    return item.metadata.name
6da786ae1adae29a143b1e171a7206b25b8e9556
10,856
import six

def fix_axes3d_color(ax, col):
    """
    Setting color to None must cycle through the available colors as per
    matplotlib's philosophy. In some recent versions this was still not
    implemented, however. This function fixes the issue.

    The issue was fixed in April 2016, and the code below hacks into:
    https://github.com/matplotlib/matplotlib/issues/5990
    """
    if col is not None:
        return col
    try:
        next_col = six.next(ax._get_patches_for_fill.prop_cycler)['color']
    except AttributeError:
        next_col = six.next(ax._get_patches_for_fill.color_cycle)
    return next_col
5aafcb8609188507bdd3601e8de51be4639f06a1
10,857
import re
import argparse

def reverse_dns(value: str) -> str:
    """Validate `--org` argument

    Parameters
    ----------
    value : str
        Passed by the ArgumentParser object

    Returns
    -------
    str
        Returns the value back to the ArgumentParser object

    Raises
    ------
    argparse.ArgumentTypeError
        The passed argument was not in reverse DNS notation
    """
    # https://en.wikipedia-on-ipfs.org/wiki/Reverse_domain_name_notation.html
    # Raw string avoids an invalid-escape warning on the \. sequence.
    regex = r'^[A-Za-z]{2,6}((?!-)\.[A-Za-z0-9-]{1,63}(?<!-))+$'
    p = re.compile(regex)
    if not re.match(p, value):
        raise argparse.ArgumentTypeError(
            f"'{value}' not in reverse DNS notation")
    return value
c143f36dec7bdba5e628200b9e3044bf3d45f9fb
10,859
def normalize_title(title: str, body: str) -> str:
    """Normalize the title if it spills over into the PR's body."""
    if not (title.endswith("…") and body.startswith("…")):
        return title
    else:
        return title[:-1] + body[1:].partition("\n")[0].rstrip("\r")
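An illustrative example (the title and body text are made up): when a long PR title is truncated with an ellipsis, the overflow is recovered from the first line of the body:

print(normalize_title("Fix the …", "…flaky test\n\nDetails follow"))
# Fix the flaky test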
ec3d560855abcd85afe35839a4af025a2b365b45
10,860
def _linear_decay(value, origin, offset, scale, decay):
    """
    https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html
    """
    s = scale / (1 - decay)
    return max(0, (s - max(0, abs(value - origin) - offset)) / s)
9460624748cac35d97b5f3b51637fb41013958d2
10,861
def find_datacenter(response):
    """Grabs the X-Served-By header and pulls the last three characters
    as the datacenter

    Returns:
        string: the datacenter identification code
    """
    xsb = response.headers['X-Served-By']
    return xsb[-3:]
2c2556e5e9ed2044ed810c90fc9f4b65bbd7650e
10,862
def wcGeneralSettingLookup(gSettings, sid):
    """
    Lookup an ID in WooCommerce general settings.
    """
    assert gSettings is not None
    assert isinstance(sid, str)
    for settings in gSettings:
        if "id" in settings and settings["id"] == sid:
            return settings
    return None
ab3ca8a7f8a15db8fa93eaabd3b0f377d31e3b0c
10,863
def binom_coeff(n):
    """
    Calculate the binomial coefficient (n, 2), i.e. the number of
    distinct pairs possible in a set of size n

    :param n: size of set
    :return: number of pairs
    """
    return int(n * (n - 1) / 2)
b7f249524cda01ab8de14c7148745dd153d08b90
10,865
def homo_lumo_mix(C, nocc, beta):
    """
    Mix a portion of LUMO to HOMO.
    Used when generating spin-unrestricted guess.
    """
    if beta < 0. or beta > 1.:
        raise Exception("Mixing beta must be in [0, 1]")
    Cb = C.copy()
    homo = C[:, nocc - 1]
    lumo = C[:, nocc]
    Cb[:, nocc - 1] = (1. - beta) ** 0.5 * homo + beta ** 0.5 * lumo
    return Cb
081c9149ad2d5fe39a16796943fb4d2c53c7b3d7
10,866
from numpy import asarray
from PIL import Image

def resize_image(image, final_image_shape):
    """Utility to resize an image.

    Input:
        image: original image (2D numpy array of real numbers)
        final_image_shape: final size of the resized image
            (2-ple of positive integers)

    Output:
        resized_image: resized image (2D numpy array of real numbers)
    """
    img = Image.fromarray(image).resize(final_image_shape)
    resized_image = asarray(img)
    return resized_image
97fdf504152151ca249243cdb56c9387b3eefad3
10,867
import json

def loadJson(jsonfile):
    """
    Reads a .json file into a python dictionary.
    Requires json package.
    """
    with open(jsonfile, "r") as data:
        dictname = json.loads(data.read())
    return dictname
de059637ea0dc1b0ef729a5d2bbfa7c3e72bb5b1
10,869
def restrict_chains(data, k):
    """Restrict data to people with at least k rows.

    Parameters
    ----------
    data : pandas.DataFrame
        The `data` from US to be subsetted.
    k : int
        The minimum number of measurements needed for a person to be kept.

    Returns
    -------
    data : pandas.DataFrame
        The subsetted data of people with at least k entries.
    """
    # How many entries does each person have.
    # Take IDs of anyone with at least k values.
    # Subset the main data frame to remove anyone with less than k values.
    id_counts = data["pidp"].value_counts()
    trajectories_ids = list(id_counts.loc[id_counts >= k].index)
    data = data.loc[data["pidp"].isin(trajectories_ids)]
    return data
106fc9c43d12085392a84a188dcc8d40a89ae817
10,871
def list_of_elem(elem, length):
    """return a list of given length of given elements"""
    return [elem for i in range(length)]
d72bdab16a541714b2a0a781a3077d40e309e9f7
10,872
def all_ones(vector):
    """
    Return True/False if all vector's entries are/are not 1s.
    """
    return all([e == 1 for e in vector])
5bd1509c72945de83f3e84e956efae39bd32fee0
10,874
def kappa_idx(n):
    """
    Provide scalar products indexes for kappa values.

    Parameter:
    n -- integer

    Return:
    list_kappa_idx -- list of lists
    """
    list_kappa_idx = []

    if n == 5:
        list_kappa_idx0 = [['*00**', 0, False, False],
                           ['*01**', 1, False, True],
                           ['*10**', 2, True, False],
                           ['*11**', 3, True, True]]
        list_kappa_idx1 = [['**00*', 0, False, False],
                           ['**01*', 1, False, True],
                           ['**10*', 2, True, False],
                           ['**11*', 3, True, True]]
        list_kappa_idx2 = [['***00', 0, False, False],
                           ['***01', 1, False, True],
                           ['***10', 2, True, False],
                           ['***11', 3, True, True]]
        list_kappa_idx3 = [['0***0', 0, False, False],
                           ['0***1', 1, True, False],
                           ['1***0', 2, False, True],
                           ['1***1', 3, True, True]]
        list_kappa_idx4 = [['00***', 0, False, False],
                           ['01***', 1, False, True],
                           ['10***', 2, True, False],
                           ['11***', 3, True, True]]
        list_kappa_idx = [list_kappa_idx0, list_kappa_idx1, list_kappa_idx2,
                          list_kappa_idx3, list_kappa_idx4]

    if n == 3:
        list_kappa_idx0 = [['*00', 0, False, False],
                           ['*01', 1, False, True],
                           ['*10', 2, True, False],
                           ['*11', 3, True, True]]
        list_kappa_idx1 = [['0*0', 0, False, False],
                           ['0*1', 1, True, False],
                           ['1*0', 2, False, True],
                           ['1*1', 3, True, True]]
        list_kappa_idx2 = [['00*', 0, False, False],
                           ['01*', 1, False, True],
                           ['10*', 2, True, False],
                           ['11*', 3, True, True]]
        list_kappa_idx = [list_kappa_idx0, list_kappa_idx1, list_kappa_idx2]

    return list_kappa_idx
1e19fb2eca480711346d262e91990b79d3d71928
10,877
def gfmul(x, y):
    """Returns the 128-bit carry-less product of 64-bit x and y."""
    ret = 0
    for i in range(64):
        if (x & (1 << i)) != 0:
            ret ^= y << i
    return ret
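A small worked example: over GF(2), (x + 1)(x + 1) = x^2 + 1, since the cross terms cancel under XOR:

print(bin(gfmul(0b11, 0b11)))  # 0b101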
3639195b423e516ad374cd9dff263e9137634e68
10,878
def classifyData(classifier, testdata, classMap):
    """
    Classifies data based on the previously trained model.

    Args:
        classifier: A classifier object that has been trained on the test corpus.
        testdata: A dataset to classify based on reads.
        classMap: A dictionary mapping class names to their class id used
            by scikit-learn.

    Returns:
        result: Test data as classified by the model.
    """
    testdata.sort()
    X = []
    # testdata = [[contigID, variable1, variable2, ...], ...]
    for item in testdata:
        X.append(item[1:])  # all but the first item
    Y = classifier.predict(X)
    # Step one: transpose the testdata matrix and extract all contigIDs
    # https://stackoverflow.com/questions/4937491/matrix-transpose-in-python
    contigIDs = list(zip(*testdata))[0]
    # Step two: combine the contigIDs with the results from the classifier
    result = list(zip(contigIDs, Y))
    return result
ea2782a31b159cc937c8c8c69a634b403959e722
10,880
from typing import Tuple
import subprocess

def check_neo4j_running() -> Tuple[bool, str]:
    """Run shell commands to see if the neo4j instance is running.

    Returns:
        Tuple[bool, str]: First value is a boolean indicating if neo4j is
        running, second is a suggested stop command
    """
    neo4j_status_check = lambda x: x.startswith("Neo4j is running")
    service_check = lambda x: "Active: active (running)" in x
    check_commands = (
        (["neo4j", "status"], neo4j_status_check, "neo4j stop"),
        (["service", "neo4j", "status"], service_check, "systemctl stop neo4j"),
        (["systemctl", "neo4j", "status"], service_check, "systemctl stop neo4j"),
    )
    for check_command, running_func, stop_cmd in check_commands:
        try:
            proc = subprocess.Popen(check_command, stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE)
            proc.wait()
            (stdout, _) = proc.communicate()
            stdout_str = stdout.decode().strip()
            if running_func(stdout_str):
                return True, stop_cmd
        except (subprocess.CalledProcessError, FileNotFoundError):
            pass
    return False, ""
f37089c9971cae9c3472400f03af6026b4800043
10,882
def extract_details(df):
    """Extract step details for last 3 steps (deBoc, BHA and SNAr)."""
    df_RSinfo = df[['pentamer', 'Step details', 'RouteScore details',
                    'Isolated', 'RouteScore', 'log(RouteScore)']]
    last3_rxns = ['Buchwald_deprotection', 'Buchwald', 'SNAr']
    for rxn in last3_rxns:
        df_RSinfo[rxn] = [next(step for step in row[-3:]
                               if step['reaction'] == rxn)
                          for row in df['Step details']]
    for key in df_RSinfo['RouteScore details'][0].keys():
        df_RSinfo[key] = [row[key] for row in df['RouteScore details']]
    return df_RSinfo
c361f91b36a5270c93739d546779bcbb89944fbb
10,883
def multi_backend_test(globals_dict,
                       relative_module_name,
                       backends=('jax', 'tf'),
                       test_case=None):
    """See backend.multi_backend_test."""
    if test_case is None:
        return lambda test_case: multi_backend_test(  # pylint: disable=g-long-lambda
            globals_dict=globals_dict,
            relative_module_name=relative_module_name,
            backends=backends,
            test_case=test_case)
    return test_case
7e3ec1e7cb5b85490b9e54e2ad951ad2786e3fed
10,884
import os

def _test_data_dir():
    """Return path to directory for bot and server data."""
    root_dir = os.environ['ROOT_DIR']
    return os.path.join(root_dir, '_test_data')
b6dd9032f3e7d2ed8609af5629a915339ed77794
10,885
import sys

def get_hw_num_and_type_from_zip_name(zip_file_name):
    """Determine homework number and its type from the zip file name."""
    try:
        file_name = zip_file_name.lower()
        is_project = "проект" in file_name
        is_main_hw = not is_project and "практикум" not in file_name
        if is_project:
            hw_num = 1 if ("първия" in file_name) else (2 if ("втория" in file_name) else 0)
        elif is_main_hw:
            hw_num = 1 if ("първо" in file_name) else (2 if ("второ" in file_name) else 0)
        else:
            hw_num = 1
        if hw_num == 0:
            print("Couldn't determine hw/project number.", file=sys.stderr)
            sys.exit(5)
    except:
        print(zip_file_name + " does not look like moodle's zip file.",
              file=sys.stderr)
        sys.exit(3)
    else:
        return (hw_num, is_project, is_main_hw)
7fecc39a3e10317744626f4e562129edb91c006a
10,886
def kontejnery_verejne(kontejnery_json):
    """Select only the containers with public access."""
    verejne_kont = []
    for kontejnery in kontejnery_json["features"]:
        pristup = kontejnery["properties"]["PRISTUP"]
        if pristup == "volně":
            souradnice = kontejnery["geometry"]["coordinates"]
            verejne_kont.append(souradnice)
    return verejne_kont
d9179acd6ad8c94330c1c0499815cda77e84fbf4
10,887
def file_is_text(file):
    """
    Check that a file is in text format rather than binary.

    :param file: Path to the file
    :return: True if the file is in text format, False if it is binary
    """
    textchars = bytearray([7, 8, 9, 10, 12, 13, 27]) + bytearray(range(0x20, 0x100))
    is_plaintext = lambda _bytes: not bool(_bytes.translate(None, textchars))
    with open(file, "rb") as f:
        return is_plaintext(f.read(1024))
6a49486aa05e8627e7a0f6504e5c9b86c050df81
10,889
import sys
import os

def get_default_home_dir():
    """
    Return the home directory (valid on linux and windows)
    """
    if sys.platform != 'win32':
        return os.path.expanduser('~')

    def valid(path):
        if path and os.path.isdir(path):
            return True
        return False

    def env(name):
        return os.environ.get(name, '')

    # Try each fallback in turn until a valid directory is found.
    homeDir = env('USERPROFILE')
    if not valid(homeDir):
        homeDir = env('HOME')
    if not valid(homeDir):
        homeDir = '%s%s' % (env('HOMEDRIVE'), env('HOMEPATH'))
    if not valid(homeDir):
        homeDir = env('SYSTEMDRIVE')
    if not valid(homeDir):
        homeDir = 'C:\\'
    if homeDir and (not homeDir.endswith('\\')):
        homeDir += '\\'
    return homeDir
d7d6ead0552b43c80fa4eae02fa4bc62ca3159a6
10,890
def number_normalization(value, fromvalue, tovalue):
    """Normalize a number into the given range.

    value: the number to normalize.
    fromvalue: minimum of the range.
    tovalue: maximum of the range plus one.
    """
    if 0 == tovalue:
        return value
    if tovalue <= value or value < fromvalue:
        value -= (value // tovalue) * tovalue
    if value < fromvalue:
        value += tovalue
    return value
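Illustrative calls, e.g. wrapping angles into [0, 360):

print(number_normalization(370, 0, 360))  # 10
print(number_normalization(-30, 0, 360))  # 330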
912c515991246204ebc4d5eae8ffedb1c6d5823b
10,891
import getpass
import os
import bcrypt
from Crypto.Cipher import AES

def encrypt(message):
    """
    Encrypts a string using AES-256 (CBC) encryption

    A random initialization vector (IV) is padded as the initial 16 bytes
    of the string. The encrypted message will be padded to length%16 = 0
    bytes (AES needs 16 bytes block sizes).
    """
    print(u"\nPlease enter a password to encrypt the settings file:")
    hashed_password = bcrypt.hashpw(getpass.getpass().encode("utf-8"),
                                    bcrypt.gensalt()).decode("utf-8")
    with open(os.path.expanduser(u"~") + u"/.CanvasSync.pw", "w",
              encoding="utf-8") as pass_file:
        pass_file.write(hashed_password)
    # Generate random 16 bytes IV
    IV = os.urandom(16)
    # AES object; get_key_hash is assumed to be defined elsewhere in this
    # module (it derives the AES key from the hashed password).
    encrypter = AES.new(get_key_hash(hashed_password), AES.MODE_CBC, IV)
    # Padding to 16 bytes
    if len(message) % 16 != 0:
        message += " " * (16 - (len(message) % 16))
    # Add the unencrypted IV to the beginning of the encrypted_message
    encrypted_message = IV + encrypter.encrypt(message.encode("utf-8"))
    return encrypted_message
8d0df24ee297bdfad070cf0134a03d5c4f4eed87
10,892
def sets_diff(self: list, other: list, name: str, loc: str) -> list:
    """
    Function to compare the sets of two lists. Returns a list of diff
    strings containing name and location.

    :param self: list
    :param other: list
    :param name: str
    :param loc: str
    :return: list[str]
    """
    diffs = []
    self_extra = set(self) - set(other)
    if self_extra:
        diffs.append(f"+ {name}@{loc}: {list(self_extra)}")
    other_extra = set(other) - set(self)
    if other_extra:
        diffs.append(f"- {name}@{loc}: {list(other_extra)}")
    return diffs
04a295051f409f7748fca00ade9e509a74bcd188
10,893
def calculate_matmul_shape(matrix_A_dim, matrix_G_dim, split_dim):
    """get matmul shape"""
    split_dimA = split_dim
    split_dimG = split_dim
    if matrix_A_dim % split_dim == 0:
        batch_w = matrix_A_dim // split_dim
    else:
        if matrix_A_dim < split_dim:
            batch_w = 1
            split_dimA = matrix_A_dim
        else:
            batch_w = matrix_A_dim // split_dim + 1
    if matrix_G_dim % split_dim == 0:
        batch_h = matrix_G_dim // split_dim
    else:
        if matrix_G_dim < split_dim:
            batch_h = 1
            split_dimG = matrix_G_dim
        else:
            batch_h = matrix_G_dim // split_dim + 1
    matrix_A_shape = (batch_h, batch_w, split_dimA, split_dimA)
    matrix_G_shape = (batch_h, split_dimG, split_dimG)
    return matrix_A_shape, matrix_G_shape
54a036103a97b739d00c3bd416e7dc49b1cba9e2
10,895
def percolation_finder(m, max_num_percolation=6):
    """
    Step 3 percolation finder
    """
    walks = {'walk': []}  # Name binding

    def redundancy_index(v):
        n = len(v)
        occurrence_vector = [v.count(i) for i in range(n)]
        return max(occurrence_vector) - 1 + occurrence_vector.count(0) * 1.0 / n

    def scout(m, breadcrumbs):
        n_cols = m.shape[1]
        crumbs_number = len(breadcrumbs)
        # print(breadcrumbs)
        if crumbs_number < n_cols:
            col_index_arranged = (list(set(range(n_cols)) - set(breadcrumbs))
                                  + list(set(breadcrumbs)))
            for j in col_index_arranged:
                if m[crumbs_number, j] == 0:
                    # More scouts are launched only if some place in the
                    # walks' vector is available.
                    # Rearranging indexes may guarantee that a percolation
                    # with 0 redundancy index is stored in the first
                    # explorations, when available.
                    if len(walks['walk']) < 2 * max_num_percolation:
                        scout(m, breadcrumbs + [j])
        elif crumbs_number == n_cols:
            walks_recorder(breadcrumbs)

    def walks_recorder(v):
        ri = redundancy_index(v)
        walks['walk'] = walks['walk'] + [v] + [ri]

    first_zero = True
    for j in range(m.shape[1]):
        if m[0, j] == 0:
            if first_zero:
                # reset to [] the list of walks
                walks['walk'] = []
                first_zero = False
            # print('A scout goes in mission')
            scout(m, [j])
    if len(walks['walk']) == 0:
        raise TypeError('Input matrix has no 0-percolations.')
    return [m, walks['walk']]
16f33deb1bbf1e28fab268228e9690c6acad07bc
10,896
def validate_overlap(periods, datetime_range=False):
    """
    Receives a list with DateRange or DateTimeRange and returns True
    if periods overlap.

    This method considers that the end of each period is not inclusive:
    If a period ends in 15/5 and another starts in 15/5, they do not
    overlap. This is the default django-postgresql behaviour:
    https://docs.djangoproject.com/en/dev/ref/contrib/postgres/fields/#daterangefield
    """
    periods.sort()
    no_overlap = [True]
    for each in range(0, len(periods) - 1):
        latest_start = max(periods[each].lower, periods[each + 1].lower)
        earliest_end = min(periods[each].upper, periods[each + 1].upper)
        delta = earliest_end - latest_start
        if datetime_range:
            no_overlap.append(max(0, delta.total_seconds()) == 0)
        else:
            no_overlap.append(max(0, delta.days) == 0)
    return False if all(no_overlap) else True
bae96eb890063e4d27af0914e7fcd1348d1340a7
10,898
from pathlib import Path

def get_dump():
    """Helper for creating and returning a dump Path"""
    dump = Path("pyinseq/tests/dump")
    if not dump.exists():
        dump.mkdir()
    return dump
d34d149c6de235e58cbee222067b8023afcf8f93
10,900
import copy

def mutually_exclusive_group(group):
    """Decorator function for mutually exclusive :mod:`argparse` arguments.

    Args:
        group (list of tuples): A list of the standard :mod:`argparse`
            arguments which are mutually exclusive. Each argument is
            represented as a tuple of its args and kwargs.

    Returns:
        callable: A decorator that can be used to decorate a command function.
    """
    def arg_for_exclusive_group(default=None):
        """
        Args:
            default (str): The default argument

        Returns:
            callable: A decorator that can be used to decorate a command
                function.
        """
        def decorator(fn):
            if not hasattr(fn, "parser_mutually_exclusive_groups"):
                fn.parser_mutually_exclusive_groups = []
            grp = copy.deepcopy(group)
            if default:
                for args, kwargs in grp:
                    if default == args[0]:
                        kwargs["help"] += " (default)"
            fn.parser_mutually_exclusive_groups.append(grp)
            return fn
        return decorator
    return arg_for_exclusive_group
c6875840371c9a6fb6060c7148b734248a6f0b41
10,902
def _GetStepLogViewUrl(build, full_step_name, log_name, partial_match=False):
    """Gets view url of the requested log.

    Args:
        build (buildbucket_proto.build_pb2.Build proto): Information about
            a build.
        full_step_name (str): Full name of the step.
        log_name (str): Type of the log.
        partial_match (bool): If the step_name is not found among the steps
            in the builder, allow the function to retrieve the step log for
            a step whose name contains step_name as a prefix.

    Returns:
        (str): view_url of the requested log.
    """
    for step in build.steps or []:
        if step.name == full_step_name:
            for log in step.logs or []:
                if log.name.lower() == log_name:
                    return log.view_url
    if partial_match:
        for step in build.steps or []:
            if step.name.startswith(full_step_name):
                for log in step.logs or []:
                    if log.name.lower() == log_name:
                        return log.view_url
    return None
7e240e0414c8d83620d701d348d3386cb5054226
10,903
import itertools

def pairwise(iterable):
    """
    Iterate pairwise through an iterable.

    pairwise([1,2,3,4]) -> (1,2),(2,3),(3,4)
    """
    val, nextVal = itertools.tee(iterable)
    next(nextVal, None)
    return zip(val, nextVal)
495fedbaf2046d66bd791dc78dea8525761e01b1
10,905
import numpy

def lamb2(v, r):
    """Approximate the Lambert W function.

    Approximate the Lambert W function from its upper and lower bounds.
    The result replicates the triple point of the (idealized) system
    exactly, because lamb2(0,r) = 1.

    :arg float v: Modified argument of the function. Must be positive.
    :arg float r: Latent heat/heat capacity ratio. Must be positive.
    :returns: x-value in the Lambert function.
    """
    u0 = r - numpy.log(1 + r)
    negz = 1 + (2 * (u0 - v)) ** .5 + (r - (2 * u0) ** .5) / u0 * (u0 - v)
    x = negz / (1 + r)
    return x
358907f75de539d3da88935b0d470f72310c3b57
10,906
def create_mating_pool(population, fitnesses, norm=True):
    """Generate a mating pool

    This will create a new population proportional to the fitnesses
    of the original population. The pool will then be used as the basis
    for generating the next generation.

    Parameters
    ----------
    population : list of MiniMaxAgent
        The current population
    fitnesses : list of int
        The fitness values for each member of the population
    norm : bool
        True will apply basic normalization to the fitness values before
        creating the pool

    Returns
    -------
    list of MiniMaxAgent
        The mating pool with the frequency of each agent proportional to
        its fitness
    """
    if norm:
        mx = max(fitnesses)
        fitnesses = [int((f / mx) * 10.0) for f in fitnesses]
    pool = []
    for i, fitness in enumerate(fitnesses):
        for _ in range(fitness):
            pool.append(population[i])
    return pool
1ea329c334ffa54527aacf8fba7a33b55c927eb1
10,907
import os

def path_components(path):
    """Find all directories that make up a full path."""
    components = [path]
    current = path
    while current and current != '/':
        current, _ = os.path.split(current)
        components.append(current)
    return components
c36e15bc75735a60a254a0e203a2fc142497423b
10,908
def make_row(path1, path2, dt1, dt2, diff_man, diff_euc):
    """Make a list containing all row info.

    For Hive and RPi number, just add the two columns later.
    header = [...]
    """
    # # Get interval duration
    # td = (dt2 - dt1)
    # dur = td.total_seconds()
    # dt_center = dt1 + (td / 2)

    # Shorten paths
    # # relpath1 = rel_path(path1)
    # # relpath2 = rel_path(path2)
    relpath1 = path1.relative_to(path1.parent.parent)
    relpath2 = path2.relative_to(path2.parent.parent)

    row = [
        # dt_center, dur,  # Calculate columns later all at once..
        dt1, dt2,
        # path1, path2,
        relpath1, relpath2,
        diff_man, diff_euc,
    ]
    return row
21159e7d109b4019f632c2644354c97f8416a80f
10,909
def get_target_name(label):
    """
    Try to extract the target_name from a given PDS label.

    Parameters
    ----------
    label : Object
        Any type of pds object that can be indexed

    Returns
    -------
    target_name : str
        The defined target_name from the label. If this is None a target
        name could not be pulled from the label
    """
    try:
        target_name = label['TARGET_NAME']
    except KeyError:
        return None
    return target_name
7aae704d4590fb44bfb650481a65af870c7c6570
10,910
def _get_str_from_bin(src: bytearray) -> str:
    """Decode a bytearray into a string, stripping trailing whitespace.

    :param src: source to process
    :type src: bytearray
    :return: decoded string
    :rtype: str
    """
    return src.rstrip().decode(encoding="utf-8", errors="backslashreplace")
a51d562ea06ff80d3a7f7c10622eb599cd9a1e83
10,911
import os

def version_consistency(self):
    """Pipeline and container version number consistency.

    .. note:: This test only runs when the ``--release`` flag is set for
              ``nf-core lint``, or ``$GITHUB_REF`` is equal to ``master``.

    This lint fetches the pipeline version number from three possible
    locations:

    * The pipeline config, ``manifest.version``
    * The docker container in the pipeline config, ``process.container``
      (some pipelines may not have this set on a pipeline level; if it is
      not found, it is ignored)
    * ``$GITHUB_REF``, if it looks like a release tag
      (``refs/tags/<something>``)

    The test then checks that:

    * The container name has a tag specified (eg. ``nfcore/pipeline:version``)
    * The pipeline version number is numeric (contains only numbers and dots)
    * That the version numbers all match one another
    """
    passed = []
    failed = []

    # Get the version definitions
    # Get version from nextflow.config
    versions = {}
    versions["manifest.version"] = self.nf_config.get("manifest.version", "").strip(" '\"")

    # Get version from the docker tag
    if self.nf_config.get("process.container", "") and ":" not in self.nf_config.get("process.container", ""):
        failed.append(
            "Docker slug seems not to have a version tag: {}".format(
                self.nf_config.get("process.container", ""))
        )

    # Get config container tag (if set; one container per workflow)
    if self.nf_config.get("process.container", ""):
        versions["process.container"] = self.nf_config.get("process.container", "").strip(" '\"").split(":")[-1]

    # Get version from the $GITHUB_REF env var if this is a release
    if (
        os.environ.get("GITHUB_REF", "").startswith("refs/tags/")
        and os.environ.get("GITHUB_REPOSITORY", "") != "nf-core/tools"
    ):
        versions["GITHUB_REF"] = os.path.basename(os.environ["GITHUB_REF"].strip(" '\""))

    # Check if they are all numeric
    for v_type, version in versions.items():
        if not version.replace(".", "").isdigit():
            failed.append("{} was not numeric: {}!".format(v_type, version))

    # Check if they are consistent
    if len(set(versions.values())) != 1:
        failed.append(
            "The versioning is not consistent between container, release tag "
            "and config. Found {}".format(
                ", ".join(["{} = {}".format(k, v) for k, v in versions.items()]))
        )

    passed.append("Version tags are numeric and consistent between container, release tag and config.")

    return {"passed": passed, "failed": failed}
df037cedc3ca5a95488560d294fcf1b19eb49999
10,912
def unique(obj):
    """
    Unique with order
    """
    temp = set()
    return [x for x in obj if x not in temp and not temp.add(x)]
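The trick here relies on set.add returning None (falsy), so `not temp.add(x)` is always True and records x in the seen-set as a side effect. An illustrative call:

print(unique([3, 1, 3, 2, 1]))  # [3, 1, 2]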
2d4fc7a5ba39f1fcc84ccebc1f8604c8ca972e8c
10,913
from typing import List
from typing import Any

def flatten_list(x: List[Any]) -> List[Any]:
    """
    Converts a list of lists into a flat list.

    Args:
        x: list of lists

    Returns:
        flat list

    As per
    http://stackoverflow.com/questions/952914/making-a-flat-list-out-of-list-of-lists-in-python
    """  # noqa
    return [item for sublist in x for item in sublist]
b600cce1dc88869c60c80019f3be9ea5245cdda7
10,916
import argparse
import os

def parse_args():
    """Training Options for Depth Prediction Experiments"""
    parser = argparse.ArgumentParser(description='MXNet Gluon Monodepth2 Demo')
    # model and dataset
    parser.add_argument('--model_zoo', type=str,
                        choices=['monodepth2_resnet18_kitti_stereo_640x192',
                                 'monodepth2_resnet18_kitti_mono_640x192',
                                 'monodepth2_resnet18_kitti_mono_stereo_640x192'],
                        default='monodepth2_resnet18_kitti_mono_stereo_640x192',
                        help='choose depth model from model zoo model')
    parser.add_argument('--input_format', type=str, choices=['image', 'video'],
                        default='image', help='choose the format of input data')
    parser.add_argument("--data_path", type=str, help="path to the data")
    parser.add_argument("--height", type=int, help="input image height", default=192)
    parser.add_argument("--width", type=int, help="input image width", default=640)
    parser.add_argument('--prediction_only', action="store_true",
                        help='if true, just store pure prediction results')
    parser.add_argument('--use_depth', action="store_true",
                        help='use depth map as prediction results')
    parser.add_argument('--output_format', type=str, choices=['image', 'video'],
                        default='video', help='choose the format of output')
    parser.add_argument("--output_path", type=str, help="path to store the results",
                        default=os.path.join(os.path.expanduser("."), "tmp"))

    # the parser
    args = parser.parse_args()
    return args
3fbf77a2b56f9b0a8e7d9c8165a2d20101464ab8
10,918
def _get_const_info(const_index, const_list):
    """Helper to get optional details about const references

    Returns the dereferenced constant and its repr if the constant list
    is defined. Otherwise returns the constant index and its repr().
    """
    argval = const_index
    if const_list is not None:
        argval = const_list[const_index]
    return argval, repr(argval)
e495685a5d742f014cbe14cda23e2cc97ba2761d
10,919
def return_subset_number(pedigree):
    """
    Find out how many founders there are in the given file.
    """
    founder = 0
    with open(pedigree, 'r') as filein:
        for lines in filein:
            line = lines.rstrip().split()
            if line[2] == "0" and line[3] == "0":
                founder += 1
    return founder
090661dfbc367fe3974fe2dfa2069f97dd70ee75
10,921
def tool_no_apply(cols):
    """Gets the tool number from the PMC signal

    Explanation
    ===========
    Same explanation as in the cut_signal_apply function
    """
    pmc = cols[0]
    if (pmc - 64) > 0:
        return int(pmc - 64)
    else:
        return int(pmc)
8d094cabdab58bd009d9019de8896c772e5fc72a
10,922
def loop_until_choice_is_correct(msg_prompt: str, valid_choices: dict) -> str:
    """
    Loop until a valid value is provided, which must match a key from
    `valid_choices`.

    Args:
        msg_prompt (str): Prompt to display to the user to enter a choice.
        valid_choices (dict): Keys are valid choices.

    Returns:
        str: String corresponding to the value of the matching key in
        `valid_choices`.
    """
    choice = None
    while True:
        input_string = input(f"{msg_prompt}: ")
        if input_string not in valid_choices:
            print("Please enter a valid choice.")
        else:
            choice = valid_choices[input_string]
            break
    return choice
bb05f1d11b14611ffcada93629382bfb8a447fad
10,923
import argparse
import sys

def parse_args(args):
    """Parse command-line arguments.

    Parameters
    ----------
    args : list of strings
        command-line arguments.

    Returns
    -------
    options : :class:`argparse.ArgumentParser`
        Command line arguments.
    """
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('-s', '--spin', type=str, dest='spin', default=None,
                        help='Spin component to extract. Options: up/down')
    parser.add_argument('-t', '--type', type=str, dest='type', default=None,
                        help='Type of green\'s function to extract. '
                             'Options: lesser/greater')
    parser.add_argument('-k', '--kspace', dest='kspace', action='store_true',
                        default=False, help='Extract kspace green\'s function.')
    parser.add_argument('-e', '--elements',
                        type=lambda s: [int(item) for item in s.split(',')],
                        dest='elements', default=None,
                        help='Element to extract.')
    parser.add_argument('-o', '--observable', type=str, dest='obs',
                        default='None', help='Data to extract')
    parser.add_argument('-p', '--plot-energy', action='store_true',
                        dest='plot', default=False, help='Plot energy trace.')
    parser.add_argument('-f', nargs='+', dest='filename',
                        help='Space-separated list of files to analyse.')

    options = parser.parse_args(args)

    if not options.filename:
        parser.print_help()
        sys.exit(1)

    return options
f0c29be28a5a49422ee5bea7bb294cb0cb3a45fa
10,924
def joli_string(f):
    """Return a pretty string for the formula f."""
    operator = {
        'OU': 'u',
        'ET': 'n',
        'NON': '-',
        'IMPL': '=>',
        'EQ': '<=>',
        'VRAI': 'V',
        'FAUX': 'F'
    }
    n = f.nb_operandes()
    if n == 0:
        v = f.get_val()
        if v in operator:
            return operator[v]
        else:
            return v
    elif n == 1:
        f2 = (f.decompose())[0]
        op = operator[f.operateur()]
        return op + joli_string(f2)
    else:
        [f2, f3] = f.decompose()
        op = operator[f.operateur()]
        return '(' + joli_string(f2) + ') ' + op + ' (' + joli_string(f3) + ')'
bf2160481f444fa690c61750ba286fe1739d9291
10,926
import optparse
import sys

def parse_options():
    """Generate command line options."""
    ALLOWED_COMMANDS = ["list-vdis", "clean-vdis", "list-instances",
                        "clean-instances", "test"]
    arg_str = "|".join(ALLOWED_COMMANDS)
    parser = optparse.OptionParser("%prog [options] [" + arg_str + "]")
    parser.add_option("--verbose", action="store_true")
    options, args = parser.parse_args()
    if not args:
        parser.print_usage()
        sys.exit(1)
    return options, args
862d6881eb08ae088bfbdbd8b9f4165c5f1dbc47
10,927
def iget_list_column_slice(list_, start=None, stop=None, stride=None):
    """iterator version of get_list_column"""
    if isinstance(start, slice):
        slice_ = start
    else:
        slice_ = slice(start, stop, stride)
    return (row[slice_] for row in list_)
1c604be6c59d43fb1c2b1b1a3078f5e98105347f
10,928
def _generate_widget_parameters(bot_name, user_photo, size, corner_radius,
                                access_write):
    """Generate common widget embed code parameters."""
    user_photo_bool = str(user_photo).lower()
    data_telegram_login = 'data-telegram-login="{}" '.format(bot_name)
    data_size = 'data-size="{}" '.format(size)
    data_userpic = 'data-userpic="{}" '.format(user_photo_bool) if not user_photo else ''
    data_radius = 'data-radius="{}" '.format(corner_radius) if corner_radius else ''
    data_request_access = 'data-request-access="write"' if access_write else ''
    return (data_telegram_login, data_size, data_userpic, data_radius,
            data_request_access)
97132676a5e7218b1224e9be0fff4e178157c4ca
10,929
def repunctuate_character(letters, punctuation):
    """Apply the recorded punctuation to a character.

    The letters must be an iterator of base characters."""
    if punctuation == 'UPPER':
        return next(letters).upper()
    elif punctuation == 'LOWER':
        return next(letters).lower()
    else:
        return punctuation
92cd8b30466e19a8ce1278f0b81440399c7f809c
10,931
def get_tree_coords(tree):
    """Takes Phylo tree object and populates it with coordinates
    that can be used to plot the tree from scratch"""
    for _i, i in enumerate(tree.get_terminals()):
        i.y = _i
        i.x = tree.distance(i)
    for i in reversed(tree.get_nonterminals()):
        _ = i.clades
        i.y = (_[0].y + _[-1].y) / 2
        i.x = tree.distance(i)
    return tree
debd0a995dddab8a0287a3d553a0edeacef5e777
10,932
import torch

def _index(tensor_3d, tensor_2d):
    """This function is used to index a 3d tensor using a 2d tensor"""
    x, y, z = tensor_3d.size()
    t = tensor_3d.reshape(x * y, z)
    tt = tensor_2d.reshape(x * y)
    v = t[torch.arange(x * y), tt]
    v = v.reshape(x, y)
    return v
f2316f49a77fc094277a221318f9c0ede18da688
10,935
def split_set_by_point(coord_array, point):
    """
    coord_array : array of Coord
    point : Coord

    Return the array of the points on the left and the right of the point
    """
    left = []
    right = []
    for coord in set(coord_array):
        if coord.x < point.x or coord.y < point.y:
            left.append(coord)
        else:
            right.append(coord)
    return left, right
6a7ee4387f2bc74fcf06262bc4262351c66b3a1c
10,937
def replace_group_with_whitespace(match_obj):
    """
    :type match_obj: MatchObject
    """
    match_start, match_stop = match_obj.span(1)
    overall_start, overall_stop = match_obj.span(0)
    start_offset = match_start - overall_start
    stop_offset = (match_stop - match_start) + start_offset
    new_str = '{}{}{}'.format(match_obj.group(0)[:start_offset],
                              ' ' * (stop_offset - start_offset),
                              match_obj.group(0)[stop_offset:])
    return new_str
93a0328f1e3afae13422d2bbca12d9052f9bfb65
10,938
def generate_repr_code(repr, node, fields):
    """
    The CPython implementation is just:

        ['return self.__class__.__qualname__ + f"(' +
         ', '.join([f"{f.name}={{self.{f.name}!r}}" for f in fields]) +
         ')"'],

    The only notable difference here is
    self.__class__.__qualname__ -> type(self).__name__
    which is because Cython currently supports Python 2.
    """
    if not repr or node.scope.lookup("__repr__"):
        return "", {}, []
    code_lines = ["def __repr__(self):"]
    strs = [u"%s={self.%s!r}" % (name, name)
            for name, field in fields.items()
            if field.repr.value and not field.is_initvar]
    format_string = u", ".join(strs)
    code_lines.append(u'    name = getattr(type(self), "__qualname__", type(self).__name__)')
    code_lines.append(u"    return f'{name}(%s)'" % format_string)
    code_lines = u"\n".join(code_lines)
    return code_lines, {}, []
79b374d013d4bbffe8f2c78d94542f9ff5633b43
10,939
def offset(freqs, re0, im0):
    """Complex offset re + j*im.

    Freqs vector is ignored, but required for lmfit Model."""
    return re0 + 1j * im0
ae2b373806dee6e50fb61c4945b99f7ef7c9a6f7
10,941
def after_request(response):
    """Modifies the response object prior to sending it to the client.
    Used to add CORS headers to the request.

    Args:
        response (response): Flask response object

    Returns:
        The modified Flask response object
    """
    response.headers.add('Access-Control-Allow-Origin', '*')
    response.headers.add('Access-Control-Allow-Headers', 'Content-Type,Authorization')
    response.headers.add('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE')
    return response
96bf051001b0f9eb4a31f2ce101a23bae39c49c9
10,942
import re

def split_camel_cased(text):
    """
    Split camelCased elements with a space.

    :param text: the text to be processed.
    :type: str
    :return: text with all camelCased elements split into different elements
    :type: str
    """
    return re.sub('(?!^)([A-Z][a-z]+)', r' \1', text)
6de55ed7d8fc4bc06e0d16d4b327999ad656ceac
10,943
def get_dict_from_namespace_object(args):
    """Unwrap the namespace object to pull out a dict of values"""
    return vars(args)
e1c34c1faff71ae330a44d91a399260fbdc454c6
10,947