content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
def get_X_train(data, index_name):
    """Select feature columns from a dataframe by excluding named columns.

    Parameters
    ----------
    data : pandas.DataFrame
        Input data stream.
    index_name : list of str
        Column names to remove from the feature set.

    Returns
    -------
    tuple
        (features, X_train) where ``features`` is the list of kept column
        names and ``X_train`` is ``data`` restricted to those columns.
    """
    features = [col for col in data.columns.values if col not in index_name]
    X_train = data[features]
    return features, X_train
0fe32aa2f1e2c28097ba67887774b656a92f23b3
78,808
def extract_feature_target(df, target, todf=True, exclusions=None):
    """Split a dataframe into feature matrix X and target y.

    Parameters
    ----------
    df : pandas.DataFrame
        Input data.
    target : str
        Name of the target column.
    todf : bool, optional
        If True (default) return X and y as pandas objects; if False
        return ndarrays with y flattened to 1-D.
    exclusions : list of str, optional
        Column names to drop from the feature set (default: none).

    Returns
    -------
    tuple
        (X, y, features) where ``features`` lists the feature column
        names in their original dataframe order.
    """
    if exclusions is None:
        exclusions = []
    dropped = set(exclusions) | {target}
    # Preserve the dataframe's column order; the previous set arithmetic
    # made the feature order nondeterministic between runs.
    features = [col for col in df.columns if col not in dropped]
    if not todf:
        # ndarray output: y flattened to shape (n,)
        y = df[[target]].to_numpy().reshape(-1)
        X = df[features].values
    else:
        y = df[[target]]
        # Use `features` here too: previously excluded columns leaked
        # into X when todf=True.
        X = df[features]
    return (X, y, features)
abac6e0495cde1d3ebc2d02c61e43a081afaf1bf
78,809
def normalize_hu(image):
    """Linearly rescale Hounsfield units so [-1000, 400] maps to [0, 1].

    Tissue with Hounsfield units between air (-1000) and bone (400) is
    scaled to between 0 and 1; values outside that range are clipped.

    Args:
        image: numpy.array

    Returns:
        numpy.array
    """
    MIN_BOUND = -1000.0  # Air
    MAX_BOUND = 400.0    # Bone
    scaled = (image - MIN_BOUND) / (MAX_BOUND - MIN_BOUND)
    return scaled.clip(0.0, 1.0)
78a582b5ab866f77303bdbb4155cb66563255e78
78,813
def select_points_in_box(xy, dim):
    """Return only the points lying inside (or on) a rectangular box.

    Parameters
    ----------
    xy : ndarray
        (n, 2) array of point locations.
    dim : tuple or float
        Box given as (x0, y0, dx, dy), or a single number for a square
        box jutting out from the origin.

    Returns
    -------
    ndarray
        The rows of `xy` that fall within the box.
    """
    if type(dim) in (float, int):
        dim = (0, 0, dim, dim)
    xlo, ylo = dim[0], dim[1]
    xhi, yhi = dim[0] + dim[2], dim[1] + dim[3]
    inside = ((xy[:, 0] >= xlo) & (xy[:, 0] <= xhi)
              & (xy[:, 1] >= ylo) & (xy[:, 1] <= yhi))
    return xy[inside]
40a03bc7da810ec857732e5e5ac71105632b293e
78,815
def exponential_growth(level, constant=1):
    """Number of samples in an exponentially growing 1D quadrature rule.

    Parameters
    ----------
    level : integer
        The level of the quadrature rule.
    constant : integer, optional
        Growth multiplier (level 0 always yields a single sample).

    Return
    ------
    num_samples_1d : integer
        The number of samples in the quadrature rule.
    """
    return 1 if level == 0 else constant * 2 ** (level + 1) - 1
0ca35e51b3397cfce25272f7447ba2b88f96964c
78,818
def create_mpls_interface_desc(router_name, line_id, role):
    """Build an interface description for an attached MPLS router.

    Pattern: <MPLS_PROVIDER><ROLE><MPLS><ROUTER_NAME><LINE_ID>
    Role must be "main" or "backup" (case-insensitive); any other role
    yields None.
    """
    normalized = role.lower()
    if normalized not in ("main", "backup"):
        return None
    return f'<MPLS_PROVIDER><{normalized.upper()}><MPLS><{router_name}><{line_id}>'
4c43f530721ade7238e55136c37bccde17d503e3
78,821
def command(table):
    """Returns a function object to be used as a decorator for making commands.

    `table` is a dict mapping command names to entries. The returned
    factory accepts:

    - name: the command name used as the table key.
    - options: iterable of tuples defining command arguments (see
      ``mercurial.fancyopts.fancyopts()`` for the tuple format).
    - synopsis: short one-line usage summary shown in help output.
    - norepo: command does not require a local repository (default False).
    - optionalrepo: command optionally requires a local repository.
    - inferrepo: try to find a repository from the command line arguments
      (see ``findrepo()``); if one is found, it will be used.
    """
    def cmd(name, options=(), synopsis=None, norepo=False,
            optionalrepo=False, inferrepo=False):
        def decorator(func):
            # Repository-handling flags are attached to the function itself.
            func.norepo = norepo
            func.optionalrepo = optionalrepo
            func.inferrepo = inferrepo
            entry = (func, list(options))
            if synopsis:
                entry += (synopsis,)
            table[name] = entry
            return func
        return decorator
    return cmd
e50aaac7c4546bd575786fb8639fe7e546664913
78,822
import random

def TallestPareto(iters=2, n=10000, xmin=100, alpha=1.7):
    """Find the tallest person in Pareto World.

    iters: how many samples to generate
    n: how many in each sample
    xmin: parameter of the Pareto distribution
    alpha: parameter of the Pareto distribution
    """
    tallest = 0
    for _ in range(iters):
        # Draw one sample of size n and track the running maximum.
        sample_max = max(xmin * random.paretovariate(alpha) for _ in range(n))
        tallest = max(tallest, sample_max)
    return tallest
01b5927e3e34f4a94344eb370c304af6ba2f2594
78,831
import random

def random_disjoint_interval(start, end, avoid_start, avoid_end):
    """Sample uniformly from [start, avoid_start] U [avoid_end, end].

    A single uniform draw is taken over an interval shortened by the
    excluded gap; draws landing past the gap's start are shifted over it.
    """
    gap = avoid_end - avoid_start
    value = random.uniform(start, end - gap)
    return value + gap if value > avoid_start else value
895b2c9e6d757cb0d0cce69e32650d62233d044c
78,832
def numPositionsInRing(ring):
    """Number of positions in ring (starting at 1) of a hex lattice."""
    if ring == 1:
        # The central "ring" is a single position.
        return 1
    return 6 * (ring - 1)
5f5dfedf7f5ae43f239afa195beeb80bea8a75a7
78,836
def is_task_terminal(task):
    """Return whether a given mesos task is terminal.

    Terminal states are documented in
    http://mesos.apache.org/api/latest/java/org/apache/mesos/Protos.TaskState.html

    :param task: the task to be inspected
    :returns: a boolean indicating if the task is considered to be in a
        terminal state
    """
    terminal_states = ('TASK_ERROR', 'TASK_KILLED', 'TASK_FAILED',
                       'TASK_FINISHED')
    return task['state'] in terminal_states
4e6bc8b98a9a0db71917e79f138f4b76c06d546a
78,839
def _partition(iterable, n): """Partition an iterable into tuples.""" it = iter(iterable) z = (it,) * n return zip(*z)
507472578b68f7933ddf0f654c98adc73918cfc6
78,847
def is_loopback(host):
    """Return True if the IP or the host is a loopback.

    Return False if not.
    """
    # startswith accepts a tuple of prefixes ('127...' for IPv4, '::1' IPv6).
    return host.startswith(('127', '::1'))
236ee11c738aff00e1a1531dcb1360a4d2c378a0
78,851
def string_to_boolean(string: str) -> bool:
    """Convert string representations of TRUE/FALSE to a boolean value.

    :param string: the string to convert.
    :return: _result
    :rtype: bool
    """
    truthy = {"true", "yes", "t", "y"}
    return str(string).lower() in truthy
4961a9541160f2ebfc02a4d3b8a42c2699600719
78,852
def apply_perm(omega, fbn):
    """Permute ``omega`` in place according to a factorial-base number.

    ``fbn`` is a list representing a factorial base number. Each digit g
    at position m right-rotates the slice ``omega[m:m+g+1]`` by one step
    (the last element of the slice moves to its front), as in the
    Rosetta Code task's pseudo code.
    """
    for pos, digit in enumerate(fbn):
        if digit > 0:
            # Right-rotate by one: last element of the slice goes first.
            omega[pos:pos + digit + 1] = (
                [omega[pos + digit]] + omega[pos:pos + digit]
            )
    return omega
b1d10b4777b1416ba4030564e15c3af9efabbd58
78,853
import re def _empty_readability_check(summary: str) -> bool: """Check if the readability summary is actually an empty entry or not.""" # It must be smaller than 1k chars, and contain a body tag, which should # not be there as summary should strip it. return (len(summary) > 1000 or not bool(re.match(r'<body.+</body>', summary, re.MULTILINE | re.DOTALL)))
e07fb4c9a203e987761e5909f1bae3bbcc0807fa
78,860
def get_scorer_bodyparts(tracking):
    """
    Given the tracking data hierarchical df from DLC,
    return the scorer and bodyparts names.

    Parameters
    ----------
    tracking : pandas.DataFrame
        DeepLabCut output whose columns form a two-level MultiIndex
        (scorer, bodypart).

    Returns
    -------
    tuple
        (scorer, bodyparts) index levels.

    Raises
    ------
    NotImplementedError
        If `tracking` does not look like a DLC dataframe.
    """
    first_frame = tracking.iloc[0]
    try:
        bodyparts = first_frame.index.levels[1]
        scorer = first_frame.index.levels[0]
    except Exception as exc:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; chain the cause for debuggability.
        raise NotImplementedError(
            "Make this return something helpful when not DLC df"
        ) from exc
    return scorer, bodyparts
b5449d3f18ef89f1902a8c0e0fbcf19ca294f2f3
78,863
def center_coordinates(coords, size, binning=1):
    """Shift particle coordinates so the origin is the tomogram center.

    Args:
        coords: the (x, y, z) coordinates for the particle
        size: the reconstruction MRC half-dimensions in (nx/2, ny/2, nz/2)
            form
        binning: the bin factor from the original stack to the final
            reconstruction

    Returns:
        the new coordinates as a (x, y, z) tuple
    """
    return tuple(float(c) / binning - half for c, half in zip(coords, size))
f989595d5cfaf4cf9d391e316dee20dfa155e508
78,865
def get_child_attr(obj, hier_attr_name):
    """Traverse `obj` along a dotted attribute path; return the leaf value.

    Args:
        obj: object to start the traversal from.
        hier_attr_name (str): e.g., attr_1.attr_2.attr_3

    Return:
        hierarchical child attribute value.
    """
    # Iterative walk instead of recursion; one getattr per path segment.
    current = obj
    for attr in hier_attr_name.split("."):
        current = getattr(current, attr)
    return current
9cb0d56d6606c9e6f4fe7b3b8fc81d54120564c9
78,870
def get_all_tokens_in_dataset(X_train_tokenized, X_test_tokenized):
    """returns a list including all unique tokens in train and test set

    Arguments:
        X_train_tokenized {DataFrame} -- train set
        X_test_tokenized {DataFrame} -- test set

    Returns:
        list -- list of unique tokens
    """
    def _unique_tokens(frame):
        # Each cell holds a list of tokens; flatten across all cells.
        return {token for cell in frame.values.flatten() for token in cell}

    return list(_unique_tokens(X_train_tokenized)
                | _unique_tokens(X_test_tokenized))
8216205654f5102476d88f3158b8365919fca3ab
78,872
import re def _FindAllMatches(regex, s): """Generates all matches of regex in string s.""" r = re.compile(regex) return r.finditer(s)
ea2e8be1684fbb935c7ccad9f3020d31f943ebe4
78,876
def cartes(seq0, seq1, modus='pair'):
    """
    Return the Cartesian product of two sequences

    >>> from sympy.utilities.iterables import cartes
    >>> cartes([1,2], [3,4])
    [[1, 3], [1, 4], [2, 3], [2, 4]]
    """
    if modus == 'pair':
        return [[a, b] for a in seq0 for b in seq1]
    if modus == 'triple':
        # Items of seq0 are themselves lists; append each seq1 item.
        return [a + [b] for a in seq0 for b in seq1]
4e555da9ad8b674ca4fbc4f14fd44ccde8fa5da8
78,877
import torch
def gen_cycles(num_cycles, batch_size, cycle_length=2):
    """Generates cycles for alignment.

    Generates a batch of indices to cycle over. For example setting
    num_cycles=2, batch_size=5, cycle_length=3 might return something like
    this: cycles = [[0, 3, 4, 0], [1, 2, 0, 3]]. This means we have 2 cycles
    for which the loss will be calculated. The first cycle starts at sequence
    0 of the batch, then we find a matching step in sequence 3 of that batch,
    then we find matching step in sequence 4 and finally come back to
    sequence 0, completing a cycle.

    Args:
        num_cycles: Integer, Number of cycles that will be matched in one
            pass.
        batch_size: Integer, Number of sequences in one batch.
        cycle_length: Integer, Length of the cycles. If we are matching
            between 2 sequences (cycle_length=2), we get cycles that look
            like [0,1,0]. This means that we go from sequence 0 to sequence
            1 then back to sequence 0. A cycle length of 3 might look like
            [0, 1, 2, 0].

    Returns:
        cycles: Tensor, Batch indices denoting cycles that will be used for
            calculating the alignment loss.
    """
    # Tile [0..batch_size-1] once per cycle: shape (num_cycles, batch_size).
    sorted_idxes = torch.arange(batch_size).unsqueeze(0).repeat([num_cycles, 1])
    # NOTE(review): this view interleaves rows unless
    # num_cycles == batch_size — presumably intentional pre-shuffle mixing,
    # but confirm with callers.
    sorted_idxes = sorted_idxes.view([batch_size, num_cycles])
    # Randomly permute the rows, then reinterpret as (num_cycles, batch_size).
    cycles = sorted_idxes[torch.randperm(len(sorted_idxes))].view([num_cycles, batch_size])
    # Keep the first `cycle_length` indices of each cycle ...
    cycles = cycles[:, :cycle_length]
    # ... and close each cycle by re-appending its starting index.
    cycles = torch.cat([cycles, cycles[:, 0:1]], dim=1)
    return cycles
189c38733fba27b7cc4187491a457d89808871c6
78,879
def contains_names(texnode, names):
    """
    Returns True if `texnode` (env or BraceGroup) contains one of the
    `names`. Stops searching as soon as one is found.
    """
    return any(texnode.find(name) for name in names)
b6f9310e4de5ae6384b1cdc4e24521aa6b3006b2
78,880
import logging

def get_csv_files_from_index(index_file):
    """Read the csv filenames from an index file.

    The first line of the file is a version number and is skipped. Each
    remaining line is either "filename,tablename" or just a table name,
    in which case the filename is derived as "<name>.csv".

    Returns a list of [filename, tablename] pairs, or None if the index
    file could not be opened.
    """
    try:
        fh = open(index_file, 'r')
    except Exception as e:
        logging.info("Failed to open index file '%s': %s", index_file, e)
        return
    results = []
    # `with` guarantees the handle is closed even if parsing raises;
    # the original leaked the handle on any error mid-iteration.
    with fh:
        for lineno, line in enumerate(fh):
            if lineno == 0:
                # Skip the first line, it's a useless version number.
                continue
            # Remove newlines at the end.
            line = line.rstrip('\r\n')
            # A comma means the line carries both filename and tablename.
            fields = line.split(',')
            if len(fields) > 1:
                filename, tablename = fields[0], fields[1]
            else:
                tablename = fields[0]
                filename = fields[0] + ".csv"
            results.append([filename, tablename])
    return results
6207516b205438ec9a3ad3c1d900abe33df49272
78,882
def unpack(iterable):
    """Flatten one level of nesting from an iterable of iterables."""
    return [item for group in iterable for item in group]
8233aa4f07c6d6f59a739c827e645ee4afe9db74
78,886
def create_statistics_from_ids(ids):
    """Create a statistics dictionary from a dict of ids/details for items
    added, modified and already_there."""
    # Only modified items whose data actually changed are counted.
    modified_count = sum(1 for item in ids["modified"] if item["data_changed"])
    return {
        "added": len(ids["added"]),
        "modified": modified_count,
        "already_there": len(ids["already_there"]),
    }
8bf5ade7eb727bcc600d4fcd08d68e60410e19b9
78,889
def get_model_to_author_dict(model_name_tuples, lower=True):
    """Get a dictionary of model names mapped to author names.

    Args:
        model_name_tuples (list or set): An iterable of ModelNames tuples
            of (model_name, author_name).
        lower (bool, optional): Make all output names lower-case.
            Defaults to True.

    Returns:
        dict: Dictionary mapping model names to lists of author names.
    """
    output = {}
    for model_name, author_name in model_name_tuples:
        if lower:
            model_name = model_name.lower()
            author_name = author_name.lower()
        # setdefault creates the list on first sight of a model name.
        output.setdefault(model_name, []).append(author_name)
    return output
b489d9d0c0c518dfe9d993552eee052b066f8c8f
78,892
from typing import Optional
from typing import Tuple
import inspect
from pathlib import Path

def get_call_site(frames_back: int) -> Optional[Tuple[str, int]]:
    """
    Returns the POSIX path of the call site file, and its line number.

    :param frames_back: number of frames to look back in the call stack
        for the call site of interest. For example 1 is the call site of
        get_call_site itself, 2 is the call site of the function that
        called get_call_site, and so on.
    """
    frame = inspect.stack()[frames_back]
    return Path(frame.filename).as_posix(), frame.lineno
a219328748365115fada865bcc466751916a913e
78,894
def decode_text(text):
    """Decode a given text string to an Unicode string.

    If `text` can't be decoded to a common encoding, it will be decoded
    to UTF-8 passing the "replace" options.
    """
    for encoding in ('utf-8', 'iso-8859-15', 'iso-8859-1', 'ascii'):
        try:
            return text.decode(encoding)
        except UnicodeDecodeError:
            pass
    # fallback: never fails, undecodable bytes become U+FFFD
    return text.decode('utf-8', 'replace')
e83f68ed817c59b2a8fdd0af01c75a80066402ae
78,899
import math import collections def _GetNestingMap(large_k, small_k): """Given two group sizes, computes a "nesting map" between groups. This function will produce a bipartite map between two sets of "group nodes" that will be used downstream to partition nodes in a bigger graph. The map encodes which groups from the larger set are nested in certain groups from the smaller set. As currently implemented, nesting is assigned as evenly as possible. If large_k is an integer multiple of small_k, each smaller-set group will be mapped to exactly (large_k/small_k) larger-set groups. If there is a remainder r, the first r smaller-set groups will each have one extra nested larger-set group. Args: large_k: (int) size of the larger group set small_k: (int) size of the smaller group set Returns: nesting_map: (dict) map from larger group set indices to lists of smaller group set indices """ min_multiplicity = int(math.floor(large_k / small_k)) max_bloated_group_index = large_k - small_k * min_multiplicity - 1 nesting_map = collections.defaultdict(list) pos = 0 for i in range(small_k): for _ in range(min_multiplicity + int(i <= max_bloated_group_index)): nesting_map[i].append(pos) pos += 1 return nesting_map
d98fa2d8af8aef3bc290183baaafe67932874808
78,900
def naka_rushton_no_b(c, a, c50, n):
    """Naka-Rushton contrast-response function without a baseline term.

    The baseline firing rate (b) is not fitted — typical for pyramidal
    cells.

    Where:
        c = contrast
        a = Rmax (max firing rate)
        c50 = contrast response at 50%
        n = exponent
    """
    contrast_term = c ** n
    return a * contrast_term / (c50 ** n + contrast_term)
e218dc2613ad7f48bfc3407f5099ed65c6bd49c6
78,910
def remove_thousands_separator(lines):
    """Remove thousands separator.

    e.g. 1,000,00 becomes 1000000
    """
    def _strip_separators(s):
        return s.replace(',', '').rstrip()

    # Returns a lazy map object, matching the original behavior.
    return map(_strip_separators, lines)
22d934bf4e601e6238278ee988fcab4c3ea7d330
78,912
def get_station_mic_output_pair_ui_name(pair):
    """Gets the UI name of one (station, microphone output) pair."""
    station, mic_output = pair
    name = mic_output.name
    suffix = ' Output'
    # Drop a trailing " Output" from the microphone output name.
    if name.endswith(suffix):
        name = name[:-len(suffix)]
    return '{} / {}'.format(station.name, name)
930445541ee173e27d329d8e6a810540e2276b75
78,913
def _format(self, json_body=False, **format_args): """Get endpoint format values. It caches the format values of endpoints and returns the same object. json_body -- boolean, if set to True all the parameters will be sent as JSON, otherwise the parameters will be sent as query string. format_args -- **kwargs, used to format the endpoint paths strings defined at the inner Meta class. """ self.format_args = format_args self.json_body = json_body return self
53dd66bbf4936e6c2639a60294fb0217595ff395
78,916
def meeting(members: str) -> str:
    """Sort members by last name.

    Args:
        members: <str> "First:Last" members separated by ';'

    Returns:
        <str> concatenated "(LAST, FIRST)" entries, upper-cased and
        sorted lexicographically.

    Examples:
        >>> s = "Fred:Corwill;Wilfred:Corwill;Barney:Tornbull;Betty:Tornbull;Bjon:Tornbull;Raphael:Corwill;Alfred:Corwill"
        >>> assert meeting(s) == "(CORWILL, ALFRED)(CORWILL, FRED)(CORWILL, RAPHAEL)(CORWILL, WILFRED)(TORNBULL, BARNEY)(TORNBULL, BETTY)(TORNBULL, BJON)"
    """
    entries = []
    for fullname in members.upper().split(";"):
        first, last = fullname.split(":")
        entries.append("({}, {})".format(last, first))
    return "".join(sorted(entries))
4e250a98ac71773ea78c870b47a723b615487f73
78,919
from typing import List

def helper_get_subsequences(s: str) -> List[str]:
    """Helper function to get subsequences from a string.

    Returns every contiguous word span except the full sequence itself;
    sequences of two or fewer words have no proper subsequences.
    """
    words = s.split()
    n = len(words)
    if n <= 2:
        return []
    subsequences = []
    for start in range(n + 1):
        for stop in range(start + 1, n + 1):
            if start == 0 and stop == n:
                # Skip the full sequence itself.
                continue
            subsequences.append(" ".join(words[start:stop]))
    return subsequences
65e5102f25b6a6ebb9d3741d3447a9a2b2d52063
78,923
def get_unique_user_lexeme(data_dict):
    """Get all unique (user, lexeme) pairs, sorted."""
    pairs = {(u_id, lexeme)
             for u_id, lexemes in data_dict.items()
             for lexeme in lexemes}
    return sorted(pairs)
296157bf1724b30781b18444d56d53a722bc59d5
78,924
def whichMatched(matches, data, show_duplicates=True):
    """Convert the output of Matches to a DataFrame of all matched
    observations.

    Parameters
    ----------
    matches : Match
        Match class object with matches already fit
    data : DataFrame
        Dataframe with unique rows, for which we want to create new
        matched data. This may be a dataframe of covariates, treatment,
        outcome, or any combination.
    show_duplicates : bool
        Should repeated matches be included as multiple rows? Default is
        True. If False, then duplicates appear as one row but columns of
        weights and frequencies are added.

    Returns
    -------
    DataFrame containing only the treatment group and matched controls,
    with the same columns as input data
    """
    if show_duplicates:
        # Repeat each row index according to its match frequency.
        indices = [i for i, count in enumerate(matches.freq)
                   for _ in range(count)]
        return data.iloc[indices]
    weighted = data.copy()
    weighted['weights'] = matches.weights
    weighted['frequency'] = matches.freq
    return weighted.loc[weighted['frequency'] > 0]
4d91643073333f3072a2fe3c85811b78b1afa88b
78,926
def build_base_url(host, port, protocol):
    """
    Build the base URL for the given parameters and do not explicitly
    put the standard HTTP(s) ports into the URL.
    """
    url = "%s://%s" % (protocol, host)
    scheme = protocol.lower()
    standard_ports = {"http": 80, "https": 443}
    # Only http/https ever get an explicit port, and only when non-standard.
    if scheme in standard_ports and int(port) != standard_ports[scheme]:
        url += ":%d" % int(port)
    return url + "/"
5c3d916201f4706b1c8ff36a39eeee6280d60580
78,934
import random import re def sometimes_replace_1x_with_x(expr, var_name): """ Half of the time converts "1x" to "x". "31x", "461x", "4.1x", etc will always remain the same. """ if random.choice([0, 1]): expr = re.sub(r'(?<![0-9.])1{}'.format(var_name), r'{}'.format(var_name), expr) return expr
b5d3d2bd023e0803d504a5a19bc3ad4e40bdb133
78,935
def _get_attr(pyroute2_obj, attr_name): """Get an attribute in a pyroute object pyroute2 object attributes are stored under a key called 'attrs'. This key contains a tuple of tuples. E.g.: pyroute2_obj = {'attrs': (('TCA_KIND': 'htb'), ('TCA_OPTIONS': {...}))} :param pyroute2_obj: (dict) pyroute2 object :param attr_name: (string) first value of the tuple we are looking for :return: (object) second value of the tuple, None if the tuple doesn't exist """ rule_attrs = pyroute2_obj.get('attrs', []) for attr in (attr for attr in rule_attrs if attr[0] == attr_name): return attr[1] return
3d1c4419f9612c6842456ead521da542a1e77e9d
78,939
def _version_supports_flavor(python_version, flavor): """ Whether a `python_version` is compatible with a flavor """ return python_version.flavor.endswith(flavor)
99e030b3264df9fd9ae8c109ac8b874b034a0505
78,946
def normalize_line(groups):
    """
    Takes match groups and uppercases them if they're not None.
    """
    return [g if g is None else g.upper() for g in groups]
1e61d65dc44d9bc48d2b482335bf968cc86fb3c5
78,949
def ArcAngle(arc) -> float:
    """Converts the angle returned by pcbnew arc.GetAngle() to degrees.

    pcbnew reports the angle multiplied by 10 (tenths of a degree),
    hence the division.

    Args:
        arc (pcbnew.DRAWSEGMENT): the arc to operate on

    Returns:
        float: angle in degrees
    """
    tenths_of_degree = arc.GetAngle()
    return tenths_of_degree / 10.0
cf72200dd54e2da19cabde9e54a1473ce942ab9f
78,950
def path_contains_data(bucket, root_path, min_file_size=0, file_extension=None):
    """Check whether any file under this path is larger than a threshold.

    Args:
        bucket (boto.s3.bucket.Bucket): bucket within which to check.
        root_path (str): path relative to the bucket; wildcards are not
            supported.
        file_extension (str): optional filter for file type, e.g. '.gz'
            restricts the check to gzip files within the path.
        min_file_size (int): minimum size in bytes; files of exactly this
            size are excluded (useful to skip empty .gz files).

    Returns:
        bool
    """
    return any(
        key.size > min_file_size
        for key in bucket.list(root_path)
        if not file_extension or key.name.endswith(file_extension)
    )
0d917ede77a7a0959516ee1ab3519858c65c885d
78,953
def get_runs_for_order_and_machine(session, ts, order_id, machine_id):
    """Collect all the runs for a particular order/machine combo.

    :param session: database session used to issue the query
        (presumably a SQLAlchemy Session — confirm with callers).
    :param ts: test-suite namespace exposing the `Run` model.
    :param order_id: id the runs' `order_id` must equal.
    :param machine_id: id the runs' `machine_id` must equal.
    :return: list of matching `ts.Run` rows.
    """
    # Chained .filter() calls are ANDed together: both the machine and
    # the order must match.
    runs = session.query(ts.Run) \
        .filter(ts.Run.machine_id == machine_id) \
        .filter(ts.Run.order_id == order_id) \
        .all()
    return runs
2dd677afef4da00c8952ff3901cabfa6bc8a03ad
78,954
import torch
def log1p(input_):
    """Wrapper of `torch.log1p`.

    Parameters
    ----------
    input_ : DTensor
        Input tensor; the raw torch tensor is read from its private
        `_data` attribute.
    """
    # NOTE(review): returns the plain torch result of log(1 + x), not a
    # re-wrapped DTensor — confirm callers expect the unwrapped value.
    return torch.log1p(input_._data)
f7552f4d2e672878159987b238852be540e23f62
78,956
def indent(instruction, cmd, line_suffix=' \\'):
    """Add Docker instruction and indent command.

    Parameters
    ----------
    instruction : str
        Docker instruction for `cmd` (e.g., "RUN").
    cmd : str
        The command (lines separated by newline character).
    line_suffix : str
        The suffix to append to each line except the last one.

    Returns
    -------
    dockerfile_chunk : str
        Instruction compatible with Dockerfile syntax.
    """
    instruction = instruction.upper()
    # Continuation lines are aligned under the command, past "INSTR ".
    padding = ' ' * (len(instruction) + 1)
    lines = cmd.splitlines()
    if len(lines) == 1:
        return "{} {}".format(instruction, cmd)
    last = len(lines) - 1
    # Resolves the old in-code TODO: assemble via str.join instead of +=.
    chunks = []
    for i, line in enumerate(lines):
        prefix = "{} ".format(instruction) if i == 0 else padding
        suffix = '' if i == last else line_suffix
        chunks.append(prefix + line + suffix)
    return "\n".join(chunks)
5432ecd582142b8f3acc6cbb90c4e5b830aa6d91
78,958
import signal
def name2signal(string):
    """Converts a signal name to canonical form.

    Signal names are recognized without regard for case:

      >>> name2signal('sighup')
      'SIGHUP'
      >>> name2signal('SigHup')
      'SIGHUP'
      >>> name2signal('SIGHUP')
      'SIGHUP'

    The leading 'SIG' is not required::

      >>> name2signal('hup')
      'SIGHUP'
      >>> name2signal('HUP')
      'SIGHUP'

    Names that are not known cause an exception to be raised::

      >>> name2signal('woohoo')
      Traceback (most recent call last):
      ValueError: could not convert 'woohoo' to signal name

      >>> name2signal('sigwoohoo')
      Traceback (most recent call last):
      ValueError: could not convert 'sigwoohoo' to signal name

    Numeric values are converted to names as well::

      >>> name2signal(str(signal.SIGHUP))
      'SIGHUP'

    Numeric values that can't be matched to any signal known to Python
    are treated as errors::

      >>> name2signal('-234')
      Traceback (most recent call last):
      ValueError: unsupported signal on this platform: -234

      >>> name2signal(str(signal.NSIG))  #doctest: +ELLIPSIS
      Traceback (most recent call last):
      ValueError: unsupported signal on this platform: ...

    Non-signal attributes of the signal module are not mistakenly
    converted::

      >>> name2signal('_ign')
      Traceback (most recent call last):
      ValueError: could not convert '_ign' to signal name

      >>> name2signal('_DFL')
      Traceback (most recent call last):
      ValueError: could not convert '_DFL' to signal name

      >>> name2signal('sig_ign')
      Traceback (most recent call last):
      ValueError: could not convert 'sig_ign' to signal name

      >>> name2signal('SIG_DFL')
      Traceback (most recent call last):
      ValueError: could not convert 'SIG_DFL' to signal name

      >>> name2signal('getsignal')
      Traceback (most recent call last):
      ValueError: could not convert 'getsignal' to signal name
    """
    try:
        v = int(string)
    except ValueError:
        # Name path: reject anything with an underscore up front, which
        # filters out handler constants (SIG_IGN/SIG_DFL) and private
        # attributes of the signal module.
        if "_" in string:
            raise ValueError("could not convert %r to signal name" % string)
        s = string.upper()
        if not s.startswith("SIG"):
            s = "SIG" + s
        # Only integer-valued SIG* attributes are genuine signals; this
        # also excludes module functions like `getsignal`.
        v = getattr(signal, s, None)
        if isinstance(v, int):
            return s
        raise ValueError("could not convert %r to signal name" % string)
    # Numeric path: map the number back to a canonical SIG* name.
    if v >= signal.NSIG:
        raise ValueError("unsupported signal on this platform: %s" % string)
    for name in dir(signal):
        if "_" in name:
            continue
        if getattr(signal, name) == v:
            return name
    raise ValueError("unsupported signal on this platform: %s" % string)
f30fbc4536177857596d85a93784db9732b1d916
78,963
def _flatten_tools_info(tools_info): """ Flatten the dict containing info about what tools to install. The tool definition YAML file allows multiple revisions to be listed for the same tool. To enable simple, iterattive processing of the info in this script, flatten the `tools_info` list to include one entry per tool revision. :type tools_info: list of dicts :param tools_info: Each dict in this list should contain info about a tool. :rtype: list of dicts :return: Return a list of dicts that correspond to the input argument such that if an input element contained `revisions` key with multiple values, those will be returned as separate list items. """ def _copy_dict(d): """ Iterrate through the dictionary `d` and copy its keys and values excluding the key `revisions`. """ new_d = {} for k, v in d.iteritems(): if k != 'revisions': new_d[k] = v return new_d flattened_list = [] for tool_info in tools_info: revisions = tool_info.get('revisions', []) if len(revisions) > 1: for revision in revisions: ti = _copy_dict(tool_info) ti['revision'] = revision flattened_list.append(ti) elif revisions: # A single revisions was defined so keep it ti = _copy_dict(tool_info) ti['revision'] = revisions[0] flattened_list.append(ti) else: # Revision was not defined at all flattened_list.append(tool_info) return flattened_list
a70a8d0eda786a87da928b3b44561d19de9c1ec7
78,965
def to_vintagestory_axis(ax):
    """Convert blender space to VS space:
    X -> Z
    Y -> X
    Z -> Y
    """
    # Checked in the same priority order as the original if/elif chain.
    for blender_axis, vs_axis in (("X", "Z"), ("Y", "X"), ("Z", "Y")):
        if blender_axis in ax:
            return vs_axis
    return None
ce585cf95d9663f566ae65d3baa46a2e793bf301
78,967
def resolve_origin(node, symbols):
    """Resolve the address of the origin.

    :param node: expression node describing the origin address.
    :param symbols: symbol table providing `resolve_expression`.
    :return: whatever the symbol table resolves the expression to
        (presumably a numeric address — confirm with the symbols impl).
    """
    return symbols.resolve_expression(node)
1977e516f0b09cc44654974e50cfcd50accde702
78,970
def getReportPeriod(request):
    """
    Return: {string} 'filterReportPeriod' param value based on hierarchy:
        URL param value -> session (last used) -> none.
    """
    from_url = request.GET.get('reportperiod', None)
    from_session = request.session.get('filterReportPeriod', 'last90')
    # Fall back to the session value only when the URL gave nothing and
    # the session value itself is truthy.
    if not from_url and from_session:
        return from_session
    return from_url
b96bb4bc82e962d8f94cb2327356384b2dd63458
78,971
def scramble_mutation(random, candidate, args):
    """Return the mutant created by scramble mutation on the candidate.

    Randomly chooses two locations along the candidate and scrambles the
    values within that slice.

    .. Arguments:
       random -- the random number generator object
       candidate -- the candidate solution
       args -- a dictionary of keyword arguments

    Optional keyword arguments in args:

    - *mutation_rate* -- the rate at which mutation is performed
      (default 0.1)

    The mutation rate is applied to the candidate as a whole (i.e., it
    either mutates or it does not, based on the rate).
    """
    mutation_rate = args.setdefault('mutation_rate', 0.1)
    if random.random() >= mutation_rate:
        # No mutation this time: return the candidate untouched.
        return candidate
    upper = len(candidate) - 1
    lo = random.randint(0, upper)
    hi = random.randint(0, upper)
    if lo > hi:
        lo, hi = hi, lo
    segment = candidate[lo:hi + 1]
    random.shuffle(segment)
    # The shuffled slice is re-inserted reversed, matching the original
    # implementation exactly.
    return candidate[:lo] + segment[::-1] + candidate[hi + 1:]
d0aec67f2ed8a691f80e64d51e1f738a4c88fdc8
78,977
def is_supported_header(key):
    """Returns true if a standard supported header."""
    # Supported headers for object. Add more supported headers here.
    supported_headers = frozenset((
        "cache-control",
        "content-encoding",
        "content-type",
        "content-disposition",
        "content-language",
        "x-amz-website-redirect-location",
    ))
    return key.lower() in supported_headers
78c5a4b43de2cee6e44f7450ce0d252fdb4a299d
78,978
def score_seq(motif, seq):
    """Score how likely `seq` would be sampled from a positional motif.

    Given:
        motif: list of dictionaries of nucleotide --> probability
        seq: str of part of read sequence; must be same length as motif
    Return:
        score: float probability that `seq` would be sampled from motif,
        or 0 if the sequence contains a base absent from the motif
        (no N's allowed in motif).
    """
    score = 1
    for position, column in enumerate(motif):
        try:
            # Bayes rule transition score: multiply per-position probs.
            score *= column[seq[position]]
        except KeyError:
            return 0
    return score
dc11e2a16c0e93706162d75a6f16bf110147a549
78,980
def time_transformation(obj_dt_time, time_interval):
    """Truncate a datetime.time to the start of its time period.

    :param obj_dt_time: datetime.time object
    :param time_interval: in minutes, e.g. 15 minutes
    :return: time period in minutes indexed from midnight, e.g. 5:15 is
        5*60 + 15 = 315
    """
    minutes_since_midnight = (obj_dt_time.hour * 3600
                              + obj_dt_time.minute * 60
                              + obj_dt_time.second) // 60
    # Floor to the start of the containing interval.
    return minutes_since_midnight // time_interval * time_interval
520d15c987593a3391bb4ac3d02bc8028390d4e9
78,981
def fi_format(p_gene1, p_gene2):
    """Render a fusion in the predefined FUSION1--FUSION2 format (newline-terminated).

    Args:
        p_gene1 (string): left gene of the fusion
        p_gene2 (string): right gene of the fusion
    """
    return f'{p_gene1}--{p_gene2}\n'
a59cb581a147cf6c1265e48eed3b49419b86049a
78,984
import re
def match(regex, line):
    r"""Match the *entire* line against *regex*, with every literal space in
    the pattern widened to ``\s+`` and trailing whitespace permitted."""
    pattern = regex.replace(" ", r"\s+") + r"\s*\Z"
    return re.match(pattern, line)
2ba01ee264ac1c20eb5923743655db487d7f76fc
78,987
def get_alphabetical_value(string: str) -> int:
    """Sum the 1-based alphabet positions of the characters in `string`.

    Example: 'COLIN' -> 53 = 3 + 15 + 12 + 9 + 14
    """
    offset = ord('a') - 1
    return sum(ord(ch) - offset for ch in string.lower())
ec3f7dc1ef129b8edcb7c9fbbf95b5dd93c3a28d
78,990
def sort_by_list(lst, ref_lst, include_missing=True):
    """Order *lst* by the priorities given in *ref_lst*.

    if include_missing: a=[q, a, e, x, f, t], ref=[x, a, q, e] -> [x, a, q, e, f, t]
    if not include_missing: a=[q, a, e, x, f], ref=[x, a, q, e] -> [x, a, q, e]

    Elements absent from the reference are appended in their original order,
    or dropped entirely when ``include_missing`` is False.

    :param lst: list to sort
    :type lst: tuple
    :param ref_lst: list which sets the order of the previous list
    :type ref_lst: tuple
    :rtype: tuple
    """
    in_ref = tuple(x for x in lst if x in ref_lst)
    ordered = tuple(sorted(in_ref, key=ref_lst.index))
    if include_missing:
        ordered += tuple(x for x in lst if x not in ref_lst)
    return ordered
434e737f685d8b919bbee105854c9f6c58cefc52
78,991
def format_size(size, threshold=1536):
    """Return a human-readable file size string for a byte count.

    :param size: size in bytes
    :param threshold: keep dividing by 1024 until the value drops below this
    :return: e.g. '1234 B' or '11.50 GiB'

    >>> format_size(1234)
    '1234 B'
    >>> format_size(12345678901)
    '11.50 GiB'
    """
    if size < threshold:
        return "%i B" % size
    for unit in ('KiB', 'MiB', 'GiB', 'TiB', 'PiB'):
        size /= 1024.0
        if size < threshold:
            return "%.2f %s" % (size, unit)
    # Previously fell off the loop and returned None for astronomically large
    # sizes; express them in PiB instead so callers always get a string.
    return "%.2f PiB" % size
cd214c645bfcfb815090f956b3f1e35d1df90a13
78,992
def compare_dicts(src, dst):
    """Return True when every key/value pair of *src* also appears in *dst*."""
    return all(key in dst and dst[key] == value for key, value in src.items())
0a8eebf6731d65d37e4db7e147edeb281827ac52
78,993
import math
def polar(x):
    """Convert a complex number from rectangular to polar coordinates.

    Returns a ``(r, phi)`` tuple: *r* is the distance from the origin and
    *phi* the phase angle in radians.
    """
    real, imag = x.real, x.imag
    magnitude = math.sqrt(real ** 2 + imag ** 2)
    angle = math.atan2(imag, real)
    return magnitude, angle
86237133d532c0c07bd5802e8c106aff680ba951
78,999
from typing import Union def _collate_assayedmolecule_metadata(molecule, key, assay: Union[None, str] = None): """ Batches metadata from AssayedMolecule. Parameters ---------- molecule : malt.Molecule assay : Union[None, str] Filter metadata using assay key. key : str Attribute of class on which to batch. Returns ------- meta : list Elements are metadata from assay. """ meta = [] for record in molecule[assay]: if key not in record: raise RuntimeError(f'`{key}` not found in `metadata`') meta.append(record[key]) return meta
514c6afb5f3e9d15b5d4cd8bc4485095ef3d1723
79,000
import math
def corrected_ttr(n_terms, n_words):
    """Corrected TTR (CTTR): t / sqrt(2 * w), where t is the number of unique
    terms/vocab and w the total number of words (Carrol 1964).

    Returns 0 for an empty text to avoid division by zero.
    """
    return 0 if n_words == 0 else n_terms / math.sqrt(2 * n_words)
78dcbbd7149ddac62ded37c2a4746e6aa9f8398d
79,002
def get_napari_visual(viewer, layer):
    """Look up the vispy visual backing a napari layer.

    Parameters
    ----------
    viewer
        The napari viewer object.
    layer
        The napari layer object for which to find the visual.

    Returns
    -------
    visual
        The visual class registered for the layer.
    """
    # Reaches into napari's private Qt plumbing — presumably stable for the
    # pinned napari version; verify on upgrades.
    qt_viewer = viewer.window._qt_window._qt_viewer
    return qt_viewer.layer_to_visual[layer]
02e543271f16b27047b0eb5b954017251c561e20
79,003
def is_digit(s: str) -> bool:
    """Alternative to s.isdigit() that handles negative integers.

    Args:
        s (str): A string

    Returns:
        bool: True when the string parses as a (possibly signed) int.
    """
    try:
        int(s)
        return True
    # Catch only the parse failures; the previous bare `except:` also
    # swallowed KeyboardInterrupt/SystemExit.
    except (ValueError, TypeError):
        return False
e7e014a9a1e9de79d14dd86de06782239fb0a774
79,005
import re
def dashify(string, dash='–'):
    """Collapse each run of ASCII hyphens into a single *dash* (en dash by default).

    Accepts a plain string, an object exposing a string ``.content``
    attribute, or an object with an ``.astext()`` method (presumably
    docutils nodes — TODO confirm), tried in that order.
    """
    try:
        try:
            # Plain-string case.
            return re.sub(r'-+', dash, string)
        except TypeError:
            # re.sub rejected a non-string; fall back to its .content attribute.
            return re.sub(r'-+', dash, string.content)
    except AttributeError:
        # No usable .content either — last resort for objects with .astext().
        return re.sub(r'-+', dash, string.astext())
00d69e75ed1163a3702aa798b444da4ef6570401
79,006
def add_suffix_to_fp(file_path, suffix, path_separator='/'):
    """Append *suffix* to a file's base name (not a new file type;
    e.g. 'file.txt' becomes 'file_name.txt'). Return the new filepath.

    :param file_path: path whose last component is the file name
    :param suffix: text appended to the portion before the first dot
    :param path_separator: separator used to split AND rejoin the path
    """
    parts = file_path.split(path_separator)
    # Suffix goes on the part before the first dot, so 'a.tar.gz' -> 'a_x.tar.gz'.
    name_pieces = parts[-1].split('.')
    name_pieces[0] += suffix
    parts[-1] = '.'.join(name_pieces)
    # Bug fix: rejoin with path_separator; previously '/' was hard-coded here,
    # corrupting paths split on any other separator.
    return path_separator.join(parts)
ffb1e2b9a7009f2627bd71c5c2b63d74c83a798e
79,007
def get_first(s):
    """Return the first element of a pandas Series (positional, via ``iloc``)."""
    first_value = s.iloc[0]
    return first_value
5291072ab8cf32ed46a55c7bd0a7adc5ccfcb8d8
79,010
def abmag_to_image(abmag):
    """Convert an AB magnitude into HSC image flux units (zero point 27.0)."""
    exponent = (27.0 - abmag) / 2.5
    return 10.0 ** exponent
d21e9b3668298de37072a9fe505a948ce87ed87f
79,013
def evaluate_parens(paren_string: str) -> bool:
    """
    Evaluate an expression consisting of ( and ), and ensure that they are balanced.

    :param paren_string: A string representing the expression
    :return: True if the ( and ) parentheses are balanced, and False otherwise.

    >>> evaluate_parens('')
    True
    >>> evaluate_parens('()')
    True
    >>> evaluate_parens('(()(()))')
    True
    >>> evaluate_parens(')')
    False
    >>> evaluate_parens('(()')
    False
    >>> evaluate_parens('())(')
    False
    """
    depth = 0
    for symbol in paren_string:
        if symbol == '(':
            depth += 1
        elif symbol == ')':
            depth -= 1
            # A close with no matching open can never be balanced.
            if depth < 0:
                return False
    return depth == 0
27855a31f279a7a7c2a82f8d86630ba979723873
79,017
def calculate_overshoot(cigar):
    """Count read basepairs preceding the mapped portion.

    :param cigar: list of (operation, length) tuples representing the cigar string
    :return: total length of the leading operations, summed until the first
        operation whose code is 0 (falsy), i.e. the first match.
    """
    total = 0
    for op in cigar:
        if not op[0]:
            break
        total += op[1]
    return total
687ce44bef05376339fbe7892b1c07b84981710f
79,018
def xvar(self, n="", **kwargs):
    """Specifies the X variable to be displayed.

    APDL Command: XVAR

    Parameters
    ----------
    n
        X variable number: 0 or 1 displays PLVAR values vs. time (or
        frequency); 2 to NV [NUMVAR] displays them vs. variable n;
        1 interchanges time and PLVAR variable numbers with time as the
        curve parameter.

    Notes
    -----
    Defines the X variable (displayed along the abscissa) against which the
    Y variable(s) [PLVAR] are to be displayed.
    """
    return self.run(f"XVAR,{n}", **kwargs)
2d38b36f497b50a4ba4a65cea9fecab1152f8959
79,020
def get_pip_package_key(candidate):
    """Extract the bare package name from *candidate*, which may carry a
    trailing description and/or a pinned ``==version``."""
    first_token = str(candidate).split()[0]
    return first_token.partition('==')[0]
323deeee59e5b454e7291c944477471b2ca61aa0
79,023
def create_expected_repr(instance, fields):
    """Build the expected ``repr`` string for an instance.

    Args:
        instance: The instance to create the expected repr of.
        fields: An array of field names that should be in the repr.

    Returns:
        The expected output of ``repr(instance)``:
        ``ClassName(field1=..., field2=...)``.
    """
    pairs = ", ".join(f"{name}={getattr(instance, name)!r}" for name in fields)
    return f"{instance.__class__.__name__}({pairs})"
d95c5e2d1bf9dd7cd58d9802fa04a14b3bb5c794
79,025
def decode_signed_byte(value: str) -> int:
    """
    Convert a hex byte string to a signed int. Copied from GE's hextodec method.

    NOTE(review): a conventional two's-complement decode would use
    ``val >= 128`` so that 0x80 maps to -128; this deliberately mirrors GE's
    ``> 128`` comparison, so 0x80 decodes to +128 — confirm device parity
    before "fixing" the boundary.
    """
    val = int(value, 16)
    if val > 128:
        return val - 256
    return val
d05eeb9cd32f969b3a531b0351cbbd801fd64c60
79,029
def get_child_nodes_osd_tree(node_id, osd_tree):
    """Find the children of a node in the 'ceph osd tree' output.

    Args:
        node_id (int): the id of the node for which the children are retrieved
        osd_tree (dict): dictionary containing the output of 'ceph osd tree'

    Returns:
        list: the 'children' of the given node_id, or None when the id is
        not present in the tree.
    """
    # Iterate the nodes directly instead of indexing via range(len(...)).
    for node in osd_tree["nodes"]:
        if node["id"] == node_id:
            return node["children"]
    return None
3f0f146a3449974930f80195da033d56e97b56be
79,037
import re
def camel_case_to_underscore(name):
    """Convert a camelCase string to a name_with_underscores.

    Each run of consecutive uppercase letters becomes a single underscore
    followed by its lowercase form (so a leading capital yields a leading
    underscore).
    """
    return re.sub(r"[A-Z]+", lambda match: "_" + match.group(0).lower(), name)
0c89ce1aa903e917b3ed6bedc85a0e8aae822ae1
79,041
from typing import Dict
from typing import Any
def get_contexts_for_widgets(response) -> Dict[str, Dict[str, Any]]:
    """
    Map widget ``id`` attributes to their context dictionaries.

    Scans the response's context for subcontexts that contain a "widget"
    entry, then indexes each widget's context dict by the widget's ``id``
    attribute.
    """
    contexts_by_id = {}
    for subcontext in response.context:
        if "widget" not in subcontext:
            continue
        for candidate in subcontext:
            # Exact dict type check kept deliberately (context layers may be
            # dict-like objects that should be skipped).
            if type(candidate) is not dict or "widget" not in candidate:
                continue
            widget_id = candidate["widget"]["attrs"]["id"]
            contexts_by_id[widget_id] = candidate
    return contexts_by_id
a1055a8e3b4d97007acbdb266b2fd08d59177227
79,053
import re
def remove_non_printable_characters(text):
    """Strip non-ASCII characters from the given text.

    NOTE(review): despite the function name, the pattern removes every
    character outside the 7-bit ASCII range (0x00-0x7F) and therefore KEEPS
    ASCII control characters such as tabs and newlines — confirm callers
    expect this behavior.
    """
    return re.sub(r'[^\x00-\x7F]+', '', text)
e5ee9aae2f61ef5772609dadd2252b6ce7029b8e
79,057
def generate_dns_record_parameters(domain_name=None, record_type=None, host='',
                                   record=None, ttl=None,
                                   validate_record_as='valid', **kwargs):
    """Build the parameter dictionary for a generic DNS record.

    :param domain_name: string, (required) the domain name to use for the record
    :param record_type: string, the type of record; included only when truthy
    :param host: string, host for this record ('' for the top-level domain)
    :param record: string, (required) the record to be added or updated
    :param ttl: int, (required) the time-to-live for this record
    :param validate_record_as: string, what to validate the record parameter as
    """
    params = {
        'domain-name': domain_name,
        'host': {'value': host, 'optional': True},
        'ttl': ttl,
        'record': {'value': record, 'validate_as': validate_record_as},
    }
    if record_type:
        params['record-type'] = record_type
    return params
5749f52d17a74e300db9aae4ac972de749ee264e
79,058
def c_to_f(temp):
    """Convert Celsius to Fahrenheit.

    Args:
        temp (int, float, numpy.ndarray, list, tuple): Temperature in Celsius

    Returns:
        Temperature in Fahrenheit: a list when given a list or tuple,
        otherwise the same scalar/array type as the input.
    """
    # isinstance also accepts list/tuple subclasses, unlike the previous
    # exact type() checks; ndarrays fall through to the vectorized branch.
    if isinstance(temp, (list, tuple)):
        return [c * 1.8 + 32 for c in temp]
    return temp * 1.8 + 32.0
540c633430118de3f24e358adc6adbf561ff4653
79,060
def chunk_string(input_str, length):
    """Split a string into consecutive chunks of at most *length* characters.

    NOTE: http://stackoverflow.com/questions/18854620/

    :param input_str: The input string to chunk.
    :type input_str: str
    :param length: The length of each chunk.
    :type length: int
    :return: A list of the input string as smaller chunks (the last chunk
        may be shorter).
    :rtype: list
    """
    return [input_str[start:start + length]
            for start in range(0, len(input_str), length)]
9ae8661c5bc53437ce28b9340fd7b6bf822e9d2d
79,066
def usd(value):
    """Format *value* as a US-dollar amount, e.g. 1234.5 -> '$1,234.50'."""
    return f"${value:,.2f}"
20c54a9286e3e9ead932d584f5df9885b965bfdd
79,067
def validate_geometry(screen_geometry):
    """Parse '<columns>x<rows>' and return the pair as ints.

    Raises ValueError when the string does not conform to the
    <integer>x<integer> format or either dimension is non-positive.
    """
    cols_part, rows_part = screen_geometry.lower().split('x')
    columns, rows = int(cols_part), int(rows_part)
    if columns <= 0 or rows <= 0:
        raise ValueError('Invalid value for screen-geometry option: "{}"'.format(screen_geometry))
    return columns, rows
c0c6a10cdc93c2461bf593ca530cadb9538537b8
79,071
import pathlib
def build_filename(working_directory,  # type: str
                   tag_name,  # type: str
                   extension='.in',  # type: str
                   prefix='src',  # type: str
                   ):
    # type: (...) -> pathlib.Path
    """Construct the path to a tagged requirement source file.

    Args:
        working_directory: The parent directory of the source file.
        tag_name: The tag name.
        extension: The file extension. Defaults to *.in*.
        prefix: The subdirectory under the working directory to create the
            source file in. Defaults to *src*.
    """
    return pathlib.Path(working_directory) / prefix / (tag_name + extension)
c9799e638415531bff1f9c87eff8644357a5ddae
79,073
def quote(arg: object) -> object:
    """Wrap a string argument in single quotes (so it prints as a string);
    return any non-string argument unchanged.

    :param arg: argument to transform.
    :return: argument quoted if a string, otherwise unchanged.
    """
    return f"'{arg}'" if isinstance(arg, str) else arg
746989bea154ca58309bed24b97765fc1ef86427
79,075
def FormatIso8601(t):
    """
    Format a datetime object as an ISO 8601 time string.

    :type t: ``datetime.datetime``
    :param t: the datetime to format (required). NOTE(review): the original
        docstring claimed an ``int`` number of seconds, but the code calls
        ``t.strftime`` so *t* must be a datetime-like object.
    :return: An ISO 8601 time format string (no timezone suffix)
    :rtype: ``str``
    """
    return t.strftime("%Y-%m-%dT%H:%M:%S")
d24e62a419d90d398543cc082f470a8df4a1b68c
79,083
from typing import List
def findMedianSortedArrays(self, nums1: List[int], nums2: List[int]) -> float:
    """
    Return the median of two sorted arrays.

    Merges the inputs into a new sorted list — without mutating either
    argument, unlike the previous in-place ``nums1.extend`` — then returns
    the middle element, or the mean of the two middle elements when the
    total length is even.
    """
    merged = sorted(nums1 + nums2)
    length = len(merged)
    mid = length // 2
    if length % 2 == 0:
        return (merged[mid - 1] + merged[mid]) / 2
    return merged[mid]
b6826b93f788c74ee8ca98947b7136295f290af0
79,086
import requests
def get_str_from_url(url, encoding=None):  # pragma: no cover
    """Fetch *url* and return the response body as text.

    Args:
        url: some URL
        encoding: override the encoding that would have been determined
            automatically (Default value = None)

    Returns:
        the response body as a string
    """
    response = requests.get(url)
    if encoding is not None:
        response.encoding = encoding
    return response.text
ede6658f8419212e12d997f34d247be05721fd7f
79,089
import re
def extract_version_tag_from_url(url):
    """Extract a DataONE API version tag from a MN or CN service endpoint URL.

    Args:
        url : str
            Service endpoint URL. E.g.:
            ``https://mn.example.org/path/v2/object/pid``.

    Returns:
        str : Valid version tags are currently ``v1`` or ``v2``; ``None``
        when no tag is present.
    """
    # Bug fix: use re.search, not re.match — re.match anchors at the start
    # of the string, so the documented example URL always returned None.
    m = re.search(r"(/|^)(v\d)(/|$)", url)
    if not m:
        return None
    return m.group(2)
b3ef81a3cf71bd7e2b155191431c5e3c878014c6
79,095
def sum_of_digits_compact(n: int) -> int:
    """
    Find the sum of digits of a number
    >>> sum_of_digits_compact(12345)
    15
    >>> sum_of_digits_compact(123)
    6
    >>> sum_of_digits_compact(-123)
    6
    >>> sum_of_digits_compact(0)
    0
    """
    return sum(map(int, str(abs(n))))
14bdfc4c711c1cb6cae5c9631a3debd7628f0e4c
79,096
def make_initial_state(supporters):
    """Build the initial seating string: *supporters* alternating
    Tottenham(T)/Arsenal(A) pairs followed by two empty seats ('__').

    make_initial_state(int) -> str
    """
    seats = "TA" * supporters
    return seats + "__"
ca44f347b922e2f4e24b8962463a3aac22fbb9f7
79,097
def circular_shift_sample_indices(indices, n_samples_shift, n_samples_total):
    """Shift sample index values circularly by the specified amount.

    :param numpy.ndarray indices: shape (N,) in dtype int
    :param int n_samples_shift: number of samples to shift, positive or negative
    :param int n_samples_total: total number of samples in the data
    :return: shifted indices, wrapped modulo the total sample count
    :rtype: numpy.ndarray
    """
    shifted = indices + n_samples_shift
    return shifted % n_samples_total
5974b7397dc048bb71fa013e95ad1be9aa4c4e66
79,099
def d2s(date):
    """Format a date as a 'YYYY-MM-DD' string.

    >>> d2s(datetime.date(2016, 8, 16))
    '2016-08-16'

    :param date: a ``datetime.date`` (any object honoring strftime-style
        format specs works)
    :return: the formatted string
    """
    return format(date, "%Y-%m-%d")
a70eaa2eec84f87db97243f20007600295c5b4cf
79,108
def stop_screencast() -> dict:
    """Build the CDP message that stops sending each frame in the
    `screencastFrame`. **Experimental**
    """
    payload = {"method": "Page.stopScreencast"}
    payload["params"] = {}
    return payload
743f604f9e4278633647e65f3e5693eb92882976
79,109
def is_bottom(module_index):
    """Return True if the module is in the bottom cap (indices 600-695).

    Uses ``&`` rather than ``and`` so numpy integer arrays are handled
    element-wise as well as plain ints.
    """
    above_lower = module_index >= 600
    below_upper = module_index < 696
    return above_lower & below_upper
88b778141776190fecaaa813e6ffcfd418abfd5f
79,117