content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def make_word(letters, beginnings=None, endings=None):
    """Build a word dictionary from its letters.

    Args:
        letters: iterable of single-character strings making up the word.
        beginnings: optional list of word beginnings (defaults to a new []).
        endings: optional list of word endings (defaults to a new []).

    Returns:
        dict with keys 'word', 'letters', 'beginnings', 'endings'.
    """
    # None defaults replace the mutable-default-argument anti-pattern:
    # the original shared one list object across every call.
    return {
        'word': ''.join(letters),
        'letters': letters,
        'beginnings': [] if beginnings is None else beginnings,
        'endings': [] if endings is None else endings,
    }
0b53fd8e26d2ca80e8c54ffa08f9473fb7b9f902
10,379
def mutate(mushroom, i):
    """Flip bit ``i`` of the mushroom bit string and return the result."""
    mask = 1 << i
    return mushroom ^ mask
ffac7c56cfe455928fcbb833d363441ef9a2a8a7
10,384
def avoid_multiple_update(func):
    """Decorator preventing a handler from updating multiple times.

    If ``self.in_update`` is already set, the wrapped handler is skipped,
    guarding against re-entrant update cascades.
    """
    import functools

    @functools.wraps(func)  # keep the wrapped handler's name/docstring
    def wrapper(self, attr, old, new):
        if self.in_update:
            return
        self.in_update = True
        try:
            func(self, attr, old, new)
        finally:
            # Always clear the flag, even when the handler raises,
            # so later updates are not silently blocked forever.
            self.in_update = False
    return wrapper
d35e9add88ddb62d1368ff0fbf94e9637518c249
10,385
import numpy as np


def create_slice_mask(plane_dict, infile_dims):
    """Create a binary array defining a mask covering the area below a given
    plane in the 3D image.

    :type plane_dict: dict
    :param plane_dict: A dictionary matching (x, y) voxel coordinates to the
        z value of the plane at that column.
    :type infile_dims: list
    :param infile_dims: A list of the NIFTI file's dimensions.
    :rtype: NumPy array
    :return: A NumPy array defining the binary mask of the slice.
    """
    mask_array = np.zeros(infile_dims)
    z_coords = np.arange(infile_dims[2])
    for x in range(infile_dims[0]):
        for y in range(infile_dims[1]):
            # Vectorized over z: voxel is inside the mask when its z index
            # lies below the plane height for this (x, y) column.  This
            # replaces the original O(X*Y*Z) pure-Python triple loop.
            mask_array[x, y] = z_coords < plane_dict[(x, y)]
    return mask_array
ca25e627dc93b3828b7ea49c139e1f9fc5950f6e
10,386
def unsigned_to_signed(seq, width):
    """Convert unsigned integers to signed two's-complement values.

    :param seq: iterable of unsigned ints, each assumed < 2**width
    :param width: bit width of the representation
    :return: list of signed ints in [-2**(width-1), 2**(width-1) - 1]
    """
    k = 1 << width
    # Integer threshold; the original compared against k/2, a float, which
    # loses precision (and correctness) once width exceeds the float mantissa.
    half = k >> 1
    return [x - k if x >= half else x for x in seq]
b7b90ff4de576bee2e91af27f6fc3b509608ef34
10,387
def containerhasprefix(obj, prefix):
    """Tell whether the name of obj's container starts with *prefix*."""
    container_name = obj.container.name
    return container_name.startswith(prefix)
dcf4b14cfa1ac15355e7db5110be55b623e35f20
10,389
def _variance(mean_variance, samples): """Perform final variance calculations. `mean_variance` and `samples` were pre-recorded with `options.record.reduction._variance`. This operation only calculates total running variance. This function is applied on per-recording basis (per layer to be exact). Parameters ---------- mean_variance : Tuple[torch.Tensor] Running tensor containing running mean and running variance samples : int How many samples passed through this tensor (used for taking the mean). Returns ------- torch.Tensor Tensor containing per-neuron variance (exact same shape as output layer) """ mean = mean_variance[0] / samples variance = mean_variance[1] variance /= samples variance -= mean * mean return variance
0bcc9705205304d8338843a3c09c8ae1f2314bb6
10,390
def choose_part_ranges(content_length, minimum_part_size):
    """Return a list of (offset, length) pairs for the parts of a large file.

    :param content_length: content length value
    :type content_length: int
    :param minimum_part_size: a minimum file part size
    :type minimum_part_size: int
    :rtype: list
    """
    # If the file is at least twice the minimum part size, we are guaranteed
    # to be able to break it into multiple parts that are all at least
    # the minimum part size.
    assert minimum_part_size * 2 <= content_length

    # How many parts can we make?
    part_count = min(content_length // minimum_part_size, 10000)
    assert 2 <= part_count

    # All of the parts, except the last, are the same size.  The
    # last one may be bigger.
    part_size = content_length // part_count
    last_part_size = content_length - (part_size * (part_count - 1))
    assert minimum_part_size <= last_part_size

    # Make all of the parts except the last.  The builtin range replaces
    # six.moves.range, which was only needed for Python 2 compatibility.
    parts = [(i * part_size, part_size) for i in range(part_count - 1)]

    # Add the last part
    start_of_last = (part_count - 1) * part_size
    parts.append((start_of_last, content_length - start_of_last))
    return parts
28acbe2df14d6f54a95ab74c18e410a076354187
10,391
import requests


def check_user_permission(access_token, job_requires):
    """Ask arborist whether the user is allowed to run the job.

    Args:
        access_token (str): the access token
        job_requires (dict): requirements so that the job can run, with keys
            "arborist_url" and "job_access_req".

    Returns:
        bool: whether the user has permission to run the job
        dict: a message log
    """
    payload = {
        "user": {"token": access_token},
        "requests": job_requires["job_access_req"],
    }
    arborist_url = job_requires["arborist_url"].strip("/")
    response = requests.post(
        "{}/auth/request".format(arborist_url),
        headers={"content-type": "application/json"},
        json=payload,
    )
    if response.status_code != 200:
        return (
            False,
            {"message": "Can not run the job. Detail {}".format(response.json())},
        )
    if not response.json()["auth"]:
        return (False, {"message": "User does not have privilege to run the job"})
    return True, {"message": "OK"}
c1069364b8d59965f78367e64b88206062c60b2b
10,392
def slice_array_d50(df, idNode):
    """Slice the array produced by the create_array function for one node.

    Rows are filtered with a boolean mask on the "Node ID" column, then only
    the columns at positions 1 and 11 (time and mean diameter) are kept.

    :param df: DataFrame (can be the array of the AL or UL)
    :param idNode: node identifier to filter on
    :return: the filtered sub-array
    """
    mask = df["Node ID"] == idNode
    return df[mask].iloc[:, [1, 11]]
565c4ea76de08a493f028e32052fcef60e7f42dd
10,393
def to_upper(string):
    """Return *string* converted to upper case."""
    result = string.upper()
    return result
8d3bb60b7b4704479cb642b9ab910f7a1469fd44
10,394
def _after_arg(arg_name): """Return a finder for argument after arg_name.""" def generate(transform): """Generate three args, OTHER, arg_name and ARGUMENT.""" return "ONE {0} {1}".format(arg_name, transform("ARGUMENT")) return generate
0a0306760e8e9a2ad356a4f2d9ae2c6b76d53d2e
10,396
import torch


def get_src_xyz_from_plane_disparity(meshgrid_src_homo, mpi_disparity_src, K_src_inv):
    """
    Back-project the source-view pixel grid onto each MPI plane.

    :param meshgrid_src_homo: 3xHxW homogeneous pixel coordinates
    :param mpi_disparity_src: BxS per-plane disparities
    :param K_src_inv: Bx3x3 inverse camera intrinsics
    :return: BxSx3xHxW tensor of 3D points in the source camera frame
    """
    B, S = mpi_disparity_src.size()
    H, W = meshgrid_src_homo.size(1), meshgrid_src_homo.size(2)
    # Plane depth is the reciprocal of its disparity.
    mpi_depth_src = torch.reciprocal(mpi_disparity_src)  # BxS
    # print(K_src_inv.size())
    # Replicate intrinsics per plane so one batched matmul covers all B*S planes.
    K_src_inv_Bs33 = K_src_inv.unsqueeze(1).repeat(1, S, 1, 1).reshape(B * S, 3, 3)
    # 3xHxW -> BxSx3xHxW
    meshgrid_src_homo = meshgrid_src_homo.unsqueeze(0).unsqueeze(1).repeat(B, S, 1, 1, 1)
    meshgrid_src_homo_Bs3N = meshgrid_src_homo.reshape(B * S, 3, -1)
    # Camera rays through each pixel: K^-1 @ pixel.
    xyz_src = torch.matmul(K_src_inv_Bs33, meshgrid_src_homo_Bs3N)  # BSx3xHW
    # Scale each ray by its plane's depth to land on the plane.
    xyz_src = xyz_src.reshape(B, S, 3, H * W) * mpi_depth_src.unsqueeze(2).unsqueeze(3)  # BxSx3xHW
    xyz_src_BS3HW = xyz_src.reshape(B, S, 3, H, W)
    return xyz_src_BS3HW
91492ba8782607e8500de5b886f9339b041a3513
10,397
def trimmed_split(s, seps=(";", ",")):
    """Split *s* on the first separator from *seps* it contains, trimming
    whitespace from the string and from every piece."""
    stripped = s.strip()
    for sep in seps:
        if sep in s:
            return [piece.strip() for piece in stripped.split(sep)]
    return [s]
192c75f7e346860010031cf9e621d7bb5664dde4
10,398
def descendant_selected(node):
    """Return True if this node or a descendant of it is selected.

    Use as e.g.: {% if descendant_selected node %}
    """
    if node.selected:
        return True
    return any(descendant_selected(child) for child in node.children)
974b95b8d658ec173ccbbf9488f309529c1f0d86
10,399
def mock_clipboard(monkeypatch, request):
    """Fixture mocking clipboard IO.

    Patches pandas.io.clipboard.clipboard_get and
    pandas.io.clipboard.clipboard_set to store data in a local dict keyed by
    the test ID (``request.node.name``), and yields that dict for direct
    manipulation by tests.
    """
    # our local clipboard for tests
    local_data = {}

    def fake_set(data):
        local_data[request.node.name] = data

    def fake_get():
        return local_data[request.node.name]

    monkeypatch.setattr("pandas.io.clipboard.clipboard_set", fake_set)
    monkeypatch.setattr("pandas.io.clipboard.clipboard_get", fake_get)
    yield local_data
7acf72b45050d35cd57ddf9310304950610744c5
10,403
def fileNumber(filePath):
    """Return the trailing frame number of a path as an int.

    E.g. ``foo.0080`` yields ``80``.
    """
    suffix = filePath.split('.')[-1]
    return int(suffix)
731918660f0c145c15ba99c2f5eb8be9745e8576
10,404
def get_top_adj(full_results, num):
    """
    Takes the dictionary of results from run_adj_analysis and the number of
    top results to return.  Returns the top *num* adjectives associated with
    male pronouns and with female pronouns.

    :param full_results: dictionary from result of run_adj_analysis
    :param num: number of top results to return per gender
    :return: tuple of lists of top adjectives associated with male pronouns
             and female pronouns, respectively
    """
    male_ranked = [(val[0] - val[1], adj) for adj, val in full_results.items()]
    female_ranked = [(val[1] - val[0], adj) for adj, val in full_results.items()]
    male_top = sorted(male_ranked, reverse=True)[:num]
    female_top = sorted(female_ranked, reverse=True)[:num]
    return male_top, female_top
0eddb189f79bc9b18f2994f2a99aef1b07181691
10,405
def manhattan_distance(x, y):
    """Return the Manhattan (city-block) distance between two sequences."""
    total = 0
    for a, b in zip(x, y):
        total += abs(a - b)
    return total
4887024603a8fe3398ec80a17d1d70fbe15fdfab
10,407
def starting_coordinates():
    """Return the starting (x, y) coordinates on a pixel matrix."""
    start = (10, 10)
    return start
68f8b493e36cba56808cfbcea6a4ee9368e0c720
10,409
def strip_html_tags(i_string):
    """Remove a few known HTML artifacts from an episode description.

    Quick solution; will probably be replaced eventually.
    """
    replacements = (
        ('<p>', ''),
        ('</p>', ''),
        ('&nbsp;', ' '),
        ('<br>', '\n'),
    )
    for old, new in replacements:
        i_string = i_string.replace(old, new)
    return i_string
9454c7ee5e9006756426ca1fdecb97df8beb2cc4
10,410
import copy
import collections


def get_db_data(relation_data, unprefixed):
    """Organize database requests into a collections.OrderedDict

    :param relation_data: shared-db relation data
    :type relation_data: dict
    :param unprefixed: Prefix to use for requests without a prefix. This
                       should be unique for each side of the relation to
                       avoid conflicts.
    :type unprefixed: str
    :returns: Ordered dict of databases and users
    :rtype: collections.OrderedDict
    """
    # Deep copy to avoid unintentionally changing relation data
    settings = copy.deepcopy(relation_data)
    databases = collections.OrderedDict()

    # Clear non-db related elements; pop(key, None) is safe when the key
    # is absent and avoids the check-then-pop dance.
    for non_db_key in ("egress-subnets", "ingress-address", "private-address"):
        settings.pop(non_db_key, None)

    # A single unprefixed credential set gets the caller's prefix applied.
    # The explicit tuple preserves the original deterministic key order.
    singleset = {"database", "username", "hostname"}
    if singleset.issubset(settings):
        for key in ("hostname", "database", "username"):
            settings["{}_{}".format(unprefixed, key)] = settings.pop(key)

    for k, v in settings.items():
        db = k.split("_")[0]
        x = "_".join(k.split("_")[1:])
        if db not in databases:
            databases[db] = collections.OrderedDict()
        databases[db][x] = v
    return databases
0e2a30624f35f49119ae9bd275153d5e9fdf7503
10,412
def convert(tree, fileName=None):
    """
    Converts input files to be compatible with merge request #412, where we
    switch from custom XML pathing to standard XPATH nomenclature.

    @ In, tree, xml.etree.ElementTree.ElementTree object, the contents of a
        RAVEN input file
    @ In, fileName, the name for the raven input file
    @Out, tree, xml.etree.ElementTree.ElementTree object, the modified RAVEN
        input file
    """
    simulation = tree.getroot()
    models = simulation.find('Models')
    if models is None:
        # no models, no BasicStats
        return tree
    raven_outputs = (
        model for model in models
        if model.tag == 'PostProcessor'
        and model.attrib['subType'] == 'RavenOutput'
    )
    for model in raven_outputs:
        for child in model:
            if child.tag != 'File':
                continue
            for entry in child:
                if entry.tag == 'output':
                    entry.text = entry.text.replace('|', '/')
    return tree
841f491370dc07d77f9af5428d78486136220ce9
10,413
def get_username(prompt: str) -> str:
    """Ask the user for a username and return it."""
    return input(prompt)
c6d6119ba9b2b1ec9408501afb04b513aa9b4965
10,416
def remove_low_information_features(feature_matrix, features=None):
    """Select features that have at least 2 unique values and that are not
    all null.

    Args:
        feature_matrix (:class:`pd.DataFrame`): DataFrame whose columns are
            feature names and rows are instances
        features (list[:class:`featuretools.FeatureBase`] or list[str],
            optional): List of features to select

    Returns:
        (feature_matrix, features) when features is given, else feature_matrix
    """
    def informative(column):
        series = feature_matrix[column]
        return series.nunique(dropna=False) > 1 and not series.dropna().empty

    keep = [c for c in feature_matrix if informative(c)]
    feature_matrix = feature_matrix[keep]
    if features is None:
        return feature_matrix
    features = [f for f in features if f.get_name() in feature_matrix.columns]
    return feature_matrix, features
0377b2b87d04ddaa89e332ed72c12a05b4a1c3e6
10,417
import hashlib


def md5_password(password, salt):
    """Return the MD5 hex digest of the raw password concatenated with salt."""
    digest = hashlib.md5((password + salt).encode('utf-8'))
    return digest.hexdigest()
cbb4306bfa1e11957a56d5ace5d810ac421f5d83
10,418
def bit(value, position, length=1):
    """
    Return bit of number value at position
    Position starts from 0 (LSB)

    NOTE(review): for length > 1 this reads bits from ``position`` *downward*
    (toward the LSB), i.e. positions [position .. position-length+1], not
    upward -- confirm this is the intended semantics before reuse.

    :param value: non-negative integer to read bits from
    :param position: bit index, 0 = least significant bit
    :param length: number of bits to read, moving toward the LSB
    :return: extracted bits as an int; 0 when position is past the MSB
    """
    # Operate on the binary string so a slice can grab a run of bits.
    binary = bin(value)[2:]
    size = len(binary) - 1  # string index of the LSB
    if position > size:
        # Requested bit lies beyond the most significant bit.
        return 0
    else:
        # size - position maps the LSB-based index onto the string.
        return int(binary[size - position: size - position + length], 2)
f639eae3d3f260b5b0d29ddd6a3ad99c89267704
10,420
def diff_template(page, label=None):
    """
    Return a Template:Diff2 string for the given Page.
    """
    label = page.title() if label is None else label
    return f"{{{{Diff2|{page.latest_revision_id}|{label}}}}}"
48cca6ad78ce2e0f2e1ce4902d56734fccf45030
10,421
import re


def make_dataset_name(name):
    """Sanitize a dataset name.

    Word characters (letters, digits, underscore) and "-" are kept; any
    other character is replaced with "_".  (The original docstring claimed
    the replacement was "-", contradicting the code, which inserts "_".)
    """
    # One C-level regex pass replaces the original per-character Python loop.
    return re.sub(r"[^\w-]", "_", name)
bf97474ffcb0c5e0354c724ca4defea1b04f8939
10,422
def averageOfNumbers():
    """
    averageOfNumbers

    Prompts the user for a count and that many numbers, then returns their
    average.

    returns: The average of the numbers input by the user.
    """
    count = input("How many numbers do you wish to input? ")
    entered = []
    for index in range(1, int(count) + 1):
        value = int(input("%d. Enter a number: " % (index)))
        entered.append(value)
    return sum(entered) / len(entered)
3403bf775f0675e0f54dc18f5146e37989fc0ce2
10,423
import math


def calc_entropy(data, base=2):
    """
    Calculate the Shannon entropy of data.

    Using documentation from scipy.stats.entropy as the basis for this code
    (https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.entropy.html).

    :param data: Measure the entropy of this object
    :param base: logarithm base for the entropy (default 2)
    :return: Calculated entropy value
    """
    if not data:
        return 0

    # Single O(n) counting pass; the original called data.count() once per
    # distinct symbol, which is O(n * k).
    counts = {}
    for ch in data:
        counts[ch] = counts.get(ch, 0) + 1
    total = len(data)
    frequencies = [count / total for count in counts.values()]

    # calculate shannon entropy
    return -sum(freq * math.log(freq) / math.log(base) for freq in frequencies)
72a9d111120415471c7d54e11c862ce8ebde4a55
10,424
def getValidPaths(author_collabs, year):
    """
    Select the neighbours with valid paths.

    Returns a list where each result is a tuple of
    (author name, collaborations < year), keeping only neighbours with at
    least one such collaboration.
    """
    valid_neighbours = []
    for name, collabs in author_collabs.items():
        before_year = [c for c in collabs if c < year]
        if before_year:
            valid_neighbours.append((name, before_year))
    return valid_neighbours
b9cc56b3bee490296afb9448c6082eb0206a74e7
10,425
def count_unplayed_cards(r, progress):
    """Return the number of cards which are not yet played, including cards
    which are unplayable because all those cards (or cards of a value below
    it) are already discarded."""
    return sum(5 - progress[suit] for suit in r.suits)
445b1b3d07aff4c2010fc482cb7f5b55a8250bac
10,426
def toFENhash(fen):
    """
    Drop the two last fields of the FEN notation.
    """
    fields = fen.split(" ")
    return ' '.join(fields[:-2])
bc2575ce35f8d2678753369d2b3a4d050388891f
10,428
def remap_lads(lad_name):
    """Map MSOA-census LAD codes onto the LAD geographies of the LAD
    population data, where the two sources disagree."""
    remapping = {
        'E06000048': 'E06000057',  # Northumberland
        'E08000020': 'E08000037',
        'E07000097': 'E07000242',
        'E07000100': 'E07000240',
        'E07000101': 'E07000243',
        'E07000104': 'E07000241',  # Welwyn Hatfield
    }
    return remapping.get(lad_name, lad_name)
46e13886a4541fcd5a47870d845db717373084cc
10,429
def validate(detector, val_data, metric): """Test on validation dataset.""" metric.reset() for img, label in val_data: # scores, bboxes = detector.detect(img) scores, bboxes = detector.ms_detect(img) metric.update(bboxes, scores, label) return metric.get()
7b9f3a5405ea40eba37765d7fbc5b204203c96de
10,430
def _construct_request_parameters(args: dict, keys: list, params={}): """A helper function to add the keys arguments to the dict parameters""" parameters = {} if params is not None: for p in params: parameters[p] = params[p] for (arg_field, filter_field) in keys: value = args.get(arg_field, None) if value is not None: parameters[filter_field] = value return parameters
a268ddf8612014a4a4012eab8cfae50d1437be1e
10,432
from typing import Dict from typing import List from typing import Tuple def votes_per_month(values: Dict[str, List[str] | Tuple[str]]) -> Dict[str, Dict[str, int]]: """ :param values: a dictionary formatted like: {month: list of votes} :returns: dictionary every person for every month """ all_persons = set([person.lower().replace(" ", "") for element in values.values() for persons in element for person in persons.split("|")]) out = {} for month in values: values_now = [element.lower().replace(" ", "") for element in values[month]] if month not in out: out[month] = {} for person in all_persons: out[month][person] = values_now.count(person) return out
acff0d2aaff68b12430c7d28f7f7f7b0f08aaa3c
10,433
def move_cursor(move):
    """Return the cursor delta for a move direction ('left' or 'right')."""
    deltas = {
        'left': -1,
        'right': 1,
    }
    return deltas[move]
333c3b6b0a8b3e936250f47b16019420da2a3670
10,434
import math


def gcd(*nums):
    """
    Find the greatest common divisor (GCD) of a list of numbers.

    Args:
        *nums (tuple[int]): The input numbers.

    Returns:
        gcd_val (int): The value of the greatest common divisor (GCD).

    Examples:
        >>> gcd(12, 24, 18)
        6
        >>> gcd(12, 24, 18, 42, 600, 66, 666, 768)
        6
        >>> gcd(12, 24, 18, 42, 600, 66, 666, 768, 101)
        1
        >>> gcd(12, 24, 18, 3)
        3
    """
    result = nums[0]
    for value in nums[1:]:
        result = math.gcd(result, value)
    return result
ea3b2f55ab1db530a2c0e5d472c053b9e538f3e0
10,436
import cmath


def rotate_points(points, phase_shift):
    """
    Rotate points about the origin in the complex plane.

    Arguments:
        points: iterable(complex)
            Points to rotate in the complex plane.
        phase_shift:
            Magnitude of rotation in radians.

    Returns:
        rotated_points: list(complex)
            Points rotated about the origin.
    """
    def rotate_one(point):
        magnitude, angle = cmath.polar(point)
        return cmath.rect(magnitude, angle + phase_shift)

    return [rotate_one(point) for point in points]
e9ae43774bc8f5ac770413f6e758d41223fb3c00
10,438
def tupleize(func):
    """A decorator that tuple-izes the result of a function.

    This is useful when the evaluation function returns a single value but
    callers expect a tuple.
    """
    import functools

    @functools.wraps(func)  # preserve the wrapped function's metadata
    def wrapper(*args, **kargs):
        return (func(*args, **kargs),)
    return wrapper
2a2a9d709177868bd47571f86ae026d666b2593b
10,439
def vocabulary(word_counts):
    """
    :param word_counts: dictionary of each word count
    :return: list of vocabulary, most frequent words first
    """
    ranked = sorted(word_counts.items(), key=lambda item: -item[1])
    return [word for word, _count in ranked]
2e7b77fe8e69ba4dd6c9136c3e80b16f02a56d49
10,440
import torch


def rejoin(chunked, initial_shape):
    """
    Rejoins chunked tensor, removing the padding as necessary

    >>> eq = lambda a, b: torch.all(torch.lt(torch.abs(torch.add(a, -b)), 1e-12))
    >>> x = torch.arange(end=4) + 3
    >>> y = torch.arange(end=15) + 2
    >>> mesh = x.view(-1, 1) @ y.view(1, -1)
    >>> mesh = torch.stack([mesh, mesh + 1, mesh + 2], dim=0)

    First we create an array. I don't know why I created it in such a silly
    way. Next, we'll show that chunking/rejoining result in the exact same
    array, despite the fact that some of the chunks are padded!

    >>> mesh.shape
    torch.Size([3, 4, 15])
    >>> chunks(mesh, 3).shape
    torch.Size([1, 2, 5, 3, 3, 3])
    >>> rejoined = rejoin(chunks(mesh, 3), mesh.shape)
    >>> rejoined.shape
    torch.Size([3, 4, 15])
    >>> torch.equal(mesh, rejoined)
    True

    Great! Now we can try specifying a chunk size that is smaller than the
    minimum dimension, and it still works.

    >>> initial = torch.arange(512).view(8, 8, 8)
    >>> chunked = chunks(initial, 9)
    >>> reconstructed = rejoin(chunked, (8, 8, 8))
    >>> torch.equal(initial, reconstructed)
    True

    :param chunked: a chunked tensor created by `chunks`
    :param initial_shape: the initial shape of the tensor before chunking
    :return: tensor in the shape `initial_shape`, dimensions `i` and
        `i + len(initial_shape)` are joined
    """
    indices = []
    padded_shape = []
    for i in range(len(initial_shape)):
        # Interleave each grid dimension with its within-chunk dimension so
        # the subsequent reshape joins them correctly.
        indices.append(i)
        indices.append(i + len(initial_shape))
        # The joined size may exceed initial_shape[i] because of padding.
        padded_shape.append(chunked.shape[i] * chunked.shape[len(initial_shape) + i])
    repermuted = chunked.permute(*indices)
    padded = repermuted.reshape(*padded_shape)
    # Trim the padding back down to the original extent on every axis.
    for i, s in enumerate(initial_shape):
        padded = torch.narrow(padded, i, 0, s)
    return padded
6bcf5bf07b813b79245b50c72e67a98e575df5f9
10,441
def transient_provider(func):
    """
    Mark *func* as a transient provider by tagging it, then return it.
    """
    setattr(func, 'transient', True)
    return func
2f540fc3099c3fc71ac49ce44dbd69a042b9e39f
10,442
def validate_keep(keep):
    """Validate the value of the keep parameter.

    Returns *keep* unchanged when it equals the special string 'all';
    otherwise coerces it to an int.

    :param keep: value to validate
    :type keep: int or str
    :return: the validated value of keep
    :rtype: either an int or the special value 'all'
    :raises ValueError: if *keep* is a string that can't be parsed as an int
    :raises TypeError: if *keep* is of a type that int() can't accept
    """
    if keep == 'all':
        return keep
    return int(keep)
5a1d03140eeab9bef1f3ae417c3d3fc77b8499bd
10,443
import os


def default_database():
    """Return the DATABASE environment variable, or '' when it is unset."""
    return os.environ.get('DATABASE', '')
a4e1ccfd916e76e0ed8eea79ca519711cbcaced1
10,444
def levensthein_dist(input_command: str, candidate: str) -> int:
    """
    Compute the Levenshtein distance between the two strings, used to
    suggest a very similar command when a non-existing handle is given.

    :param input_command: The non-existing handle the user gave as input
    :param candidate: The (possibly similar) alternative command
    :return: The similarity between the two strings measured by the
             Levenshtein distance
    """
    if not input_command or not candidate:
        # At least one string is empty: distance is the other one's length.
        return max(len(input_command), len(candidate))

    cols = len(input_command) + 1
    rows = len(candidate) + 1

    # dp[i][j] = edit distance between candidate[:i] and input_command[:j].
    dp = [[0] * cols for _ in range(rows)]
    dp[0] = list(range(cols))
    for i in range(1, rows):
        dp[i][0] = i

    # Fill the table: matching characters carry the diagonal; otherwise take
    # the cheapest of insert / replace / delete, plus one.
    for i in range(1, rows):
        for j in range(1, cols):
            if candidate[i - 1] == input_command[j - 1]:
                dp[i][j] = dp[i - 1][j - 1]
            else:
                dp[i][j] = 1 + min(dp[i][j - 1], dp[i - 1][j - 1], dp[i - 1][j])
    return dp[rows - 1][cols - 1]
02506be8655f97a60665a507cfa62cb9703590ef
10,446
import inspect
import subprocess


def process(execute_kwargs=None):
    """Execute a set of command lines and collect their results.

    NOTE(review): expects execute_kwargs to contain "commands" and "capture"
    keys -- calling with the default None (-> {}) raises KeyError on
    "commands"; confirm callers always pass both keys.

    :param execute_kwargs: dict with "commands" (str, list of str, or list
        of arg-lists) and "capture" (bool) entries
    :return: list of per-command result dicts with keys among
        "command", "returncode", "error", "output"
    """
    if not execute_kwargs:
        execute_kwargs = {}
    commands = execute_kwargs["commands"]
    # Normalize a single command into a one-element list.
    if not isinstance(commands, list):
        commands = [execute_kwargs["commands"]]
    output_results = []
    for command in commands:
        # A string command is split on whitespace; a list is passed through.
        if isinstance(command, str):
            prepared_command = command.split()
        elif isinstance(command, list):
            prepared_command = command
        else:
            raise TypeError("Incorrect command type handed to process")
        # Subprocess
        run_kwargs = {}
        # capture_output only exists on newer Pythons (3.7+); fall back to
        # explicit PIPEs when it is unavailable.
        available_arguments = inspect.getfullargspec(subprocess.run)
        if (
            "capture_output" in available_arguments.kwonlyargs
            and "capture" in execute_kwargs
        ):
            run_kwargs["capture_output"] = execute_kwargs["capture"]
        else:
            if execute_kwargs["capture"]:
                run_kwargs["stdout"] = subprocess.PIPE
                run_kwargs["stderr"] = subprocess.PIPE
        result = subprocess.run(prepared_command, **run_kwargs)
        # Copy whichever attributes the CompletedProcess actually exposes.
        command_results = {}
        if hasattr(result, "args"):
            command_results.update({"command": " ".join((getattr(result, "args")))})
        if hasattr(result, "returncode"):
            command_results.update({"returncode": str(getattr(result, "returncode"))})
        if hasattr(result, "stderr"):
            command_results.update({"error": str(getattr(result, "stderr"))})
        if hasattr(result, "stdout"):
            command_results.update({"output": str(getattr(result, "stdout"))})
        output_results.append(command_results)
    return output_results
2c265bde354974a215085a897e57dd897528b095
10,449
from datetime import datetime def _is_start_date_before_end_date(start: datetime, end: datetime) -> bool: """Whether the start date is before the end date. Args: start: The start date of an event. end: The end date of an event. Returns: True if valid, otherwise returns False. """ return start <= end
4a296d6673f6beb704b590893088c50a97184764
10,450
import os


def get_data_folder():
    """
    Return the normalized location of the folder containing data files.
    """
    here = os.path.dirname(os.path.realpath(__file__))
    return os.path.normpath(os.path.join(here, '..', 'data'))
3c9e99506bbdaabc1449ca1da2ecaf6afc498b97
10,451
def ConvertIndexListToSet(index_list):
    """Create a set containing the 1-based indices of all '1' entries in the
    index list."""
    return {position + 1 for position, flag in enumerate(index_list) if flag == 1}
78d0769de4b22aabd0d0ea2f906958a929da5299
10,452
from typing import Tuple


def decrypt(text_enc: Tuple[int, int]) -> str:
    """XOR the two tokens together and decode the resulting bytes as text.

    :param text_enc: the tuple of the text encrypted
    :return: the text decrypted
    """
    combined = text_enc[0] ^ text_enc[1]
    n_bytes = (combined.bit_length() + 7) // 8
    return combined.to_bytes(n_bytes, 'big').decode()
0496d90818ef310b885341dad2d91823eadf97e2
10,453
def count_words(text):
    """Count the words in *text*, also splitting tokens on newlines and
    hyphens.

    :param text: string containing words
    :raises TypeError: if *text* is not a string
    """
    if not isinstance(text, str):
        raise TypeError("word counter accepts only strings")
    pieces = []
    for token in text.split(" "):
        if "\n" in token:
            pieces.extend(token.split("\n"))
        elif "-" in token:
            pieces.extend(token.split("-"))
        else:
            pieces.append(token)
    return len(pieces)
773d07a4092298b292601d19bba596f1e8e9bcc2
10,455
def lisser(chaine):
    """Return the smoothed string.

    A string is smoothed by replacing certain patterns, e.g. " de le "
    becomes " du ".
    """
    schemas = (
        (" le a", " l'a"), (" le e", " l'e"), (" le hom", " l'hom"),
        (" le hum", " l'hum"), (" le i", " l'i"), (" le o", " l'o"),
        (" le u", " l'u"), (" le é", " l'é"), (" la a", " l'a"),
        (" la e", " l'e"), (" la his", " l'his"), (" la i", " l'i"),
        (" la o", " l'o"), (" la u", " l'u"), (" la é", " l'é"),
        (" de le ", " du "), (" à le ", " au "), (" de a", " d'a"),
        (" de e", " d'e"), (" de hu", " d'hu"), (" de i", " d'i"),
        (" de o", " d'o"), (" de u", " d'u"), (" de é", " d'é"),
    )
    for pattern, replacement in schemas:
        chaine = chaine.replace(pattern, replacement)
    return chaine
39e6c406b3708e1f5c4fd6e9845fb3e860761ef4
10,456
def moveZeroes(nums):
    """
    Move all zeroes to the end of *nums* in place, preserving the relative
    order of the non-zero elements.

    :type nums: List[int]
    :rtype: the same list (modified in place), returned for convenience
    """
    write = 0
    for value in nums:
        if value != 0:
            nums[write] = value
            write += 1
    for idx in range(write, len(nums)):
        nums[idx] = 0
    return nums
909f0dcac374dd8242dae4a13460ced4383dde3b
10,457
import argparse


def get_argument_parser():
    """Set up command line arguments and usage."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'location', type=str,
        help='file or directory containing unit tests'
    )
    parser.add_argument(
        '--pattern', '-p', default='^(test_.*|.*_test)\\.py$', type=str,
        help='filename regex for test discovery'
    )
    # Boolean flags all share the same shape, so declare them as data.
    flag_specs = (
        (('--terminal', '-t'), 'do not search for tests recursively'),
        (('--json', '--j'), 'print results in JSON format'),
        (('--color', '-c'), 'colorize results'),
        (('--full', '--f'), 'show coverage for each line'),
        (('--use-exit-code',), 'use exit code to indicate non-passing tests'),
    )
    for names, help_text in flag_specs:
        parser.add_argument(
            *names, default=False, action='store_true', help=help_text
        )
    return parser
0b60c62918693fe2c1a246c2461fe8a63749ef26
10,460
def get_mentored_team(mentor, hackathon): """ Retrieve all mentored teams for a Judge at a Hackathon """ mentored_teams = None mentored_teams = mentor.mentored_teams.filter( hackathon=hackathon).order_by('display_name') return mentored_teams
de6e25b3c116b4bca4c4559b3a204d46ed5b38ba
10,461
def get_ranges(uid_number, inc=0):
    """
    Return two block ranges to be used to create subnets for
    Atmosphere users.

    NOTE: If you change MAX_SUBNET then you should likely change the
    related math.
    """
    MAX_SUBNET = 4064  # 16 * 254
    n = uid_number % MAX_SUBNET

    # 16-31
    block1 = (n + inc) % 16 + 16

    # 1-254 -- floor division: the original used "/", which on Python 3
    # returns a float and made block2 a float instead of an int.
    block2 = ((n + inc) // 16) % 254 + 1
    return (block1, block2)
b364a9051a8c8a9ea68dcee76fc7b993e30c13af
10,462
def returnRectangles(a, x):
    """
    Return the 2D discrete integral array using the rectangle method.

    Each element is :math:`\\Delta y_i = 0.5(a_{n-1}+a_{n})*(x_{n-1}-x_{n})`

    Parameters
    ----------
    a : numpy.ndarray
        Array of y(x) function values with N+1 elements.
    x : numpy.ndarray
        x-coordinate array with N+1 elements.

    Returns
    -------
    numpy.ndarray
    """
    heights = a[1:] + a[:-1]
    widths = x[1:] - x[:-1]
    return 0.5 * heights * widths
dd9931bcc2eae7d6712d4e809b23e3145ee4555b
10,463
def strip_id(url):
    """Get MP database ID from url."""
    parts = url.split('/')
    return parts[-2]
fa4ae6a3f58067b99fd5f89a2d1f10b49bc27f5e
10,464
def find_channel(channel_name, source):
    """
    Search for a channel by name (case-insensitive) on the given object.
    """
    wanted = channel_name.lower()
    return next(
        (channel for channel in source.channels
         if channel.name.lower() == wanted),
        None,
    )
2b9feb9f60986392137deb6c8364e6c2d7e5efd4
10,466
def conf(db, context, log, fields):
    """Perform CONF command.

    Adjusts the configuration of the Crusher database, logs the command,
    and reports whether it is OK to exit.

    :param db: database object exposing ``configure`` and ``doExit``
    :param context: unused here; kept for a uniform command signature
    :param log: writable file-like object that receives the command log
    :param fields: parsed command fields; fields[1] is the new config value
    :return: ``db.doExit`` -- truthy when CTRL-C has been pressed
    """
    # Configure the database.  (The original used bare string expressions as
    # comments; those no-op statements are now real comments.)
    db.configure(fields[1])
    # Copy the configuration command to the log.
    log.write("{}\t{}\n".format(fields[0], fields[1]))
    # This is an OK time to exit, so report whether CTRL-C has been pressed.
    return db.doExit
89ac0019a1b2beccc6852f57616239a776575be2
10,467
def get_metadata(data):
    """Return the 'Metadata' section of the parsed CloudFormation content."""
    metadata = data['Metadata']
    return metadata
5fed88a19704430272b73751938f34a3a7a09d8d
10,468
import socket


def get_ip_and_port():
    """Discover the local IP address and an ephemeral port via a UDP socket."""
    sock = socket.socket(
        family=socket.AF_INET,
        type=socket.SOCK_DGRAM,
        proto=0,
        fileno=None,
    )
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 0)
    sock.connect(('<broadcast>', 0))
    ip, port = sock.getsockname()
    sock.shutdown(socket.SHUT_RDWR)
    sock.close()
    return ip, port
e26e2b7462eedb967380c163f8d848b0e00df369
10,470
from datetime import timezone


def to_naive_utc(dtime):
    """Convert a datetime object to UTC and then remove the tzinfo.

    If the datetime is naive already, it is returned unchanged.
    """
    if not hasattr(dtime, 'tzinfo') or dtime.tzinfo is None:
        return dtime
    # datetime.timezone.utc replaces the third-party pytz dependency;
    # astimezone() + replace(tzinfo=None) yields the same naive result.
    return dtime.astimezone(timezone.utc).replace(tzinfo=None)
4d76ef2a3a5f11efeeedb9f920bc15b55c996da3
10,471
def get_true_positives(data, classifier):
    """Size of the subframe whose rows are real positives AND classifier-selected."""
    selected = data["foundSegment"] & classifier(data)
    return data[selected].size
2d1581e5f9ade4ff299557c76f3a9507c4dc5a55
10,474
def fill_coords(datafr):
    """Fill NaN cells of *datafr* in place via quadratic interpolation.

    Parameters
    ----------
    datafr : pandas.DataFrame
        Frame whose missing values should be interpolated.

    Returns
    -------
    pandas.DataFrame
        The same frame, with NaNs replaced by interpolated values.
    """
    # pandas' built-in interpolation supersedes the old hand-rolled
    # latitude loop that used to live here (removed as dead code).
    datafr.interpolate(method='quadratic', inplace=True)
    return datafr
2fed07b6c594551bcf96484fceae3f57d52ebf83
10,477
def precision_at_k(vanilla_topk, fair_topk):
    """
    calculate precision @ K: the fraction of debiased top-K nodes that
    also appear in the vanilla top-K.

    :param vanilla_topk: top K nodes in vanilla mining result
    :param fair_topk: top K nodes in debiased mining result
    :return: precision @ K
    """
    fair, vanilla = set(fair_topk), set(vanilla_topk)
    return len(fair & vanilla) / len(fair)
073abf75d0a66d492541d13c799b67c1b09662f8
10,478
import hashlib
import json


def param_to_hash(param_dict):
    """Generate a deterministic MD5 hex digest for a hyperparameter setting.

    Keys are sorted so logically-equal dicts always hash identically.
    """
    canonical = json.dumps(param_dict, sort_keys=True).encode("utf-8")
    return hashlib.md5(canonical).hexdigest()
5302000372023af0d2ea44ca3e5497741cd84e58
10,479
def lower(series):
    """Lowercase every string element of a pandas Series."""
    return series.str.lower()
fd8f443ff8cb27700bceed3a2b2843befbc932a3
10,480
def parse_account(s):
    """Split a whitespace-trimmed 'entity.account' string into its dotted parts."""
    return s.strip().split('.')
acb14bd71a670e67b306f022792702dc2677539a
10,482
import torch


def chunk_rays(rays, start, delta):
    """Return a chunk of *rays*: each value sliced to rows [start, start+delta).

    rays: a dictionary; tensor values are first flattened to 2-D keeping
    the last dimension, then sliced along the first axis.
    """
    chunk = {}
    for key, val in rays.items():
        if torch.is_tensor(val):
            # Collapse leading dimensions so the slice runs over rays.
            val = val.view(-1, val.shape[-1])
        chunk[key] = val[start:start + delta]
    return chunk
8015d1d69b77e2a7fab6b33d30a104982aedfa9c
10,483
def normalize_job_id(job_id):
    """Split a scheduler job id into (job_id, array_id).

    Any trailing '.host' suffix is stripped first. Ids like '123[4]'
    yield array_id '4'; '123[]' and plain ids yield array_id None.
    """
    base = job_id.split('.')[0]
    if '[' in base:
        base, array_part = base.split('[')
        array_id = array_part.strip('[]') or None
        base = base.strip('[]')
    else:
        array_id = None
    return base, array_id
6eb83b54f8a0cec8c094372a5e5446334a1863c3
10,485
import random


def select_ip_from_network(network):
    """
    Randomly selects an IP address from a particular network

    :param network: Network from which to select the IP address; must
        expose an integer ``address`` and a prefix length ``prefix``
    :return: IP address as integer from network
    """
    host_bits = 32 - network.prefix
    highest = network.address | ((1 << host_bits) - 1)
    return random.randint(network.address, highest)
7356b6cedd5bc78933484e0cba9a8f84d1507b07
10,487
import json
import requests
def query(query, experiment_id, host, verify_cert=True):
    """ Query the MDML for an example of the data structure that your
    query will return. This is aimed at aiding in development of FuncX
    functions for use with the MDML.

    Parameters
    ----------
    query : list
        Description of the data to send funcx. See queries format in the
        documentation on GitHub
    experiment_id : string
        MDML experiment ID for which the data belongs
    host : string
        Host of the MDML instance
    verify_cert : bool
        Boolean is requests should verify the SSL cert

    Returns
    -------
    list
        Data structure that will be passed to FuncX
    """
    url = f"https://{host}:1880/query?query={json.dumps(query)}&experiment_id={experiment_id}"
    resp = requests.get(url, verify=verify_cert)
    return json.loads(resp.text)
3080ab6322a2756fdc6cb4da8ccc81b96154cbb4
10,488
import os
def attemptFileClear(fName, later):
    """
    Attempts to remove the file. If not possible, store it in "later".
    @ In, fName, string, name of file to remove
    @ In, later, list, list of files to remove later
    @ Out, later, list, list of files to remove later
    """
    try:
        os.remove(fName)
    except OSError:
        # Deletion failed (missing, locked, or in use) — retry later.
        later.append(fName)
    return later
208dcce6ffa1a6de65a93f0b14befe019ddb20c6
10,489
def GetPercentage(number, total):
    """Returns the float percentage that a number is of a total.

    Falsy *number* (0, None, etc.) short-circuits to 0.
    """
    if not number:
        return 0
    fraction = float(number) / total
    return fraction * 100
46e2a5b2b4a3fa648792a461852f84e886810a7f
10,491
import re
def parse_billing_code_col(s:str):
    """Split billing code into separate fields for code_type and code. E.g., 'MSDRG .... 001'."""
    tokens = s.split()
    if 'MS-DRG' in s:
        # MS-DRG rows carry the code in the fifth whitespace-separated field.
        return tokens[0], tokens[4]
    if re.search('CPT|HCPCS', s):
        return tokens[0], tokens[1]
    return 'Other', None
78865842a457c0226184ace94334d0934a0e0a5c
10,493
def DetermineLocaleType(locale_str):
    """Determines the locale 'type' for a given locale name.

    Returns:
      (string) Always one of the following strings,
      'world'    If the locale name refers to the world.
      'country'  If the locale name looks like a country ID (1 part).
      'region'   If the locale name looks like a region ID (2 parts).
      'city'     If the locale name looks like a city ID (3 parts).
    """
    if locale_str == 'world':
        return 'world'
    parts = locale_str.split('_')
    return {1: 'country', 2: 'region', 3: 'city'}[len(parts)]
52ac86d8e011d9d01b6f31ed6a27d535a55b26a4
10,495
def get_model(instance):
    """Return the model class attached to *instance* via its _meta options."""
    return instance._meta.model
e386449ecd49d5874fb704c683c17d4492ea6f6c
10,497
def force_slashend(path):
    """
    Return ``path`` suffixed with ``/`` (unchanged if it already ends
    with ``/``).
    """
    return path if path.endswith('/') else path + '/'
2e2be0dbb88fb380e581f49af532ea7b5724d918
10,498
def unfuse_right(A, shp):
    """Reverses fuse_right: reshape the fused array back to (d, chiL, chiR)."""
    d, chiL, chiR = shp
    # Unflatten to (chiL, d, chiR), then swap the first two axes.
    unfused = A.reshape((chiL, d, chiR))
    return unfused.transpose((1, 0, 2))
31d2e50d3a5ca715dc04ee9305db76cbc566de3e
10,500
def lift(cra, crb):
    """Returns the relative uplift in conversion rate.

    Parameters
    ----------
    cra: float
        Conversion rate of Group A
    crb: float
        Conversion rate of Group B

    Returns
    -------
    float
        Relative uplift in conversion rate, in percent
    """
    delta = crb - cra
    return (delta / cra) * 100
2489fef702153657f007751bc5234d11d8333f75
10,502
import ast
def load(symbol_id: str) -> ast.Name:
    """Build an AST Name node that reads (loads) the variable *symbol_id*."""
    return ast.Name(id=symbol_id, ctx=ast.Load())
94a76bdac89a9e7b1f766832dd2aef63469704ac
10,503
def _NegGrad(_, grad): """Returns -grad.""" return -grad
c2a2800e1d80c3151019425e3871c235485c9324
10,504
from typing import OrderedDict
def init_vec(f, names, data):
    """Initialize resizable 1d arrays to hold the outputs.

    Pairs each name in *names* with the matching column of *data* and
    creates an unbounded-length (maxshape=(None,)) dataset for it on *f*.
    """
    pairs = []
    for name, column in zip(names, data.T):
        pairs.append((name, f.create_dataset(name, data=column, maxshape=(None,))))
    return OrderedDict(pairs)
ed024e176da68d1a6753da305cea0a36d21e0e7a
10,505
def clean_url(url):
    """
    Reformat a URL with all querystrings stripped

    :param url: The URL
    :return: A clean URL (unchanged when it has no querystring)
    """
    # BUG FIX: str.find returns -1 when '?' is absent, so the old
    # url[:url.find('?')] silently chopped the last character off
    # query-less URLs. partition is safe in both cases.
    return url.partition('?')[0]
56edc6db7b59e1550a68377dcdcb80a83b79e854
10,506
def level_is_rtl(lev):
    """
    Return True if `lev' is a Right-to-Left level, False otherwise.

    Odd BiDi embedding levels are RTL. Returns a real bool as the
    docstring promises (the old code returned the raw int 0/1, which is
    truth-equivalent, so callers are unaffected).
    """
    return bool(lev & 1)
5c3d6127bcef2f17b347c7c26a86f723f25ab6a7
10,510
def GroupBuilder(size, builderFunc, indAttr):
    """Build a group of individuals.

    size - the number of members of the group
    builderFunc - callable(individualSize, possList, probList) producing one member
    indAttr - a list of [size, possList, probList] for IndividualBuilder
    """
    individualSize, possList, probList = indAttr
    return [builderFunc(individualSize, possList, probList) for _ in range(size)]
8bd144c71acd177518b12ce2dd209d2838c6fff4
10,511
def _read_timestamp(file): """Get start and end time from timestamp csv file.""" try: with open(file, 'r') as f: rows = f.readlines() starttime, endtime = float(rows[0].split(",")[0]), float(rows[-1].split(",")[0]) starttime, endtime = starttime / (10**3), endtime / (10**3) except IOError: starttime, endtime = "Nan", "Nan" return starttime, endtime
be403b62623f45e6cf43c6239c1c068a48987183
10,513
import torch
def generate_padding_mask(x: torch.Tensor, length: torch.Tensor) -> torch.Tensor:
    """Build a mask marking valid (non-padded) positions.

    Args:
        x: tensor of shape [batch_size, length]
        length: tensor of shape [batch_size]

    Returns:
        Tensor of x.dtype, shape [batch_size, length]: 1 where the column
        index is below that row's length, 0 elsewhere.
    """
    assert x.dim() == 2
    assert length.dim() == 1
    positions = torch.arange(x.shape[1], device=x.device)
    valid = positions.unsqueeze(0) < length.unsqueeze(1)
    return valid.to(x.dtype)
d475cfa6eb81525745bed8613827c59bac57aa5d
10,515
def object_func(x, A, b):
    """
    Objective function for the optimization for L-BFGS-B:
    0.5 * ||A*x - b||^2 using the operands' own `*` semantics.
    """
    residual = A * x - b
    return 0.5 * residual.dot(residual)
8a9453f6f93f4b7c7e2f5202a2a81750df5c27df
10,518
def has_prefix(sub_s, d):
    """Return True if any key of *d* starts with *sub_s*, else False.

    :param sub_s: candidate prefix string
    :param d: mapping whose keys are checked
    :return: bool — the original fell off the end and returned None on a
        miss; an explicit False is returned now (still falsy, so callers
        are unaffected).
    """
    return any(key.startswith(sub_s) for key in d)
c9544f3937a47eb7d9b18b4209acd3fa9283de12
10,520
def bayesdb_generator_modelnos(bdb, generator_id):
    """Return list of model numbers associated with given `generator_id`."""
    sql = '''
        SELECT modelno FROM bayesdb_generator_model AS m
            WHERE generator_id = ?
            ORDER BY modelno ASC
    '''
    cursor = bdb.sql_execute(sql, (generator_id,))
    return [modelno for (modelno,) in cursor]
e7cbb96679f25815df6a28e3eb89ad61e4b20e09
10,522
from typing import List
def format_terminal_call(cmd: List[str]) -> str:
    """
    Format commands to/from the terminal for readability

    :param cmd: List of strings much like sys.argv
    :return: Formatted string used for display purposes
    """
    joined = ' '.join(cmd)
    # Break before every long-option flag for display purposes.
    return joined.replace("--", " \\ \n\t--")
63af43a7d8a5cb708f8a9f6d7467e62e378876b4
10,523
from typing import Dict
import yaml
def load_config_file(file_path: str) -> Dict:
    """
    Load a YAML config file. Uses UnsafeLoader

    NOTE(review): UnsafeLoader can execute arbitrary Python embedded in
    the YAML stream — only load trusted files.

    :rtype: Dict
    """
    with open(file_path, 'r') as yaml_file:
        return yaml.load(yaml_file, Loader=yaml.UnsafeLoader)
52024a77e8e940f919245bb7b5093043f6d2158c
10,524
def is_latin_square(row_length: int, array: str) -> bool:
    """Return whether *array* (row-major, row_length x row_length) is a latin square.

    Checks that no symbol repeats within any row or any column. The old
    implementation left a debug print in place, mismanaged its row
    bookkeeping, and never implemented the promised vertical check —
    all fixed here.
    """
    # Cut the flat string into rows of row_length symbols each.
    rows = [array[i:i + row_length] for i in range(0, len(array), row_length)]
    # Horizontal check: each symbol unique within its row.
    for row in rows:
        if len(set(row)) != len(row):
            return False
    # Vertical check: each symbol unique within its column.
    for col in zip(*rows):
        if len(set(col)) != len(col):
            return False
    return True
f85721f93f27b72797702849375992e43314a847
10,525
import argparse def _get_argparser(): """to organize and clean format argparser args""" parser = argparse.ArgumentParser() parser.add_argument( 'arg-1', help='desc' ) parser.add_argument( "--optional-arg-1", action="store", dest="optional_arg_1", default="default_optional_arg_1", help="help message" ) parser.add_argument( "--optional-arg-2", action="store", dest="optional_arg_2", default="default_optional_arg_2", help="help message" ) # . # (and any more arguments) # . # mutuall exclusive args mutually_exclusive_args = parser.add_mutually_exclusive_group() mutually_exclusive_args.add_argument( "--me_arg_1", action="store", dest="me_arg_1", default="default_me_arg_1", help="help message" ) mutually_exclusive_args.add_argument( "--me_arg_2", action="store", dest="me_arg_2", default="default_me_arg_2", help="help message" ) # . # (and any more mutually exlusive arguments) # . # catch all 'vars' argument for user to list any other field/values parameters parser.add_argument( "--vars", action="store", dest="vars", nargs="*", default=None, help="Any additional variables that do not have explicit command line options. Supply as a list in form: '--vars var1=val1 var2=val2 ..." ) return parser
c30acede95e36de05361a50d8060f497e52afbee
10,529
from typing import Optional
import os
def guess_requirements_path(django_directory_path: str, project_name: str) -> Optional[str]:
    """Guess the absolute path of requirements.txt.

    Search order:
      1. "requirements.txt" directly in the given directory.
      2. "requirements.txt" in django_directory/<project_name>.
      3. A file containing "prod" or "deploy" in django_directory/requirements.
      4. None if nothing matches.

    Args:
        django_directory_path: Absolute path of a Django project.
        project_name: Name of the Django project. e.g. mysite.

    Returns:
        Absolute path of requirements.txt of the given Django project,
        or None if it cannot be found.
    """
    def _direct(directory):
        # requirements.txt immediately inside *directory*, if present.
        if os.path.exists(directory) and 'requirements.txt' in os.listdir(directory):
            return os.path.join(directory, 'requirements.txt')
        return None

    found = _direct(django_directory_path)
    if found:
        return found

    found = _direct(os.path.join(django_directory_path, project_name))
    if found:
        return found

    requirements_dir = os.path.join(django_directory_path, 'requirements')
    if os.path.exists(requirements_dir):
        for file_name in os.listdir(requirements_dir):
            if 'prod' in file_name or 'deploy' in file_name:
                return os.path.join(requirements_dir, file_name)
    return None
3a01319bcdd7c90e2357deb9b93cec09650784f8
10,530