content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
def update_extreme(val, fncn, new_val):
    """Fold ``new_val`` into a running extreme, tolerating a ``None`` seed.

    :param val: current extreme, or None if nothing has been seen yet
    :param fncn: reducer such as ``min`` or ``max``
    :param new_val: candidate value
    :return: the updated extreme
    """
    return new_val if val is None else fncn(val, new_val)
6c143bd68111fead601464c341d57c32098305d2
84,038
def diff_months(sub1, sub2):
    """Return the signed number of whole calendar months from sub1 to sub2.

    Positive when ``sub2`` is later than ``sub1``, negative when earlier.
    Day-of-month is ignored; only year and month fields are compared.

    :param sub1: start date (needs ``.year`` and ``.month``)
    :param sub2: end date (needs ``.year`` and ``.month``)
    :return: month difference as an int
    """
    # The original year/month bookkeeping double-counted a month whenever
    # the years differed (e.g. Jan 2020 -> Jan 2021 gave 13); the direct
    # formula below is exact in all cases.
    return (sub2.year - sub1.year) * 12 + (sub2.month - sub1.month)
e083989199f40ee14f8f684ea5a089fe651c900f
84,041
def get_split_type(image_file):
    """Return the dataset split: 'train', 'test' or 'test_sync'.

    The split name is encoded as the 4th-from-last component of the
    slash-separated image path.
    """
    parts = image_file.split('/')
    return parts[-4]
bda981d49a23d9427bff2b492e343ad42a7aedc2
84,048
def char_convert(character):
    """Return the integer value of a hexadecimal digit string.

    Kept as a separate helper (rather than inlined in ``assemble``)
    for readability.

    :param character: hex digits without the ``0x`` prefix
    :return: the decoded integer
    """
    prefixed = "0x" + character
    return int(prefixed, 0)
b619da3411db5e9817d3e7d82f6e7a4bc1f393dc
84,054
import re


def ngrams(string, n=3):
    """Generate character n-grams from a string.

    :param string: text to split into n-grams (must be a str, despite
        what the original docstring claimed about lists)
    :type string: str
    :param n: length of each n-gram, defaults to 3
    :type n: int
    :raises AssertionError: if ``string`` is not a str
    :return: list of n-gram strings
    :rtype: list
    """
    # Reject non-string input up front (kept as assert for
    # backward-compatible AssertionError behavior).
    assert type(string) == type("string"), "String not passed in!"
    # Drop simple punctuation and the literal " BD" marker.
    string = re.sub(r"[,-./]|\sBD", r"", string)
    # Slide n staggered views over the text and zip them together.
    windows = [string[offset:] for offset in range(n)]
    return ["".join(gram) for gram in zip(*windows)]
ac0699e7d18d25283e92cb4af70f39bef90f7483
84,060
import re


def norm_apostrophe(raw_text):
    """Normalize curly and backtick apostrophes to the ASCII form."""
    text = str(raw_text)
    for fancy in ("’", "`"):
        text = re.sub(fancy, "'", text)
    return text
0cf8866036a178089199a0b2d2199b5bbc8b95db
84,061
import random


def random_integer(*, lower=-100, upper=100):
    """Return a uniformly random integer in the closed range [lower, upper]."""
    value = random.randint(lower, upper)
    return value
6e3b5e58dece998dc4751fb323fef7e980b020f4
84,063
def denormalize(grid):
    """Denormalize input grid from range [0, 1] to [-1, 1].

    Args:
        grid (Tensor): The grid to be denormalized, range [0, 1].

    Returns:
        Tensor: Denormalized grid, range [-1, 1].
    """
    doubled = grid * 2.0
    return doubled - 1.0
33c6901ac1630618176eda0baba3d083cb0b4643
84,064
import math


def GetSampleRange(freq, duration_sec, start_sec):
    """Create the half-open sample index range [start, end) for a chunk.

    Args:
        freq: sampling frequency in Hz.
        duration_sec: chunk length in seconds.
        start_sec: chunk start time in seconds.

    Returns:
        Tuple (start_sample, end_sample).
    """
    start = int(math.floor(freq * start_sec))
    end = int(math.floor(freq * duration_sec + start))
    return (start, end)
ae48460edd4f12840568f25405217e20a4f235af
84,066
import zipfile


def is_zipfile(file: str) -> bool:
    """Thin wrapper for detecting whether ``file`` is a true ZIP archive."""
    result = zipfile.is_zipfile(file)
    return result
22960eb8fb4e2f99c9a48f692588789ada2ab218
84,071
def follow_alignment(inst, id):
    """Resolve an ID through chained alignment links.

    If the item with the given ID is aligned to another item, follow
    that link (and any further links transitively) and return the
    final item.

    :param inst: container exposing ``find(id)``
    :param id: ID to resolve
    :return: the terminal item, or None if ``id`` is not found
    """
    found = inst.find(id)
    if not found:
        return None
    # Follow the alignment chain transitively.
    if found.alignment:
        return follow_alignment(inst, found.alignment)
    # NOTE(review): the original contained a "containing word" branch
    # guarded by a local ``w`` that was always None, making it dead
    # code; it has been removed without changing behavior.
    return found
e1b3a68efebd113525c416fdff44f0686b94740c
84,077
def get_log_filepath(conf):
    """Return the log file location from a parsed configuration.

    Looks through ``conf['datastores']`` for the first entry whose
    ``type`` is "file" (case-insensitive) and returns its ``location``.
    Falls back to ``'./tasks/'`` when no such datastore exists.

    :param conf: dict with a ``datastores`` list of dicts
    :return: the location string
    """
    # ds.get('type', '') guards against datastores missing the 'type'
    # key, which previously raised AttributeError on None.lower().
    file_store = next(
        (ds for ds in conf.get('datastores')
         if ds.get('type', '').lower() == 'file'),
        {'location': './tasks/'},
    )
    return file_store.get('location')
1d820ca13e6f795d16914589011943e8bb7c1740
84,078
def parse_base_recalibration(parser):
    """Register the Base Recalibration flag on an argument parser.

    Adds ``--BaseRecalibration`` / ``-brec`` (int, default 0) and
    returns the same parser for chaining.
    """
    parser.add_argument(
        '--BaseRecalibration',
        '-brec',
        help='Pipeline to include base recalibration.',
        default=0,
        type=int,
    )
    return parser
8c5c8bf73e0c3bc8829b80bd50573d41bd1f0a7a
84,079
def issubclass_safe(cls, bases) -> bool:
    """like issubclass, but returns False (instead of raising TypeError)
    when the arguments are not valid classes:

    >>> issubclass_safe(Exception, BaseException)
    True
    >>> issubclass_safe(Exception, ValueError)
    False
    >>> issubclass_safe(123, BaseException)
    False
    """
    try:
        result = issubclass(cls, bases)
    except TypeError:
        result = False
    return result
a9c1912d6342798053b06d1fd1a086d0027d5fe9
84,083
def ensure_trailing_slash(url):
    """Return ``url`` with exactly one trailing slash.

    Normalizing the trailing slash up front helps us avoid HTTP
    redirects later.
    """
    trimmed = url.rstrip('/')
    return trimmed + '/'
8d762c3ff29c724ef656e13b3bf26200a8cf4bba
84,085
import re


def get_aperture_coeffs_in_header(head):
    """Get coefficients of each aperture from the FITS header.

    Args:
        head (:class:`astropy.io.fits.Header`): Header of FITS file.

    Returns:
        *dict*: A dict mapping (channel, aperture) to its list of
        coefficients, appended in increasing coefficient order.
    """
    # Compile once outside the loop (the original recompiled the pattern
    # for every header card) and use a raw string for the \d escapes.
    pattern = re.compile(
        r'^GAMSE TRACE CHANNEL [A-Z] APERTURE \d+ COEFF \d+$')
    coeffs = {}
    for key, value in head.items():
        if pattern.match(key) is None:
            continue
        g = key.split()
        channel = g[3]
        aperture = int(g[5])
        icoeff = int(g[7])
        if (channel, aperture) not in coeffs:
            coeffs[(channel, aperture)] = []
        # Append only when the card arrives in coefficient order; this
        # preserves the original ordering/de-duplication behavior.
        if len(coeffs[(channel, aperture)]) == icoeff:
            coeffs[(channel, aperture)].append(value)
    return coeffs
af1f7de823bc870de8d878a71ec16533fa91e45f
84,087
import math


def round_to_n_significant_digits(value, n_digits):
    """Round to n significant digits.

    Rounds a number to n significant digits, e.g. for 1234 the result
    with 2 significant digits would be 1200.

    Args:
        value (float/int): the value to be rounded
        n_digits (int): the desired number of significant digits

    Returns:
        float/int: the value rounded to the desired number of
        significant digits (0 is returned unchanged)

    Raises:
        TypeError: if value is not numeric or n_digits is not an int
        ValueError: if n_digits is not positive
    """
    if not isinstance(value, (float, int)):
        raise TypeError("Value must be int or float")
    if not isinstance(n_digits, int):
        raise TypeError("Number of digits must be int")
    if not n_digits > 0:
        raise ValueError("Number of digits must be greater than zero")
    # Zero has no leading significant digit; log10(0) would raise a
    # ValueError in the original, so short-circuit it here.
    if value == 0:
        return value
    return round(value, n_digits - 1 - int(math.floor(math.log10(abs(value)))))
4fc4f19aa8c16595b89b1c0d8f748b7286129273
84,091
def teacher_input(random_numbers):
    """Build the `teacher-input.txt` contents for the MOOC grader.

    `run.sh` concatenates the student submission to this text as the
    exercise is posted to MOOC Grader for grading.

    :param random_numbers: sequence of three numbers for the exercise
    :return: MathCheck configuration string
    """
    a, b, c = random_numbers[0], random_numbers[1], random_numbers[2]
    lines = [
        u"arithmetic",
        u"f_nodes 3",
        u"{}/3^({}) + {}".format(a, b, c),
    ]
    return u"\n".join(lines) + u"\n"
34633a8a7b471896d71f8db09c0c2d3af1b79836
84,097
def qiskit_2qb(qc):
    """Return the parameterless two-qubit gate methods of a circuit.

    :param qc: a qiskit ``QuantumCircuit``
    :return: list of bound gate methods (swap, cx, cy, cz, ch)
    """
    gates = [qc.swap, qc.cx, qc.cy, qc.cz, qc.ch]
    return gates
a51dcf6a6c1e0aa013f1e5a1a433365fea0eba92
84,098
import string
import secrets


def generate_random_string(size: int = 8, chars: str = string.ascii_lowercase + string.digits) -> str:
    """Generate a random string of ``size`` characters drawn from ``chars``.

    Uses a cryptographically strong source (``secrets.SystemRandom``).
    """
    rng = secrets.SystemRandom()
    picks = [rng.choice(chars) for _ in range(size)]
    return ''.join(picks)
250db60bed29f95586eb14da2b9cc61790fb06c8
84,101
import uuid
import json
import requests


def set_azure_cloudcheckr_application_service_assignment(AzureApiBearerToken, AzureReaderRoleId, AzureCloudCheckrApplicationServicePrincipalId, AzureSubscriptionId):
    """Grant the CloudCheckr application a reader role assignment on a subscription.

    Creates a new role assignment via the Azure management REST API:
    https://docs.microsoft.com/en-us/azure/role-based-access-control/role-assignments-rest

    :param AzureApiBearerToken: OAuth bearer token for the management API
    :param AzureReaderRoleId: full resource ID of the Reader role definition
    :param AzureCloudCheckrApplicationServicePrincipalId: service principal
        object ID of the CloudCheckr application
    :param AzureSubscriptionId: target subscription ID
    :return: the role definition ID echoed back by Azure on success,
        otherwise None
    """
    # A fresh GUID names the new role assignment resource.
    RoleAssignmentId = str(uuid.uuid1())
    api_url = "https://management.azure.com/subscriptions/" + AzureSubscriptionId + "/providers/Microsoft.Authorization/roleAssignments/" + RoleAssignmentId + "?api-version=2015-07-01"
    authorization_value = "Bearer " + AzureApiBearerToken
    role_assignment_data = json.dumps({"properties": {"principalId": AzureCloudCheckrApplicationServicePrincipalId, "roleDefinitionId": AzureReaderRoleId}})
    # PUT creates the assignment; the response body is JSON.
    response = requests.put(api_url, headers={"Authorization": authorization_value, "Content-Type": "application/json"}, data=role_assignment_data)
    print(response.json())
    # Success responses include a "properties" object echoing the
    # role definition that was assigned.
    if "properties" in response.json():
        properties = response.json()["properties"]
        if "roleDefinitionId" in properties:
            return properties["roleDefinitionId"]
    print("Failed to set role assignment for the CloudCheckr Application to the specified subscription")
    return None
1d0462f885810977f9ec40e950d8d7df3e0471a0
84,102
def contains(search_list, predicate):
    """Return True if and only if the list contains an element x where
    predicate(x) is True."""
    return any(predicate(element) for element in search_list)
2571e4d328664e4656f9866fd130ff34d7e0c330
84,107
import re


def clean(df, col):
    """Clean Twitter text into a new "clean_text" column.

    Arguments:
        df {[pandas dataframe]} -- Dataset that needs to be cleaned
        col {[string]} -- column in which text is present

    Returns:
        [pandas dataframe] -- Dataframe with a "clean_text" column added
    """
    def _scrub(text):
        text = re.sub(r"RT\s@\w+:", "Retweet", text)  # Removes RTs
        text = re.sub(r"@", "mention ", text)          # Replaces @ with mention
        text = re.sub(r"#", "hashtag ", text)          # Replaces # with hashtag
        text = re.sub(r"http\S+", "", text)            # Removes URLs
        return text

    df["clean_text"] = df[col].apply(_scrub)
    return df
f09cd911b3065fea0c9902645da3ec9c5d94ce41
84,109
import random


def randomSplit(l, propTrain, propValidate):
    """Create shuffled index lists for training, validation and test sets.

    Parameters
    ----------
    l : int
        Number of indices to split.
    propTrain : float [0->1]
        Proportion of data that should be training data.
    propValidate : float [0->1]
        Proportion of data that should be validation data.

    Returns
    -------
    trainInd, valInd, testInd : list of int
        Index lists for the three splits (test gets the remainder).
    """
    ind = list(range(l))
    random.shuffle(ind)
    cut1 = round(propTrain * len(ind))
    cut2 = round((propTrain + propValidate) * len(ind))
    return ind[:cut1], ind[cut1:cut2], ind[cut2:]
25e44e6fa164fce9596e8af8ec5ea51ba561810d
84,112
def get_accept_header(request):
    """
    Extract the accept header from the request.

    Args:
        request (HTTPRequest): The HTTP request

    Return:
        a dict with key 'Accept' when the header is present,
        otherwise an empty dict
    """
    meta = request.META
    if 'HTTP_ACCEPT' in meta:
        return {'Accept': meta['HTTP_ACCEPT']}
    return {}
fea942833c0e81d409b47d745b7910d5fa3b8fd4
84,120
def flatten(data):
    """Reshape a batch of N-D tensors into a (B, -1) flat view."""
    batch = data.size(0)
    flat = data.view(batch, -1)
    return flat
dd434241a8f3a491e39094f485e12973e6ef4c4a
84,126
def parse_ucx(name):
    """Parse a UCX collision-object name.

    Takes an object name and returns a 2-tuple of the original object
    name (without the 'UCX_' prefix) and the UCX index suffix as an int.
    https://docs.unrealengine.com/latest/INT/Engine/Content/FBX/StaticMeshes/index.html#collision

    Returns (None, None) if the input name is not a UCX_ at all.
    Returns an index of -1 if the input is a UCX but no index could be
    parsed.
    """
    if not name.startswith("UCX_"):
        return (None, None)
    # strip UCX_ prefix
    name = name[4:]
    # index starting value
    idx = -1
    # Handle Blender's duplicated-object suffix ".NNN" first.
    if len(name) > 4:
        if name[-1].isdigit() and name[-2].isdigit() and name[-3].isdigit() and name[-4] == ".":
            # add to the index whatever value is in the last 3 digits
            idx += int(name[-3:])
            name = name[:-4]
    # Extract trailing digit characters from the end of the name.
    last_digits = []
    for i in range(1, len(name)):
        if name[-i].isdigit():
            last_digits.insert(0, name[-i])
        else:
            break
    # Only strip and accumulate when digits were actually found; the
    # original unconditionally did name[:-0] here, which wiped the name
    # to "" for suffix-less inputs like "UCX_Box".
    if last_digits:
        name = name[:-len(last_digits)]
        # strip a separating dot or underscore too
        if name.endswith(".") or name.endswith("_"):
            name = name[:-1]
        idx += int("".join(last_digits))
    return (name, idx)
1ed888d0eda559edc538dc703e510fed4bd53cdf
84,128
def get_area_cols(df):
    """Return all of the columns that represent area measurements in the
    Ames Housing Dataset.

    :param df: pd.DataFrame. Ames Housing Dataset
    :returns: list(str). List of column names.
    """
    area_like = [c for c in df.columns if any(token in c for token in ("SF", "Area"))]
    return area_like + ["LotFrontage", "SalePrice"]
0e6c6c2046bcb7004aa9ced7d1d9fbe17f87c5b8
84,129
def date_name_converter(date):
    """Convert date strings like "DD-MonthName3Letters-YY" to "MM-DD-YY"."""
    month_names = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
                   'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
    # Replace the 3-letter month name with its zero-padded number.
    for index, month in enumerate(month_names, start=1):
        date = date.replace(month, str(index).zfill(2))
    date_dd, date_mm, date_yy = date.split('-')
    return '%s-%s-%s' % (date_mm, date_dd, date_yy)
208416fee6fb11b40de69f29b9bf215638b3ae21
84,137
def is_indented(line, indent=1, exact=False):
    """Checks if the line is indented.

    By default, a line with indent equal to or greater passes.

    Args:
        line (str): The line to check.
        indent (int): The length of indent to check.
        exact (bool): Whether the indent must be exact.

    Returns:
        bool: True if the line has the indent.
    """
    for position, char in enumerate(line):
        if position < indent:
            # A non-space character before the required depth means the
            # line is under-indented.
            if not char.isspace():
                return False
        elif exact:
            # Exact mode: the character right at the indent column must
            # be non-space.
            return not char.isspace()
        else:
            return True
    # Empty lines (or lines shorter than the indent) fail.
    return False
28b84f11d821a6517d043148a06e904a6e7b9c88
84,139
import html


def _get_html(data: str) -> str:
    """Return ``data`` with HTML special characters entity-encoded."""
    escaped = html.escape(data)
    return escaped
c0c44678c5ab42f941232eca756aaf9bedfaf623
84,140
def __get_accuracy(predictor, test_set, evaluate):
    """Calculate the accuracy of a classification predictor on a test set.

    :param predictor: Predictor to test.
    :param test_set: Iterable of points; each point holds the features
        followed by the expected label in its last position.
    :param evaluate: Function taking (predictor, input) and returning the
        corresponding prediction.
    :return: Measured accuracy of the predictor (fraction correct)."""
    hits = sum(
        1 for point in test_set
        if evaluate(predictor, point[0:-1]) == point[-1]
    )
    return hits / len(test_set)
e15a47d73c92a394d371fb6ba0bf3c1628d7841a
84,143
import random


def createRandomString(length, numOnes):
    """Return a random binary string with ``numOnes`` ones at random
    distinct positions.

    :param length: total length of the string
    :param numOnes: number of '1' characters to place
    :raises ValueError: if numOnes > length (the original rejection loop
        never terminated in that case)
    :return: string of '0'/'1' characters
    """
    # random.sample picks distinct positions uniformly in one pass,
    # replacing the original O(length * numOnes) rejection loop.
    one_positions = set(random.sample(range(length), numOnes))
    return ''.join('1' if i in one_positions else '0' for i in range(length))
87de4405372f50dedc51519f4e3fb45c0c3f481b
84,145
def month_labels(months):
    """Retrieve month labels for keying corr maps and plotting.

    Parameters
    ----------
    months : sequence of int
        Three month numbers (result of the warm_months func).

    Returns
    -------
    dict
        4-keyed dictionary: each month number maps to its name, and
        'all' maps to 'month1 - month2 - month3'.
    """
    month_dict = {1: 'January', 2: 'February', 3: 'March', 4: 'April',
                  5: 'May', 6: 'June', 7: 'July', 8: 'August',
                  9: 'September', 10: 'October', 11: 'November',
                  12: 'December'}
    labels = {month: month_dict[month] for month in months}
    labels['all'] = ' - '.join(month_dict[m] for m in months[:3])
    return labels
f2990a18378175ff43a164e5679de4e752c21721
84,147
def get_length_param(text: str, tokenizer) -> str:
    """Map text to 1 of 4 buckets based on length after encoding.

    Parameters
    ----------
    text: str
        The text to be given 1 of 4 length parameters.
    tokenizer: HuggingFace tokenizer
        Used to compute the length of the text after encoding. See
        https://huggingface.co/transformers/main_classes/tokenizer.html

    Returns
    -------
    len_param: str
        '1' for short (<=15 tokens), '2' for medium (<=50), '3' for
        long (<=256) and '-' for anything longer.
    """
    tokens_count = len(tokenizer.encode(text))
    for bound, bucket in ((15, '1'), (50, '2'), (256, '3')):
        if tokens_count <= bound:
            return bucket
    return '-'
d54c8c4d1ee33ec113b4ba94ae36be0fef8fd900
84,154
def last_remaining2(n: int, m: int) -> int:
    """Josephus problem: index of the survivor for n people, step m.

    Uses the recurrence f(1, m) = 0; f(k, m) = (f(k-1, m) + m) % k,
    unrolled iteratively. Returns -1 for non-positive inputs.
    """
    if n <= 0 or m <= 0:
        return -1
    survivor = 0
    for size in range(2, n + 1):
        survivor = (survivor + m) % size
    return survivor
ea7ccbd7a4fa1011b378dc7307572d109cb4913b
84,157
def mkdir(name, children=None, meta=None):
    """Return a directory node dict.

    :param name: node name
    :param children: optional list of child nodes (default: a new list)
    :param meta: optional metadata dict (default: a new dict)
    :return: dict with 'name', 'children', 'meta' and 'type' keys
    """
    # Fresh containers per call: the original used mutable default
    # arguments, so every node created without explicit children/meta
    # shared — and leaked mutations through — the same list and dict.
    return {
        'name': name,
        'children': [] if children is None else children,
        'meta': {} if meta is None else meta,
        'type': 'directory',
    }
0634ec7dab259ee99c3796da5d869c3b1fe3bc7f
84,163
def get_prizes(matching_numbers, powerball_match):
    """Look up the prize for a set of winning numbers.

    :param matching_numbers: collection of regular numbers matched
    :param powerball_match: bool, whether the powerball matched
    :return: prize amount (or the string 'Jackpot!')
    """
    with_powerball = {0: 0, 1: 4, 2: 4, 3: 7, 4: 100, 5: 50000, 6: 'Jackpot!'}
    without_powerball = {0: 0, 1: 0, 2: 0, 3: 7, 4: 100, 5: 1000000}
    table = with_powerball if powerball_match else without_powerball
    return table[len(matching_numbers)]
c5231397d1f572607f550e841c6d0e01bcd7c1eb
84,165
def live_points_to_dict(live_points, names=None):
    """
    Convert a structured array of live points to a dictionary with a
    key per field.

    Parameters
    ----------
    live_points : structured_array
        Array of live points
    names : list of str or None
        If None all fields in the structured array are added to the
        dictionary, else only those included in the list are added.

    Returns
    -------
    dict
        Dictionary of live points
    """
    fields = live_points.dtype.names if names is None else names
    return {field: live_points[field] for field in fields}
c3c2fad53c1304dd49a054baf06c34437d393d8d
84,171
from typing import Iterable


def always_true(args: Iterable[str]) -> bool:
    """Constant predicate: ignores its arguments and returns True."""
    # ``args`` exists only to satisfy the predicate call signature.
    return True
5b0c8112dc954332ccb019934f10b980a3924407
84,174
def clipMinMax(size, minSize, maxSize):
    """Clip the size so it is bigger than minSize but smaller than maxSize.

    Expands to at least ``minSize`` first, then bounds the result by
    ``maxSize`` — so ``maxSize`` wins when the two constraints conflict.
    """
    expanded = size.expandedTo(minSize)
    return expanded.boundedTo(maxSize)
5d812694f14337797d0423564314546369b3957b
84,182
def define_tflite_tensors(interpreter):
    """
    Define input and output tensors (i.e. data) for the object
    detection classifier.

    Returns the input image tensor index plus a list of output tensor
    indices: [detection_boxes, detection_classes, detection_scores].
    """
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()
    # The input tensor is the image.
    image_tensor = input_details[0]['index']
    # Output 0: boxes (image regions where objects were detected);
    # output 1: class labels; output 2: confidence scores shown with
    # the class label on the result image.
    boxes_idx = output_details[0]['index']
    classes_idx = output_details[1]['index']
    scores_idx = output_details[2]['index']
    return image_tensor, [boxes_idx, classes_idx, scores_idx]
48ad28909f69c60a955eeae273969dd1b6c03327
84,183
def to_string(list):
    """Join a list of strings into one; empty input yields '(not set)'."""
    return ''.join(list) if list else '(not set)'
4ad9c1eac4e91eef1555602cdca5b44855227cd9
84,192
def unicode_to_ascii_punctuation(text):
    """
    A helper function to convert certain punctuation characters from
    Unicode to ASCII

    :param text: String to process
    :type text: str
    :return: String with ASCII punctuation where relevant
    :rtype: str

    >>> unicode_to_ascii_punctuation('‘GG’')
    "'GG'"
    >>> unicode_to_ascii_punctuation('“G—G”')
    '"G-G"'
    >>> unicode_to_ascii_punctuation('GG')
    'GG'
    >>> unicode_to_ascii_punctuation('⁰')
    '⁰'
    """
    translation = str.maketrans({'“': '"', '”': '"', '‘': "'", '’': "'", '—': '-'})
    return text.translate(translation)
7ae7d1627c6d22b0c62da7b8c8dcbcae309cc33b
84,195
import re


def libraries_from_requirements(requirements):
    """
    Clean up supplied requirements.txt and turn into tuple of CP libraries

    :param str requirements: A string version of a requirements.txt
    :return: tuple of lowercase library names
    """
    libraries = ()
    for line in requirements.split("\n"):
        line = line.lower().strip()
        if line.startswith("#") or line == "":
            # Skip comments and blank lines.
            continue
        if any(op in line for op in (">", "<", "=")):
            # Remove everything after any pip-style version specifier.
            # strip() afterwards so "foo >= 1.0" yields "foo" rather
            # than "foo " (the original kept the trailing space, and its
            # character class also split on a stray literal '|').
            line = re.split("[<>=]", line)[0].strip()
        libraries = libraries + (line,)
    return libraries
7b5f1a53514cc6467952c22b0901ab69f3caee05
84,201
def fetch_seq(seq, model='cterm'):
    """Slice a protein sequence to the terminal region used by a model.

    'cterm' returns the last 23 residues; any other model returns
    residues 2-24 (the n-terminal window, skipping the first residue).
    """
    if model == 'cterm':
        return seq[-23:]
    return seq[1:24]
23249d442e78d3b02c1a57b3c0899fbd83d37756
84,203
def as_filepath(dotted_path):
    """Inverse of modularize: turn a dotted path into a file path (with /).

    Keyword Arguments:
        dotted_path {str} -- A dotted path such as app.controllers

    Returns:
        value {str} -- a file path such as app/controllers
    """
    return "/".join(dotted_path.split("."))
95e8c8cdeb40ba478c08a5c2119b382e17bf37d4
84,205
from datetime import datetime def format_unixnano(unixnano): """Formats an integer unix timestamp from nanoseconds to a user readable string Args: unixnano (int): integer unix timestamp in nanoseconds Returns: formatted_time (str) """ return datetime.fromtimestamp(int(unixnano / 1e6) / 1000.0)\ .strftime('%Y-%m-%d %H:%M:%S.%f')
719c26d289ee5d5f17e545b355cfec839cef09cf
84,207
import pickle


def _load_pickle(input_file):
    """Load and return the object pickled in the file at ``input_file``."""
    # Use a distinct name for the handle; the original shadowed the
    # path argument with the open file object.
    with open(input_file, "rb") as handle:
        return pickle.load(handle)
d0404e9ae02cce5035439bfb6a483974ee26df0c
84,209
def x1y1x2y2_to_x1y1wh(boxes):
    """
    Converts boxes from x1y1x2y2 (upper left, bottom right) format to
    x1y1wh (upper left, width and height) format.

    Note: modifies ``boxes`` in place and returns it.
    """
    boxes[:, 2] -= boxes[:, 0]
    boxes[:, 3] -= boxes[:, 1]
    return boxes
3e21b6ba254fb4ffe1295f70208392295bb27720
84,210
def hr_bytes(n):
    """
    Human readable bytes value

    Notes:
        http://code.activestate.com/recipes/578019

    Args:
        n (int): bytes

    Returns:
        string: human readable bytes value
    """
    symbols = (u'K', u'M', u'G', u'T', u'P', u'E', u'Z', u'Y')
    # Walk the units from largest to smallest and report the first fit.
    for power, symbol in reversed(list(enumerate(symbols, start=1))):
        unit = 1 << power * 10
        if n >= unit:
            return u'%.1f%s' % (n / unit, symbol)
    return u'%sB' % n
3f84e4b19b153cf40da7893e1bb80a89621606d6
84,217
def atoi(s):
    """Convert string to integer without doing int(s).
    '123' -> 123

    @param s string to convert.
    @returns integer
    """
    if not s:
        raise ValueError
    negative = s[0] == '-'
    digits = s[1:] if negative else s
    result = 0
    # Horner-style accumulation, one digit at a time.
    for ch in digits:
        result = result * 10 + int(ch)
    return -result if negative else result
cb04bba338ca568ed3d65828e9e956540a243bef
84,218
from datetime import datetime


def format_dates_for_query(date_list):
    """Format a list of dates as "YYYY-MM-DD" strings for use in a
    BigQuery `where` statement that filters by date.

    Parameters
    ----------
    date_list : list
        Collection of date/datetime objects for the days to pull.

    Returns
    -------
    list[str]
        e.g. ["2021-01-01", "2021-01-02"]
    """
    return [datetime.strftime(d, "%Y-%m-%d") for d in date_list]
67bbab5326e3547dffab1072f133717fc79227d9
84,219
import re


def check_umi_template(umi, template):
    """
    Checks that the UMI (molecular barcode) given as input complies
    with the regex-based template (matched from the start of the UMI).

    :param umi: a molecular barcode
    :param template: a reg-based template with the same distance of the
        UMI that should tell how the UMI should be formed
    :type umi: str
    :type template: str
    :return: True if the given molecular barcode fits the pattern
    """
    compiled = re.compile(template)
    return compiled.match(umi) is not None
1467711238f6ee25d447ec8b6adb813e0ab3946d
84,223
def update_dictionary(dict1, dict2):
    """Recursively update dict1 values with those of dict2 (in place).

    Nested dicts are merged key-by-key; any other value from dict2
    overwrites the corresponding value in dict1. Keys missing from
    dict1 are added.

    :return: dict1, updated
    """
    for key, value in dict2.items():
        if (key in dict1 and isinstance(dict1[key], dict)
                and isinstance(value, dict)):
            # Both sides are dicts: merge recursively.
            dict1[key] = update_dictionary(dict1[key], value)
        else:
            # Overwrite or add. This also covers dict-over-scalar,
            # which previously recursed into a non-dict and failed.
            dict1[key] = value
    return dict1
954fe4f199a61d3c972bba455d2a6102c3cd0d54
84,225
def str_str(this):
    """Identity method: returns its argument unchanged."""
    return this
939e7883e30357010803b9f30f9f5313b51e4614
84,227
import textwrap


def get_wrapped_text(text, width=80):
    """Re-wrap a multiline string at a given width, preserving newlines
    and collapsing runs of internal whitespace.

    Parameters
    ----------
    text : str
        String to format
    width : int
        Maximum line width (default 80)

    Returns
    -------
    str
        Wrapped string
    """
    wrapped = []
    for line in text.split("\n"):
        collapsed = " ".join(line.split())
        wrapped.append(textwrap.fill(collapsed, width=width))
    return "\n".join(wrapped)
399cb43cff17cf6ec5ff25233c13a66e106f0e5d
84,231
def inverse_z_score(X, std, mu=None):
    """Reverse z-score normalization using previously computed mu and sigma.

    Parameters:
        X (np.array)[*, k]: The normalized data.
        std (np.array): The previously computed standard deviations.

    Keywords:
        mu (np.array)[k]: Optional, previously computed mean vector.

    Returns:
        X (np.array)[*, k]: The un-normalized data.
    """
    # z = (x - mu) / std implies x = std * z + mu; with no mean the
    # inverse is simply std * z.  (The original returned std**2 * X in
    # the mu-less branch, which does not invert a z-score.)
    if mu is None:
        return std * X
    return std * X + mu
316d40724663c767be7209c133e215fa640e040b
84,237
def separate_strings_from_dicts(elements):
    """
    Receive a list of strings and dicts and return 2 lists, one solely
    with strings and the other with dicts.

    :param List[Union[str, Dict[str, str]]] elements:
    :rtype: Tuple[List[str], List[Dict[str, str]]]
    :raises RuntimeError: if an element is neither str nor dict
    """
    strings = []
    dicts = []
    for item in elements:
        if isinstance(item, str):
            strings.append(item)
        elif isinstance(item, dict):
            dicts.append(item)
        else:
            raise RuntimeError(f"Only strings and dicts are supported, got: {item!r}")
    return strings, dicts
3ec52f5ea82d1564dbc77afc0fbdc4794644d008
84,239
def in_list(ldict, key, value):
    """
    Find whether a key/value pair is inside of a list of dictionaries.

    @param ldict: List of dictionaries
    @param key: The key to use for comparison
    @param value: The value to look for
    """
    return any(entry[key] == value for entry in ldict)
4a43a0d077205db05c119b4e9c81a9f168bf7875
84,241
def maxed_rect(max_width, max_height, aspect_ratio):
    """
    Calculates maximized width and height for a rectangular area not
    exceeding a maximum width and height.

    :param max_width: Maximum width for the rectangle
    :type max_width: float
    :param max_height: Maximum height for the rectangle
    :type max_height: float
    :param aspect_ratio: Aspect ratio (width / height) for the maxed rectangle
    :type aspect_ratio: float
    :returns: A tuple with width and height of the maxed rectangle
    :rtype: tuple

    Examples:
        >>> maxed_rect(800,600,aspect_ratio=4/3.)
        (800.0, 600.0)
        >>> maxed_rect(800,1600,aspect_ratio=800/600.)
        (800.0, 600.0)
    """
    mw = float(max_width)
    mh = float(max_height)
    ar = float(aspect_ratio)
    # Try filling the full width first; height follows from the ratio.
    w = mw
    h = mw / ar
    if h > mh:
        # Too tall: fill the full height instead.  (The original divided
        # by the aspect ratio here — width = height * aspect_ratio — so
        # the returned rectangle had the wrong aspect.)
        h = mh
        w = mh * ar
    return (w, h)
886cda77c833c3614b05cb41c9eeb209ea90c149
84,247
def parse_csv_data(csv_filename: str) -> list[str]:
    """
    (For Test) Parses the csv data from filename into a list of strings.

    Arguments:
        csv_filename: The name of the csv file to be opened.

    Returns:
        The list of lines read from the file.
    """
    # Use a context manager so the file handle is closed promptly; the
    # original left it open until garbage collection.
    with open(csv_filename, encoding="utf-8") as csv_file:
        return csv_file.readlines()
fb643fdc4e5a0ea65aef75b7c58246fa024461ce
84,249
import re


def is_md5_hash(h):
    """Return True if ``h`` has the exact format of an md5 hash:
    32 lowercase hexadecimal characters.

    Uses fullmatch so over-long strings (e.g. 33 hex chars) are
    rejected; the original only anchored the start of the string.
    """
    return re.fullmatch("[a-f0-9]{32}", h) is not None
7c55de80b2f68b8ff09b9ea7c89a8c83ef51bf10
84,252
def getNonEmptyRowPercentage(df, column):
    """Return the percentage of rows in ``df[column]`` that are not null,
    relative to the total number of rows in ``df``."""
    non_empty = df[column].notnull().sum()
    total_rows = len(df.index)
    return (non_empty * 100) / total_rows
cc751942fd22aa62c1911e2ca16363c9df8585bc
84,254
def calc_moving_average(data, n):
    """Calculate the average of "daily_deaths" over the last n entries.

    :param data: list of dicts, each with a "daily_deaths" key
    :param n: number of trailing days to average over
    :return: integer (truncated) average
    """
    recent = data[-n:]
    total = sum(entry["daily_deaths"] for entry in recent)
    return int(total / n)
5e370e01036924243730f7f29d4d3dcf7e33c36b
84,262
def _sf(string):
    """Make a string CSV-safe.

    Doubles embedded quotes and UTF-8 encodes; returns '' (a str, not
    bytes) for falsy input.
    """
    if not string:
        return ''
    doubled = string.replace('"', '""')
    return doubled.encode('utf-8')
d148aa44d376868fe008ac8cc7bfbab6f0937000
84,265
import glob
import re


def best_model_from_dir(basename):
    """Return best saved model from basename.

    Scans the filesystem for checkpoint index files named
    ``<basename><step>.index`` and returns ``<basename><step>`` for the
    highest step found, or None if there are no matches.
    """
    # Candidate checkpoints live next to basename.
    models = glob.glob(basename + '*.index')
    best_model = None
    # get best model, if exists
    models_out = []
    for m in models:
        # NOTE(review): the pattern only accepts 4-digit steps, or
        # 5-digit steps starting with 1, and the '.' before "index" is
        # unescaped (matches any character) — presumably always the
        # literal dot in practice; confirm if step counts can fall
        # outside 1000-19999.
        match = re.match(re.escape(basename) + '(1?[0-9]{4}).index', m)
        if match:
            models_out.append(int(match.groups()[0]))
    if models_out:
        # Highest step number wins.
        acc = max(models_out)
        best_model = basename + str(acc)
    return best_model
3de618633aba1bc63a11ef8268e78117eda77405
84,269
def make_a_list_from_uncommon_items(list1, list2):
    """
    Make a list from uncommon items between two lists — the symmetric
    difference, i.e. items appearing in exactly one of the lists.

    :param list1: list
    :param list2: list
    :return: list (element order is unspecified)
    """
    # The original subtracted the shorter set from the longer one, which
    # silently dropped the uncommon items of the shorter list (for
    # equal-length inputs [1, 2] and [2, 3] it returned only [1]); it
    # also caught every exception and returned the exception object.
    return list(set(list1) ^ set(list2))
5cb9eaf816e1ea3ca9f0c4630ec618f06a4138b4
84,270
def success_check(filename):
    """Parse the given log file and check whether the script execution
    was successful or not.

    Parameters
    ----------
    filename : str
        Name of the log file to parse

    Returns
    -------
    execution : str
        ``success`` or ``failure``
    """
    with open(filename, 'r') as file_obj:
        all_lines = file_obj.readlines()
    # An empty log cannot signal completion (the original raised
    # IndexError here); treat it as a failure.
    if not all_lines:
        return 'failure'
    return 'success' if 'complete' in all_lines[-1].lower() else 'failure'
1c939f6003217af798928b4bac6670b087192e69
84,284
def transaction_id(request):
    """
    Extract the transaction id from the given request.

    :param IRequest request: The request we are trying to get the
        transaction id for.

    :returns: A string transaction id (the first X-Response-Id header).
    """
    headers = request.responseHeaders.getRawHeaders('X-Response-Id')
    return headers[0]
61329f18658f5d3fcce756cb2ac816e3e89ccd62
84,288
def train_model(model, train_data, train_targets, epochs):
    """
    Train the model for the given number of epochs on train_data and
    train_targets.

    Returns the training history, as returned by ``model.fit``.
    """
    history = model.fit(
        train_data,
        train_targets,
        epochs=epochs,
        batch_size=40,
        validation_split=0.15,
        verbose=False,
    )
    return history
d8191a56e9aaff9af992c465e58bc3d7d2146ae2
84,292
def max_area(hs):
    """
    Find the maximum area between 2 poles whose heights are given in
    the list hs, via a two-pointer scan from both ends.
    """
    left, right = 0, len(hs) - 1
    best = (right - left) * min(hs[left], hs[right])
    while right - left > 1:
        # Advance the pointer at the shorter pole; moving the taller
        # pole inward can never increase the area.
        if hs[left] <= hs[right]:
            left += 1
        else:
            right -= 1
        best = max(best, (right - left) * min(hs[left], hs[right]))
    return best
8ecd3469d83c73dc891184bd43a3c07455f41940
84,294
def _clean_alignment(row, decomp):
    """
    Cleaning function for a pd.DataFrame row: extract the number of
    components used in the decomposition.

    Parameters
    ----------
    row : mapping with an 'alignment' entry
        e.g. "pca10 of stimulus", or a plain stimulus name.
    decomp : str
        Must be a str in ['pca', 'srm']

    Returns
    -------
    dict with 'n_comps' and 'stim' keys; 'n_comps' is 'Full' when the
    alignment string has no decomposition prefix.
    """
    try:
        prefix, stim = row['alignment'].split(sep=' of ')
        n_comps = prefix[len(decomp):]
    except ValueError:  # No ' of ' separator (or too many parts)
        n_comps, stim = 'Full', row['alignment']
    return {'n_comps': n_comps, 'stim': stim}
1cd15e47d3f471ad4d0d61f9e151b603a0d8b6c8
84,295
from functools import reduce
import operator


def addtogether(*things):
    """Add together everything in the argument list ``things``."""
    total = reduce(operator.add, things)
    return total
20d121ee101b523be8d6302cfa5f1245d97619b8
84,298
def list_intersection(a, b):
    """
    Find the first element in the intersection of two lists.

    "First" is taken in the order of ``a``; returns None when the
    lists share no elements.
    """
    # Scan ``a`` in order so the result is deterministic; the original
    # pulled an arbitrary element out of an unordered set despite its
    # "first element" contract.
    b_set = set(b)
    for item in a:
        if item in b_set:
            return item
    return None
be5eef1a45a324c7cf7855706b01ff9463319224
84,302
def find_missing(integers_list, start=None, limit=None):
    """Find all integers in [start, limit] that are absent from integers_list.

    :param integers_list: known integers; must be non-empty when start or
        limit is omitted (its first/last elements supply the bounds, which
        assumes the list is sorted).
    :param start: inclusive lower bound; defaults to integers_list[0].
    :param limit: inclusive upper bound; defaults to integers_list[-1].
    :return: list of the missing integers in ascending order.
    """
    start = start if start is not None else integers_list[0]
    limit = limit if limit is not None else integers_list[-1]
    # Set membership turns the O(n * m) list scan into O(n + m).
    present = set(integers_list)
    return [i for i in range(start, limit + 1) if i not in present]
317b60d7b9dd1fb6168ce2fd2bc00f03a69772a7
84,304
def parse_bigg_response(res, ms, bi, log):
    """Parse the BiGG API response text.

    Text is all plain text in JSON format. The fields of interest are the
    KEGG Reaction ID or the EC number.

    Bug fix: the original logged a missing 'id' field but then indexed
    ``item['id']`` anyway, raising KeyError; such items are now skipped.

    :param res: API JSON response
    :type res: dict
    :param ms: ModelSEED reaction ID
    :type ms: str
    :param bi: BiGG reaction ID
    :type bi: str
    :param log: Log output file handle
    :type log: File
    :return: KO IDs and EC numbers under keys 'KO' and 'EC'
    :rtype: dict of sets
    """
    data = {'KO': set(), 'EC': set()}  # Data to return
    db_info = res['database_links']
    if len(db_info) == 0:
        # No cross-references at all for this reaction.
        log.write('No database info for BiGG ' + bi + '\n')
    elif 'KEGG Reaction' in db_info:
        # May have multiple KO identifiers.
        for item in db_info['KEGG Reaction']:
            if 'id' not in item:
                log.write('KEGG reaction found but no ID for BiGG ' + bi +
                          ' and ModelSEED ' + ms + ' \n')
                continue  # skip malformed entry instead of raising KeyError
            data['KO'].add(item['id'])
    elif 'EC Number' in db_info:
        # Fall back to EC numbers when no KEGG entry exists.
        for item in db_info['EC Number']:
            if 'id' not in item:
                log.write('EC number found but no ID for BiGG ' + bi +
                          ' and ModelSEED ' + ms + ' \n')
                continue  # skip malformed entry instead of raising KeyError
            data['EC'].add(item['id'])
    else:
        log.write('No KEGG Reaction or EC Number for BiGG ' + bi +
                  ' and ModelSEED ' + ms + ' \n')
    return data
654966cb0e0d5f7bf5b6e244b8aabda5536b11ca
84,305
def reverse_map_path(rev_map, path, interfaces=False):
    """Map a path of addresses back to nodes (or interfaces).

    ``interfaces`` selects whether to return interface objects
    (e.g. eth0.r1) or just their nodes (r1). Hops found in neither the
    infra-interface nor loopback tables are silently dropped.
    """
    mapped = []
    infra = rev_map['infra_interfaces']
    loops = rev_map['loopbacks']
    for hop in path:
        if hop in infra:
            iface = infra[hop]
            mapped.append(iface if interfaces else iface.node)
        elif hop in loops:
            mapped.append(loops[hop])
    return mapped
9747d3f069b448dfab60c1c421ab7a5be4d220b0
84,309
def drop_duplicate(df, keep='first'):
    """Return df with duplicate rows removed.

    keep='first' keeps the first occurrence,
    keep='last' keeps the last occurrence,
    keep=False keeps nothing (drops all duplicated rows).
    """
    # subset=None and inplace=False are the pandas defaults, so a plain
    # drop_duplicates call is equivalent.
    deduplicated = df.drop_duplicates(keep=keep)
    return deduplicated
87fccf61c54aa77e6fa43708ea9b9f0de3d65047
84,310
def trim(text):
    """Remove whitespace from both sides of a string.

    Uses str.strip(), which performs both the lstrip and rstrip of the
    original in a single idiomatic call.
    """
    return text.strip()
ab8da016c0565f1561070c938976e67006d6e079
84,311
def scale_vec(vec, scale=0.1, dimensions=2):
    """Scalar-multiply the first ``dimensions`` components of ``vec``.

    :param vec: indexable sequence of numbers
    :param scale: multiplier applied to each component
    :param dimensions: how many leading components to scale
    :return: new list of the scaled components
    """
    return [vec[axis] * scale for axis in range(dimensions)]
1a364ee072cc4bee501ba92cd6ccbf13560126fe
84,315
def _ExtrudePoly(mdl, poly, depth, data, isccw):
    """Extrude the poly by -depth in z

    Arguments:
      mdl: geom.Model - where to do extrusion
      poly: list of vertex indices
      depth: float
      data: application data
      isccw: True if counter-clockwise
    Side Effects:
      For all edges in poly, make quads in Model
      extending those edges by depth in the negative z direction.
      The application data will be the data of the face that the
      edge is part of.
    Returns:
      list of int - vertices for extruded poly
      NOTE(review): a degenerate poly (< 2 vertices) returns None, not a
      list — callers should tolerate both. TODO confirm intended contract.
    """
    if len(poly) < 2:
        # Nothing to extrude: a single vertex has no edges.
        return
    extruded_poly = []
    points = mdl.points
    # Walk the polygon in its winding direction so side faces come out
    # with a consistent orientation.
    if isccw:
        incr = 1
    else:
        incr = -1
    for i, v in enumerate(poly):
        # Next vertex along the winding; modulo wraps the last edge back
        # to the start of the polygon.
        vnext = poly[(i + incr) % len(poly)]
        (x0, y0, z0) = points.pos[v]
        (x1, y1, z1) = points.pos[vnext]
        # Create the two lowered copies of this edge's endpoints
        # (extrusion is in -z, hence z - depth).
        vextrude = points.AddPoint((x0, y0, z0 - depth))
        vnextextrude = points.AddPoint((x1, y1, z1 - depth))
        # Quad vertex order differs by winding so the side face normal
        # points outwards in both cases.
        if isccw:
            sideface = [v, vextrude, vnextextrude, vnext]
        else:
            sideface = [v, vnext, vnextextrude, vextrude]
        mdl.faces.append(sideface)
        mdl.face_data.append(data)
        extruded_poly.append(vextrude)
    return extruded_poly
7df1102530620eaf07e209f868411fd9d14808b3
84,322
def Statsmodels_PValues(name, results, Explanatory, NumDecimal):
    """Format the p-values of a statsmodels results object as one string.

    Arguments:
    ----------
    - name: unused label parameter (kept for interface compatibility)
    - results: statsmodels result object exposing ``pvalues``
    - Explanatory: pandas DataFrame of explanatory series; its column
      names are appended to the corresponding (non-intercept) p-values
    - NumDecimal: int, number of decimals to round to

    Return:
    ----------
    - str: comma-separated p-values, e.g. '0.01, 0.03 x1, 0.2 x2'
    """
    labelled = [str(round(pval, NumDecimal)) for pval in results.pvalues]
    # p-value 0 is the intercept; columns label the following entries.
    for idx, column in enumerate(Explanatory.columns):
        labelled[idx + 1] = labelled[idx + 1] + ' ' + str(column)
    return ', '.join(labelled)
9c60937b8b7961a7e7d5c8cb19c8ec97e6da6b34
84,323
def get_top_n_peaks(peaks_df, n_peaks_to_use):
    """Keep only the top n_peaks_to_use peak hours within each season.

    Arguments:
      peaks_df: DataFrame of peak hours with 'season' and
        'adjusted_demand_MW' columns. NOTE: a 'rankings_per_season'
        column is added to this DataFrame in place (preserved from the
        original implementation).
      n_peaks_to_use: int, number of top peaks to retain per season.

    Return:
      DataFrame restricted to the top-ranked rows of each season.
    """
    # Rank demand within each season, highest demand = rank 1.
    peaks_df['rankings_per_season'] = (
        peaks_df.groupby(['season']).adjusted_demand_MW.rank(ascending=False)
    )
    keep = peaks_df['rankings_per_season'] <= float(n_peaks_to_use)
    return peaks_df[keep]
c4a170fc5b92a1666f72500b6cbc12d77d9a35e7
84,324
def search(data, element):
    """Linear search for element in a list.

    Parameters
    ----------
    data : list with elements
    element : element to look for

    Returns
    -------
    index : position of the first occurrence of element, otherwise -1
    """
    # enumerate replaces the unidiomatic range(len(data)) index loop.
    for index, value in enumerate(data):
        if value == element:
            return index
    return -1
cbb6a470dd8992da8a2eaa6d69be39d9d450804b
84,325
def specialty_grain_to_liquid_malt_weight(grain):
    """Specialty Grain to LME Weight

    :param float grain: Weight of Specialty Grain
    :return: LME Weight (grain weight times the 0.89 conversion factor)
    :rtype: float
    """
    lme_conversion_factor = 0.89
    return grain * lme_conversion_factor
0d21b984e1ec86bf841750fb862c56daa93eae97
84,330
def heuristic(point1, point2):
    """Return the Manhattan (L1) distance between two points.

    params:
        point1 (int tuple): first point
        point2 (int tuple): second point
    return:
        dist (int): sum of the absolute coordinate differences
    """
    dx = abs(point1[0] - point2[0])
    dy = abs(point1[1] - point2[1])
    return dx + dy
31e2449640deae30d029050443bc7cb4dfc94af6
84,332
def transform_timeseries_data(timeseries, start, end=None):
    """Transform a Go Metrics API metric result into a list of y-values
    within a window period.

    ``start`` and ``end`` are expected to be Unix timestamps in
    microseconds; collection begins at the point whose x equals start and
    stops (inclusively) at the point whose x equals end. The "inside the
    window" state intentionally persists across metrics, as in the
    original implementation.
    """
    values = []
    in_window = False
    # Metric names are irrelevant here; only the point lists matter.
    for points in timeseries.values():
        for point in points:
            if point['x'] == start:
                in_window = True
            if in_window:
                values.append(point['y'])
            if end is not None and point['x'] == end:
                return values
    return values
465f078832f17a7cf79ddf8f9fbee53f4597217e
84,333
def ps_new_query(tags=None, options="all"):
    """Build a job-posting query for PostgreSQL.

    Args:
        tags: list of tags (defaults to an empty list; the previous
            mutable-default ``tags=[]`` is avoided).
        options: 'all' requires postings to contain every tag,
            anything else ('any') requires at least one of the tags.

    Returns:
        SQL query string selecting up to 25 matching job postings.

    NOTE(security): tags are interpolated straight into the SQL text via
    str.format; use only trusted/validated tags, or switch the caller to
    a parameterized query.
    """
    if tags is None:
        tags = []
    if options == 'all':
        sql_query = "SELECT * FROM jobs WHERE tags @> array{} LIMIT 25".format(tags)
    else:
        sql_query = "SELECT * FROM jobs WHERE tags && array{} LIMIT 25".format(tags)
    return sql_query
a693da389e023b3879ea3e3e034a1cd62996d338
84,335
def to_int(s, min_val=1):
    """Convert string s to an integer, clamped from below at min_val.

    Equivalent to the original if/else: values not greater than min_val
    collapse to min_val.
    """
    return max(int(s), min_val)
e9dcc665d88f2f5a62df6e91fa3918c8294b9dd4
84,339
import re def _get_filename_from_response(response): """Gets filename from requests response object Args: response: requests.Response() object that contains the server's response to the HTTP request. Returns: filename (str): Name of the file to be downloaded """ cd = response.headers.get("content-disposition") if not cd: return None file_name = re.findall("filename=(.+)", cd) if len(file_name) == 0: return None return file_name[0]
45f5440390ad7279283ad5f50f7433b5ba7602cf
84,341
def get_machine_type(machine_type, accelerator_count):
    """Get machine type for the instance.

    - Use the user-specified machine_type if it is truthy.
    - Otherwise use a standard type with 8 CPUs per accelerator
      (minimum 8 CPUs when accelerator_count is 0).

    Args:
        machine_type: machine type specified by the user (may be None)
        accelerator_count: accelerator count

    Returns:
        the machine type string used for the instance
    """
    if machine_type:
        return machine_type
    cpus = 8 * max(accelerator_count, 1)
    return 'n1-standard-{}'.format(cpus)
50507c8856244534fb84f5aa554ba48c69b13c90
84,342
def getMissenseData(data):
    """Keep only rows corresponding to missense mutations.

    Filters on the "Mutation Description" column equalling
    'Substitution - Missense'. (Despite the original docstring's mention
    of known SNPs, no SNP filtering is performed here.)

    Arguments:
        data = dataframe
    Returns:
        tp_data = dataframe restricted to missense rows
    """
    is_missense = data["Mutation Description"] == 'Substitution - Missense'
    return data[is_missense]
0096d8dc73be67eb680dd0d64f4e883ee49b9cdd
84,343
import re


def read_promer_coords(coords_file):
    """Parse a promer coords file.

    Keyword arguments:
    coords_file -- Path to promer output coords file (string, required)

    returns:
    A list of dictionaries with the keys:
        label -- An integer, in ascending order of when they are
            encountered (the line index in the file).
        psim -- % similarity of the alignment (based on the promer
            scoring matrix).
        pid -- % AA identity in the alignment.
        pstp -- % stop codons in the alignment.
        reference -- A dictionary containing the seqid, start position,
            end position, and strand of the alignment for the reference
            sequence provided to promer.
        query -- As with 'reference' but for the promer query sequence.

    Fixes vs. the original: the 'rU' open mode (removed in Python 3.11)
    is replaced by 'r' (universal newlines are the Python 3 default), the
    ``!= None`` comparison uses ``is not None``, and the unused
    ``genomes`` local is dropped.
    """
    start_finder = re.compile(r"=+")
    # Fields are separated either by " | " column dividers or plain runs
    # of whitespace; both collapse to a single split point.
    line_split = re.compile(r"\s+\|\s+|\s+")

    def strand_finder(string):
        # Promer reports signed frame numbers; negative means '-' strand.
        if int(string) < 0:
            return '-'
        return '+'

    links_promer = []
    with open(coords_file, 'r') as coords:
        started = False
        for i, line in enumerate(coords):
            line = line.strip()
            if not started:
                # Skip header lines until the '====' separator row.
                if start_finder.match(line) is not None:
                    started = True
            else:
                fields = line_split.split(line)
                comp = {
                    'label': i,
                    'pid': float(fields[6]),    # %identity
                    'psim': float(fields[7]),   # %similarity
                    'pstp': float(fields[8]),   # %stop codons
                    'reference': {
                        "start": int(fields[0]),
                        "end": int(fields[1]),
                        "strand": strand_finder(fields[9]),
                        "seqid": fields[11],
                    },
                    'query': {
                        "start": int(fields[2]),
                        "end": int(fields[3]),
                        "strand": strand_finder(fields[10]),
                        "seqid": fields[12],
                    },
                }
                links_promer.append(comp)
    return links_promer
7069638ec40f54fec84f56083e4ace7b250d06fe
84,349
def root_form(fn, y0):
    """Return fn rewritten so its root sits where fn crosses y0.

    The returned callable has the same (x, a, b, c, d, e) signature with
    zero defaults and evaluates fn(x, a, b, c, d, e) - y0, suitable for
    handing to a root-finding routine.
    """
    def shifted(x, a=0, b=0, c=0, d=0, e=0):
        return fn(x, a, b, c, d, e) - y0
    return shifted
53515e8c887041846e4185be68d77bfc615139c4
84,354
def is_symmetrical(num: int) -> bool:
    """Report whether num's decimal representation is a palindrome."""
    digits = str(num)
    return digits == digits[::-1]
3d34133615faa9acec06821460e5f72b54178f4a
84,356
import textwrap


def wrap_error(msg):
    """Wrap an error message so it displays properly.

    @param msg: The error to display.
    @return: The message wrapped at 77 columns, each line preceded by a
        newline-and-space prefix.
    """
    wrapped_lines = textwrap.wrap(msg, 77)
    return '\n ' + '\n '.join(wrapped_lines)
ca619e527c486d6f4ba6141e80e9c444f4c9edef
84,369
def get_scripts(scripts_dict, pipeline, workflow):
    """Select scripts for a target pipeline/workflow.

    input:
        scripts_dict: scripts dictionary described in get_scripts_dict
        pipeline: name of a supported target pipeline or "all"
        workflow: name of a supported target workflow or "all"

    Behaviour:
        pipeline == "all": return the full scripts_dict.
        pipeline set, workflow == "all": return that pipeline's workflows
            plus the top-level "All Pipelines" list.
        both set: return only the target workflow, the pipeline's
            "All Workflows" list (renamed "All <pipeline> Workflows"),
            and "All Pipelines".
    """
    if pipeline == "all":
        return scripts_dict
    shared_scripts = scripts_dict["All Pipelines"]
    if workflow == "all":
        selection = dict(scripts_dict[pipeline])
        selection["All Pipelines"] = shared_scripts
        return selection
    return {
        workflow: scripts_dict[pipeline][workflow],
        "All {} Workflows".format(pipeline): scripts_dict[pipeline]["All Workflows"],
        "All Pipelines": shared_scripts,
    }
0d3fdd54c3d8e82e3c5b69395ed7310722fe8205
84,372
def unpad_trajectories(trajectories, masks):
    """Inverse operation of split_and_pad_trajectories().

    Transposes the first two dimensions so the boolean mask selects whole
    trajectory rows, then reshapes the surviving rows back to the original
    (step-major) layout. Assumes trajectories is step-major with the
    feature dimension last — TODO confirm against split_and_pad_trajectories.
    """
    num_steps = trajectories.shape[0]
    feature_dim = trajectories.shape[-1]
    selected = trajectories.transpose(1, 0)[masks.transpose(1, 0)]
    return selected.view(-1, num_steps, feature_dim).transpose(1, 0)
e70916aec63c000541d82406e535e058f5ff4030
84,373
def pmt_pv(i, n, PV, PV0=0):
    """Calculate the periodic installment that amortises a present value.

    :param i: Interest rate per period
    :param n: Number of periods
    :param PV: Present Value
    :param PV0: Payment at t=0 / Down Payment
    :return: installment amount per period
    """
    # Annuity denominator; same arithmetic order as the textbook formula
    # i / (1 - (1+i)^-n) * (PV - PV0).
    rate_factor = 1 - 1 / (1 + i) ** n
    return i / rate_factor * (PV - PV0)
2c63c62075017d17c273b05a358a032390c4025f
84,374