Dataset columns: content (string, length 39 to 14.9k), sha1 (string, length 40), id (int64, 0 to 710k)
def get_template_s3_url(bucket_name, resource_path):
    """
    Constructs S3 URL from bucket name and resource path.

    :param bucket_name: S3 bucket name
    :param resource_path: S3 path to the resource
    :return string: S3 URL of the CloudFormation templates
    """
    return 'https://%s.s3.amazonaws.com/%s' % (bucket_name, resource_path)
c54fb9f21d5b9f9457481704a27a9e9d14d04ff5
82,224
def _transform_member(member):
    """Transforms member object

    :param member: the member object
    :returns: dictionary of transformed member values
    """
    return {
        'id': member.id,
        'address': member.address,
        'protocol_port': member.protocol_port,
        'weight': member.weight,
        'admin_state_up': member.admin_state_up,
        'subnet_id': member.subnet_id,
        'provisioning_status': member.provisioning_status
    }
e4aae94adac52f274b9516bbd269052f5d08efc4
82,225
import collections


def majority_element(arr):
    """
    Given an array of size n, find the majority element. The majority element
    is the element that appears more than floor(n/2) times.
    """
    counts = collections.defaultdict(int)
    for elem in arr:
        counts[elem] += 1
        if counts[elem] > len(arr) / 2:
            return elem
f5cced7c9c5d6218f328665db9bfb0a44a0b75cf
82,227
import platform


def get_current_platform_name() -> str:
    """Returns the name of the current platform.

    Returns:
        str: name of current platform
    """
    return platform.platform()
9f017d52e4082f60a7adc4b1ff17e592b211e973
82,230
def calculate_offset(at_time):
    """
    Helper function that takes a datetime and calculates the offset, assuming a
    96-slot string with one slot for every quarter hour of a 24 hour day.

    :param at_time: Datetime for calculating the offset
    :rtype: int
    """
    offset = at_time.hour * 4
    #if offset is 0:
    #    offset = 1
    minute = at_time.minute
    if minute < 15:
        offset += 1
    if minute > 14 and minute < 30:
        offset += 2
    elif minute > 29 and minute < 45:
        offset += 3
    elif minute > 44:
        offset += 4
    return offset
73a468e6a00d0ba7bbf48dc807acd7077bda760c
82,232
from collections import OrderedDict


def build_paragraph_marker_layer(root):
    """
    Build the paragraph marker layer from the provided root of the XML tree.

    :param root: The root element of the XML tree.
    :type root: :class:`etree.Element`

    :return: An OrderedDict containing the locations of markers, suitable for
        direct transformation into JSON for use with the eRegs frontend.
    :rtype: :class:`collections.OrderedDict`:
    """
    paragraphs = root.findall('.//{eregs}paragraph')  # + root.findall('.//{eregs}interpParagraph')
    paragraph_dict = OrderedDict()
    for paragraph in paragraphs:
        marker = paragraph.get('marker')
        label = paragraph.get('label')
        if marker != '':
            marker_dict = {'locations': [0], 'text': marker}
            paragraph_dict[label] = [marker_dict]
    return paragraph_dict
d4b4ae6d0a1be0f4fcb3b550b0a11b2366220912
82,233
def newton(f, f_derivative, x0, eps, kmax):
    """Newton's method for finding roots.

    The Newton's method (Newton–Raphson method) is a root-finding algorithm
    which produces approximations to the roots (or zeroes) of a real-valued
    function. [Wiki].

    Args:
        f (function): single-variable function f,
        f_derivative (function): the function's derivative f',
        x0 (float): initial guess,
        eps (float): precision wanted,
        kmax (int): maximum number of iterations.

    Returns:
        x (float): root of f(x) = 0.
    """
    x = x0
    x_prev = x0 + 2 * eps
    i = 0
    while (abs(x - x_prev) >= eps) and (i < kmax):
        #print("Step", i, ":", int(x), int(x_prev), ", x - f(x) = ", int(x - f(x)), ", f_derivative(x) = ", int(f_derivative(x)), "f/f'=", int(f(x)/f_derivative(x)))
        x, x_prev = x - (f(x) / f_derivative(x)), x
        i += 1
    return x
6b55260c4b5b2318a9241a9245d2d7d8bae3745c
82,234
def next_decorator(event, message, decorates):
    """
    Helper method for IAnnouncerEmailDecorators. Call the next decorator
    or return.
    """
    if decorates and len(decorates) > 0:
        next = decorates.pop()
        return next.decorate_message(event, message, decorates)
2dbec5b53e532a2187be5fd3d97078f1db088d9d
82,236
def match_label(tree1, tree2):
    """Generate a list of pairs of nodes in two trees with matching labels.

    Parameters
    ----------
    tree1 : skbio.TreeNode
        tree 1 for comparison
    tree2 : skbio.TreeNode
        tree 2 for comparison

    Returns
    -------
    list of tuple of (skbio.TreeNode, skbio.TreeNode)

    Examples
    --------
    >>> from skbio import TreeNode
    >>> tree1 = TreeNode.read(['((a,b)x,(c,d)y);'])
    >>> tree2 = TreeNode.read(['(((a,b)x,c)y,d);'])
    >>> matches = match_label(tree1, tree2)
    >>> len(matches)
    6
    >>> print([' - '.join([','.join(sorted(node.subset())) for node in pair])
    ...        for pair in matches if not any(node.is_tip() for node in pair)])
    ['a,b - a,b', 'c,d - a,b,c']
    """
    l2ns = []
    for tree in (tree1, tree2):
        l2ns.append({})
        for node in tree.traverse(include_self=True):
            label = node.name
            if label not in (None, ''):
                if label in l2ns[-1]:
                    raise ValueError('Duplicated node label "%s" found.' % label)
                l2ns[-1][label] = node
    res = []
    for label in sorted(l2ns[0]):
        if label in l2ns[1]:
            res.append((l2ns[0][label], l2ns[1][label]))
    return res
566f277b4a3d6a60d75350b131fcc86a8a24ab37
82,241
import pickle
import re


def load_dict(fname, var_names, load_func=pickle.load):
    """
    Loads specific keys from a dictionary that was saved to a file.

    :type fname: file name
    :type var_names: variables to retrieve. Can be a list or comma separated
        string e.g. 'a, b,c' or ['a', 'b', 'c']
    :param load_func: default: pickle.load
    """
    if type(var_names) == str:
        var_names = re.split(', ?[, ]?', var_names)
    with open(fname, "rb") as f:
        data_dict = load_func(f)
        assert isinstance(data_dict, dict)
        return tuple([data_dict[var] for var in var_names])
fa92ad8afa0cc2ac064f77ac41063bd1d97eaf1c
82,245
def mcd(a: int, b: int) -> int:
    """This function returns the greatest common divisor of a and b.

    Args:
        a (int): dividend.
        b (int): divisor.

    Returns:
        int: the GCD of a and b.
    """
    if a % b == 0:
        return b
    return mcd(b, a % b)
decec1ef6410fc342d55e127f4bede713ee02cda
82,246
def make_ammo(request, case=''):
    """Make phantom ammo file

    Args:
        request (str): HTTP request
        case (str): ammo mark

    Returns:
        str: string in phantom ammo format
    """
    ammo_template = (
        "%d %s\n"
        "%s"
    )
    return ammo_template % (len(request), case, request)
e6aa9007829941e2aaff164f113b27f2820e9a94
82,247
import re


def remove_solver_output(out):
    """Remove extra, unwanted solver output (e.g. from Gurobi)."""
    filter_patterns = (
        (
            "\n--------------------------------------------\n"
            "Warning: your license will expire in .*\n"
            "--------------------------------------------\n\n"
        ),
        "Using license file.*\n",
        "Set parameter Username.*\n",
        "Academic license - for non-commercial use only.*\n",
    )
    for filter_pattern in filter_patterns:
        out = re.sub(filter_pattern, "", out)
    return out
dd6160c2af7967769c400b40a56b9fa6cbbc0c5c
82,250
def _ValidateComponentClassifierConfig(component_classifier_config):
    """Checks that a component_classifier_config dict is properly formatted.

    Args:
      component_classifier_config (dict): A dictionary that maps a component
        to its function and path patterns, and some other settings.
        For example:
        {
            'path_function_component': [
                [
                    'src/chrome/common/extensions/api/gcm.json',
                    '',
                    'Services>CloudMessaging'
                ],
                ...
                [
                    'src/chrome/browser/accessibility',
                    '',
                    'UI>Accessibility'
                ],
            ],
            'top_n': 4
        }

    Returns:
      True if ``component_classifier_config`` is properly formatted, False
      otherwise.
    """
    if not isinstance(component_classifier_config, dict):
        return False

    path_function_component = component_classifier_config.get(
        'path_function_component')
    if not isinstance(path_function_component, list):
        return False

    top_n = component_classifier_config.get('top_n')
    if not isinstance(top_n, int):
        return False

    return True
d743367e06bba747a17d80e79d1ee72af62663e1
82,251
def postfix(text: str, postfix_: str = "_") -> str:
    """Add the postfix `postfix_` to the given text."""
    return text + postfix_
f3d5d1149846eb6e93711fda84528676e84dc626
82,254
def check_pointing_data(msg, nats, shared_storage, logger):
    """
    Check to see whether the message comes from an object pointing at an object

    Args:
        msg: nats message
        nats: nats handler object
        shared_storage: shared storage dictionary
        logger: logger object
    """
    if msg.sender_id in shared_storage["pointing"]:
        return True
    return False
316553299c7703c7a435a5d9d70b54291ab4ed92
82,257
def SetRemove(list1, list2):
    """
    Returns a list containing list1 with items in list2 removed.
    """

    #----------------------
    # Make A Copy Of List 1
    #----------------------

    ReturnList = list1[:]

    #---------------------------
    # For Each Item In List 2...
    #---------------------------

    # Check each item in list 2. If it's in the return list, remove it.

    for Item in list2:
        if Item in ReturnList:
            ReturnList.remove(Item)

    #----------------------
    # Return List To Caller
    #----------------------

    # ReturnList has had all the items in List 2 removed from it. We're all done!

    return ReturnList
c369b439f0058d680c64c76bcf9a5f0680dc09b0
82,262
import math


def distance_between(first, second) -> float:
    """Find the distance between two points or the length of a vector"""
    x_diff = second[0] - first[0]
    y_diff = second[1] - first[1]
    return math.sqrt(math.pow(x_diff, 2) + math.pow(y_diff, 2))
42d4f19d20f2243749de3e639547fc476ff3e4ac
82,265
def xy_to_z(xy):
    """
    Returns the *z* coordinate using given *xy* chromaticity coordinates.

    Parameters
    ----------
    xy : array_like
        *xy* chromaticity coordinates.

    Returns
    -------
    numeric
        *z* coordinate.

    References
    ----------
    .. [2] `RP 177-1993 SMPTE RECOMMENDED PRACTICE - Television Color Equations: 3.3.2
            <http://car.france3.mars.free.fr/HD/INA-%2026%20jan%2006/SMPTE%20normes%20et%20confs/rp177.pdf>`_,  # noqa
            DOI: http://dx.doi.org/10.5594/S9781614821915

    Examples
    --------
    >>> xy_to_z((0.25, 0.25))
    0.5
    """
    return 1 - xy[0] - xy[1]
ee5813997c46d96222fb61b2429db5d80f46d5f3
82,269
def table_row(text_list):
    """Make a CSV row from a list."""
    return ','.join(f'"{text}"' for text in text_list)
cddfdfd3ceb75763d996400e3d9dbd3742379a02
82,274
def get_strain_label(entry, viral=False):
    """Try to extract a strain from an assembly summary entry.

    First this checks 'infraspecific_name', then 'isolate', then it tries to
    get it from 'organism_name'. If all fails, it falls back to just returning
    the assembly accession number.
    """
    def get_strain(entry):
        strain = entry['infraspecific_name']
        if strain != '':
            strain = strain.split('=')[-1]
            return strain
        strain = entry['isolate']
        if strain != '':
            return strain
        if len(entry['organism_name'].split(' ')) > 2 and not viral:
            strain = ' '.join(entry['organism_name'].split(' ')[2:])
            return strain
        return entry['assembly_accession']

    def cleanup(strain):
        strain = strain.strip()
        strain = strain.replace(' ', '_')
        strain = strain.replace(';', '_')
        strain = strain.replace('/', '_')
        strain = strain.replace('\\', '_')
        return strain

    return cleanup(get_strain(entry))
ec9365aff3154998951b2a10869379c918d04e04
82,278
def _ws_defaults(data):
    """Set some defaults for the required workspace fields."""
    defaults = {
        "owner": "owner",
        "max_obj_id": 1,
        "lock_status": "n",
        "name": "wsname",
        "mod_epoch": 1,
        "is_public": True,
        "is_deleted": False,
        "metadata": {"narrative_nice_name": "narrname"},
    }
    # Merge the data with the above defaults
    return dict(defaults, **data)
5c1dcb2553eca4d5b07ce0c9e059d5666d677619
82,279
def loss(pred, target):
    """
    Calculate loss

    Outputs:
        loss : float
    """
    return (pred - target.float()).pow(2).sum()
fdbb7a4a141695c3541c439d27fdb1704ccca7d4
82,281
def list_input(question: str) -> list[str]:
    """For situations where you need a list of inputs from the user."""
    print(question)
    return input().split()
5121ee0d8ece427e78449d3186784b25c21ceee8
82,287
from typing import Dict
from typing import Union
from typing import List
import uuid
from datetime import datetime


def create_valid_passport_request_payload() -> Dict[str, Union[str, List[str]]]:
    """
    Create a test passport request message payload

    Uses the details for a test passport that a test DCS instance will accept
    as valid.
    """
    return {
        # This creates a JSON structure that the DCS can understand.
        "correlationId": str(uuid.uuid4()),
        "requestId": str(uuid.uuid4()),
        "timestamp": f"{datetime.utcnow().isoformat(timespec='milliseconds')}Z",
        "passportNumber": "824159121",
        "surname": "Watson",
        "forenames": ["Mary"],
        "dateOfBirth": "1932-02-25",
        "expiryDate": "2021-03-01",
    }
8b0fce4695fe9728c1613e9b66eaa815849d634f
82,292
from bs4 import BeautifulSoup


def parse_webpage(s):
    """
    Given the text of the webpage, parse it as an instance of a beautiful soup,
    using the default parser and encoding

    :param s: The text of the webpage
    :return: beautiful soup object
    """
    return BeautifulSoup(s, 'html.parser')
ff7ebefaaac64fbdf20d78f16fd01cc0ed764645
82,301
def check_col_names(df1, df2, cols_only=True):
    """Return True or False indicating whether or not the column names
    are the same between two dataframes"""

    # Default is that columns are passed, for ease of use
    # with check_col_list below
    # will return truth value for each column pair, so we achieve
    # one truth value for the whole array/list by calling all
    if cols_only:
        result = all(df1 == df2)
    # Also possible to pass two dataframes directly by specifying
    # cols_only=False
    else:
        result = all(df1.columns == df2.columns)

    # Return True/False result
    return result
2211ce04d65412f59aba950478585af3c0195b09
82,302
def allindex(value, inlist):
    """
    Mathematica Positions -equivalent

    :param value:
    :param inlist: list from which to find value
    :return: all indices
    """
    # indices = []
    # idx = -1
    # while True:
    #     try:
    #         idx = qlist.index(value, idx+1)
    #         indices.append(idx)
    #     except ValueError:
    #         break
    # return indices
    return [i for i, x in enumerate(inlist) if x == value]
2e57ac28d02e2e90baee711a109c2b45edfca34e
82,307
from typing import Union

import jinja2


def render(template: Union[str, jinja2.Template], **context) -> str:
    """
    Renders the given string as a jinja template with the keyword arguments
    as context.

    :param template: String to be used as jinja template
    :param context: keyword arguments used as context for the jinja template
    :return: Rendered string
    """
    if isinstance(template, str):
        template = jinja2.Template(template)
    return template.render(context)
401e13caec0e04748502ec5385e4fd44e49cd3f2
82,308
import math


def poh(concentration):
    """Returns the pOH from the hydroxide ion concentration."""
    # pOH is defined with the base-10 logarithm.
    return -math.log10(concentration)
15a586bd4358c63c6221788ff8c3c2530cd0cd98
82,312
def is_lookml(dashboard):
    """Check if a dashboard is a LookML dashboard"""
    return dashboard.model is not None and '::' in dashboard.id
39cbfeedecf12bf5060596c9188392c62aedf6cf
82,314
from pathlib import Path
from typing import Set
import logging


def has_required_files(folder: Path, data_adj: str, required: Set[str]) -> bool:
    """
    :param folder: a path to look for files in
    :param data_adj: an adjective to use in logging lines
    :param required: names of files to look for in folder
    :return: True if folder exists, and either (1) required is empty, or
        (2) folder contains one or more members of required. In the latter
        case, those members are removed from required.
    """
    if not folder.is_dir():
        logging.info(f"Not found: {data_adj} folder {folder}")
        return False
    files_found = [path.name for path in folder.glob("*.csv")]
    if required and not files_found:
        logging.info(f"Found, but empty: {data_adj} folder {folder}")
        return False
    found = required.intersection(files_found)
    logging.info(f"Found {len(found)} of {len(required)} files in: {data_adj} folder {folder}")
    if found or not required:
        required.difference_update(found)
        return True
    return False
8d143e3c039fd1e20454f6bb3c377be76609ec99
82,316
def path_(data, path, **kwargs):
    """
    Reference name ``path_``

    Function to retrieve content from nested structured data at given path.

    :param path: (str, list) dot separated path to result or list of path items
    :param data: (dict) data to get results from
    :return: results at given path

    Sample data::

        {
            "VIP_cfg": {
                "1.1.1.1": {
                    "config_state": "dis",
                    "services": {
                        "443": {
                            "https": [
                                {"real_port": "443"}
                            ],
                        }
                    }
                }
            }
        }

    With ``path`` ``"VIP_cfg.'1.1.1.1'.services.443.https.0.real_port"``
    will return ``443``
    """
    ret = data
    # form path list
    if isinstance(path, str):
        # perform path split accounting for quotes inside path
        path_list = [""]
        inside_quotes = False
        for char in path:
            if char == "." and not inside_quotes:
                path_list.append("")
            elif char in ["'", '"']:
                inside_quotes = not inside_quotes
            else:
                path_list[-1] += char
    elif isinstance(path, list):
        path_list = path
    else:
        raise TypeError(
            "nornir-salt:DataProcessor:path unsupported path type {}".format(type(path))
        )
    # descend down the data path
    for item in path_list:
        if item in ret or isinstance(item, int):
            ret = ret[item]
        elif item.isdigit():
            ret = ret[int(item)]
    return ret
a3670be0e4efc5dd5fbbe17391305a61d8fac0e1
82,318
from typing import Any


def is_classmethod(parent: type, obj: Any) -> bool:
    """Return True if it is a class method."""
    if not hasattr(obj, '__self__'):
        return False
    return obj.__self__ is parent
1010bf627ea73242f4f7f2a7e687666dc40a35b6
82,319
def contains_letter_number(text):
    """
    A function which checks whether a String contains at least one letter and number

    Args:
        text (string): A string

    Returns:
        bool: A boolean indicating whether the text provided contains at least
        one letter and number
    """
    # https://stackoverflow.com/questions/64862663/how-to-check-if-a-string-is-strictly-contains-both-letters-and-numbers
    return text.isalnum() and not text.isalpha() and not text.isdigit()
2c9e66dbee6aa678a84f2bca91db7d9e31700fa6
82,320
def source_authors(conv):
    """
    Returns the set of authors that contributed a source (non-reply) post.

    Parameters
    ----------
    conv : Conversation

    Returns
    -------
    set(str)
    """
    return set([conv.posts[pid].author for pid in conv.get_sources()])
c9e31353f4b128a3820fe2b91cc18c07d92cfe20
82,321
def __contains_kevin_bacon__(tweet):
    """
    Check if a tweet contains Kevin Bacon, Kevin_Bacon, or KevinBacon
    (case insensitive).

    Args:
        tweet: tweet text
    Return:
        True if the tweet text contains a form of "Kevin Bacon"
    """
    tweet_text = tweet.lower()
    if "kevin bacon" in tweet_text:
        return True
    if "kevin_bacon" in tweet_text:
        return True
    if "kevinbacon" in tweet_text:
        return True
    return False
d5048582bd7ef0b0f43ad2be9c08a6c691c8da57
82,323
def read_file(file_name):
    """ Return the content of a File """
    with open(file_name, 'rb') as f:
        return f.read()
6d77ba0778db08ee0066d42de0effd975bed6d85
82,325
def parse_hgtector(input_f):
    """ Parse output of HGTector version 0.2.1.

    Parameters
    ----------
    input_f: string
        file descriptor for HGTector output results

    Returns
    -------
    output: string
        one putative HGT-derived gene per line
        columns: query_id, donor_taxid, donor_species, donor_lineage,
        pct_id, pct_coverage
    """
    hgts = []
    for line in input_f:
        x = line.strip('\r\n').split('\t')
        if (len(x) == 15) and (x[7] == '1'):
            hgt = '\t'.join((x[0], x[12], x[13], x[14], x[10], x[11]))
            hgts.append(hgt)
    return '\n'.join(hgts)
b7eacd2eed2467d107fb67795378339dc20aef27
82,327
def lt(a, b):
    """Evaluate whether a is less than b."""
    return a < b
d785873ae885e826516d5d1279689a587d5d6e4f
82,336
def validate_fluent_file(file_path):
    """Check if the fluent file has duplicate keys.

    Returns "fluent keys" in array.
    """
    print("\nValidating file:", file_path)
    fluent_keys = []
    with open(file_path, 'r') as file:
        for line in file.readlines():
            if line.find("=") != -1:
                translation_key = line.split("=")[0].strip()
                try:
                    fluent_keys.index(translation_key)
                    print(" - Duplicate [ " + translation_key + " ]. One of them can be removed.")
                except ValueError:
                    pass
                fluent_keys.append(translation_key)
    return fluent_keys
05d11ffc4985f6782c43dcb3806379412cf31d61
82,340
def float_list(s):
    """Convert a string of comma separated floats to a sorted list of floats."""
    return sorted(map(float, s.split(',')))
e185cb35c932adb97b6da95d12deea7b2631044f
82,342
def warshall_adjacency(matrix):
    """Applies the Warshall transitive closure algorithm to an adjacency matrix.

    Args:
        matrix: The adjacency matrix.

    Returns:
        The closed form of the adjacency matrix.
    """
    for k in range(0, len(matrix)):
        for i in range(0, len(matrix)):
            if matrix[i][k] == 1:
                for j in range(0, len(matrix[0])):
                    if matrix[k][j] == 1:
                        matrix[i][j] = 1
    return matrix
1d1e11fd70db34915ae5b130c53e884ba15fba39
82,347
from typing import Union


def expand(xs: Union[tuple, list]):
    """Expand a sequence to the same element repeated twice if there is only
    one element.

    Args:
        xs (tuple or list): Sequence to expand.

    Returns:
        tuple or list: `xs * 2` or `xs`.
    """
    return xs * 2 if len(xs) == 1 else xs
ffbf761a57e12c3a54684f6369ec781431f978c3
82,349
def _bits_to_bytes_len(length_in_bits):
    """
    Helper function that returns the number of bytes necessary to store the
    given number of bits.
    """
    return (length_in_bits + 7) // 8
3421909f7018f8860ee3143dca6936e33d63af65
82,350
def _clean_mod(mod):
    """Remove any key/value pairs from module where the key is prefixed
    by an underscore.

    Args:
        mod (ModState): The mod to clean.

    Returns:
        ModState with all underscore-prefixed keys removed.
    """
    # Iterate over a copy of the keys so the mapping can be mutated safely.
    for key in list(mod.keys()):
        if key.startswith('_'):
            mod.pop(key, None)
    return mod
3160c6b9c9abceba0ad9810efce8070261ff195b
82,351
def prepare_to_decision_trees(data, features=None):
    """
    Function that returns the data and the labels ready to create a Decision tree

    IMPORTANT: It can cause information loss by removing entries. Don't use it
    to plot information

    :param data: cleaned extended data
    :param features: List of strings containing the features to be used in the
        training process. Possibilities: 'Mean', 'Std', 'Max', 'Min', 'MAGE'.
        Default: All features
    :return: data and labels to create DecisionTree object
    """
    # Get labels
    labels = data[["Hyperglycemia_Diagnosis_Next_Block", "Hypoglycemia_Diagnosis_Next_Block",
                   "In_Range_Diagnosis_Next_Block", "Severe_Hyperglycemia_Diagnosis_Next_Block"]]

    # Remove columns that cannot be passed to the estimator
    new_data = data.drop(["Datetime", "Day_Block", "Last_Meal", "Block_Meal"], axis=1)

    # Remove columns that are not included in the features
    if features is not None:
        if 'Mean' not in features:
            new_data.drop(["Glucose_Mean_Prev_Day", "Glucose_Mean_Prev_Block"], inplace=True, axis=1)
        if 'Std' not in features:
            new_data.drop(["Glucose_Std_Prev_Day", "Glucose_Std_Prev_Block"], inplace=True, axis=1)
        if 'Max' not in features:
            new_data.drop(["Glucose_Max_Prev_Day", "Glucose_Max_Prev_Block"], inplace=True, axis=1)
        if 'Min' not in features:
            new_data.drop(["Glucose_Min_Prev_Day", "Glucose_Min_Prev_Block"], inplace=True, axis=1)
        if 'MAGE' not in features:
            new_data.drop("MAGE_Prev_Day", inplace=True, axis=1)

    # Delete label columns
    new_data.drop(["Hyperglycemia_Diagnosis_Next_Block", "Hypoglycemia_Diagnosis_Next_Block",
                   "In_Range_Diagnosis_Next_Block", "Severe_Hyperglycemia_Diagnosis_Next_Block"],
                  inplace=True, axis=1)

    return [new_data, labels]
ff8aaa91a7847dcfbf987a75bd687f47b241230f
82,355
import math


def int_sqrt(x: float) -> int:
    """Finds the nearest integer to the square root of ``x``.

    Args:
        x: The number whose square root will be found.

    Returns:
        The result of rounding ``sqrt(x)`` to the nearest integer.
    """
    return int(round(math.sqrt(x)))
cafd949e816de75c3fbf0afecb48ba1130260317
82,358
def get_next_ip(ip_address, step):
    """
    Calculates the IP address through 'step' from the initial address

    get_next_ip("192.168.1.16", 4) -> "192.168.1.20"
    """
    ip_parts = ip_address.split(".")
    return ".".join(ip_parts[:3] + [str(int(ip_parts[3]) + step)])
bd78d97e28d312349c8bfc55bc6b5b69bbe2ef1b
82,362
def selection_sort(array):
    """
    Selection sort algorithm.

    :param array: the array to be sorted.
    :return: sorted array

    >>> import random
    >>> array = random.sample(range(-50, 50), 100)
    >>> selection_sort(array) == sorted(array)
    True
    """
    for i in range(0, len(array) - 1):
        min_index = i
        for j in range(i + 1, len(array)):
            if array[j] < array[min_index]:
                min_index = j
        if min_index != i:
            array[i], array[min_index] = array[min_index], array[i]
    return array
024cbb9016dc7181a7493895377bf5ece92a0935
82,364
def make_var_name_factory(person_list, alternative_list):
    """Create an LpVariable name factory that accepts indexes"""
    def name_factory(i, j):
        person = person_list[i]
        alternative = alternative_list[j]
        return f"Choice:{person},{alternative}"
    return name_factory
aeb86d18cd154055562cd353a7de9b7b8f0ea33f
82,366
def get_vars_and_coefficients(elements, start=3):
    """Use a list which comes from line.split() to create lists of float
    coefficients and SCIP variables."""
    return [var for var in elements[start + 1::2]], [float(coeff) for coeff in elements[start::2]]
1f073737af6347e02fb64615d95ce3c0c15d904c
82,369
def filter_none_from_parameters(params):
    """Removes parameters whose value is :obj:`None`.

    Args:
        params (dict): dictionary of parameters to be passed to the API.

    Returns:
        dict: the original parameters with any parameters whose value was
        :obj:`None` removed.
    """
    return {
        key: value
        for key, value in params.items()
        if value is not None
    }
620c722fbd47ef88af9f865ec9b2405dfe08d02b
82,371
def match(first: str, second: str):
    """Test whether two string inputs are the same, up to case and wildcard."""
    assert type(first) is str and type(second) is str, 'inputs are not strings'
    first = first.upper()
    second = second.upper()
    if len(first) == 0 and len(second) == 0:
        return True
    if len(first) > 1 and first[0] == '*' and len(second) == 0:
        return False
    if (len(first) != 0 and len(second) != 0 and first[0] == second[0]):
        return match(first[1:], second[1:])
    if len(first) != 0 and first[0] == '*':
        return match(first[1:], second) or match(first, second[1:])
    return False
23e61e305e156e4329a1873c33d8723580b7cb63
82,374
def total_seconds(delta):
    """Convert a datetime.timedelta object into a number of seconds"""
    return (delta.days * 24 * 60 * 60) + delta.seconds
aa883aba421e0abac982abacf8f57bd4374789c4
82,377
def train_and_fit(classifier, X_tr, Y_tr, X_te):
    """Train the classifier using X_tr and Y_tr, and predict on X_te"""
    classifier.fit(X_tr, Y_tr)
    return classifier.predict(X_te).astype('int')
5b30270ab37cbbae4e4b8e1d3315b0069dc2d223
82,378
def omit_nulls(data):
    """Strips `None` values from a dictionary or `RemoteObject` instance."""
    if not isinstance(data, dict):
        if not hasattr(data, '__dict__'):
            return str(data)
        data = dict(data.__dict__)
    # Iterate over a copy of the keys so entries can be deleted safely.
    for key in list(data.keys()):
        if data[key] is None:
            del data[key]
    return data
9ca08b7394ef4c6370d47851f6ad6978b072f15f
82,380
def sign(f: float) -> float:
    """Return sign of float `f` as -1 or +1, 0 returns +1"""
    return -1.0 if f < 0.0 else +1.0
0ed74a3d58259eaff0fa78f9d08e0f7a9aa268a0
82,382
from pathlib import Path


def root_module_path() -> Path:
    """Return absolute root module path.

    Returns
    -------
    :class:`pathlib.Path`
        Absolute root module path.
    """
    return Path(__file__).resolve().parents[1]
48986dfde811819ae037318e9f4e2b21300fda25
82,386
def _get_indices_dataset_notexist(input_time_arrays):
    """
    Build a list of [start, end] indices that match the input_time_arrays,
    starting at zero.

    Parameters
    ----------
    input_time_arrays
        list of 1d xarray dataarrays or numpy arrays for the input time values

    Returns
    -------
    list
        list of [start, end] indexes for the indices of input_time_arrays
    """
    running_total = 0
    write_indices = []
    for input_time in input_time_arrays:
        write_indices.append([0 + running_total, len(input_time) + running_total])
        running_total += len(input_time)
    return write_indices
f1a3f1946c972841eba01f2362a886b29d4250a1
82,387
def header(var):
    """Create column headers based on extracted variable."""
    fixedcols = ["time", "patch", "matl", "partId"]
    headers = {
        "p.x": ["x", "y", "z"],
        "p.porepressure": ["p.porepressure"],
        "p.stress": ["sigma11", "sigma12", "sigma13",
                     "sigma21", "sigma22", "sigma23",
                     "sigma31", "sigma32", "sigma33"]
    }
    if var not in headers:
        print(
            "Sorry, the variable {var} is not implemented yet. No headers assigned for {var}"
            .format(var=var))
        return fixedcols + [var]
    return fixedcols + headers[var]
f0217bcf9962757849708f1f5241cf5782b63949
82,389
from typing import Callable
import torch
from typing import List
from typing import Union


def split_tensor_and_run_function(
    func: Callable[[torch.Tensor], List],
    tensor: torch.Tensor,
    split_size: Union[int, List]
) -> torch.Tensor:
    """Splits the tensor into chunks in given split_size and run a function on each chunk.

    Args:
        func: Function to be run on a chunk of tensor.
        tensor: Tensor to split.
        split_size: Size of a single chunk or list of sizes for each chunk.

    Returns:
        output_tensor: Tensor of same size as input tensor
    """
    tensors_list = []
    for sub_tensor in torch.split(tensor, split_size):
        tensors_list.append(func(sub_tensor))
    output_tensor = torch.cat(tensors_list)
    return output_tensor
6d5d556f20a038a1bc723be0fd1a05d3dcd63801
82,392
def feedback(update, _):
    """
    Start the feedback conversation

    Args:
        update: the update object
        _: unused variable

    Returns:
        The variable indicating to wait for feedback
    """
    update.effective_message.reply_text(
        "Send me your feedback or /cancel this action. "
        "Note that only English feedback will be forwarded to my developer."
    )
    return 0
91cf67795a10d9d31c150da24a2f59005d7ec5c3
82,396
def generate_sub_codons_right(codons_dict):
    """Generate the sub_codons_right dictionary of codon suffixes.

    Parameters
    ----------
    codons_dict : dict
        Dictionary, keyed by the allowed 'amino acid' symbols with the values
        being lists of codons corresponding to the symbol.

    Returns
    -------
    sub_codons_right : dict
        Dictionary of the 1 and 2 nucleotide suffixes (read from 5') for each
        codon in an 'amino acid' grouping.
    """
    sub_codons_right = {}
    for aa in list(codons_dict.keys()):
        sub_codons_right[aa] = list(set([x[-1] for x in codons_dict[aa]] +
                                        [x[-2:] for x in codons_dict[aa]]))
    return sub_codons_right
a8d6f681b48cc59c5ddf6d04c72b1b9fcece72be
82,398
def rivers_with_station(stations):
    """For a list of MonitoringStation objects (stations), returns a set object
    containing the names of the rivers monitored by the given stations."""

    # Set with the river names. Using "set" objects prevents duplicate names.
    rivers = set()

    for station in stations:
        # Add name to set
        rivers.add(station.river)

    return rivers
874d560076334e1663f41f64dae85cfa0c73334d
82,400
def parse_viewed_courses(json):
    """
    Parse course viewed statements.

    Extract the students that have viewed the course from the course viewed
    statements. Return a list of those students.

    :param json: All the course viewed statements since a certain time.
    :type json: dict(str, str)
    :return: List of students who have viewed the course.
    :rtype: list(str)
    """
    student_list = []
    for statement in json['statements']:
        student_id = statement['actor']['account']['name']
        if student_id not in student_list:
            student_list.append(student_id)
    return student_list
1bd42f6e79a93c6e163223fcd9d7ed41c448f809
82,402
from typing import Dict


def get_default_result_dict(dir: str, data_name: str, index_name: str, fea_name: str) -> Dict:
    """
    Get the default result dict based on the experimental factors.

    Args:
        dir (str): the path of one single extracted feature directory.
        data_name (str): the name of the dataset.
        index_name (str): the name of query process.
        fea_name (str): the name of the features to be loaded.

    Returns:
        result_dict (Dict): a default configuration dict.
    """
    result_dict = {
        "data_name": data_name.split("_")[0],
        "pre_process_name": dir.split("_")[2],
        "model_name": "_".join(dir.split("_")[-2:]),
        "feature_map_name": fea_name.split("_")[0],
        "post_process_name": index_name
    }

    if len(fea_name.split("_")) == 1:
        result_dict["aggregator_name"] = "none"
    else:
        result_dict["aggregator_name"] = fea_name.split("_")[1]

    return result_dict
f6926431c898551e30ca4d6b86738d1fddea3a39
82,406
def xml_elements_equal(e1, e2):
    """
    Compare 2 XML elements by content.

    :param e1: first XML element
    :param e2: second XML element
    :return: True if two xml elements are the same by content
    """
    if e1.tag != e2.tag:
        return False
    if e1.text != e2.text:
        return False
    if e1.tail != e2.tail:
        return False
    if e1.attrib != e2.attrib:
        return False
    if len(e1) != len(e2):
        return False
    return all(xml_elements_equal(c1, c2) for c1, c2 in zip(e1, e2))
bdad51eafcb48c90e274e9b1ab635ff94842998d
82,409
import ipaddress


def transform_index_ipv6_address(ipv6_str):
    """
    Converts a substring of an SNMP index that contains an IPv6 address into a
    human readable format.

    Example:
        254.128.0.0.0.0.0.0.14.134.16.255.254.243.135.30
        => fe80::e86:10ff:fef3:871e

    Args:
        ipv6_str (str): SNMP index substring containing an IPv6 address.

    Returns:
        str: human readable IPv6 address
    """
    parts = [u"{0:02x}".format(int(x)) for x in ipv6_str.split(u'.')]
    byte_string = u""
    for p, i in enumerate(parts):
        if p % 2 != 0:
            byte_string += u'{}{}:'.format(parts[p - 1].lstrip(u'0'), parts[p])
    result = str(byte_string[:-1])
    if isinstance(result, bytes):
        result = result.decode('utf-8')
    return str(ipaddress.ip_address(result))
5aff3d92d7dc30f7041156f455e7bc56bd2f6ae3
82,410
def normalize(probs):
    """Normalizes a list of probabilities, so that they sum up to 1"""
    prob_factor = 1 / sum(probs)
    return [prob_factor * p for p in probs]
52ff964125858f40512d1cddccf7311b138d961d
82,415
def bound(x, m, M):
    """
    Bound x between m and M.

    Parameters
    ----------
    x : float
        Value.
    m : float
        Lower bound.
    M : float
        Upper bound.
    """
    return min(max(x, m), M)
d80d56e7051ed6e6b2cb38fc522829e1a2b35303
82,419
import re


def human_readable_key(key, delimiter=":"):
    """Makes a delimited key human-readable.

    >>> human_readable_key("participants:0:name")
    'Participant 1 Name'
    >>> human_readable_key("participants_hello_world:0:name")
    'Participants Hello World 1 Name'
    """
    key = key.replace("_", " ")
    delimiter = re.escape(delimiter)
    key = re.sub(r's?{0}(\d+){0}?'.format(delimiter),
                 lambda x: " " + str(int(x.group(1)) + 1) + " ", key)
    key = re.sub(delimiter, ": ", key)
    key = key.title()
    return key
04e4f0d48a7f767faf09853e3fef9941ffc6daee
82,422
def make_X(epochs):
    """Construct an n_samples x n_channels matrix from an mne.Epochs object."""
    X = epochs.get_data().transpose(0, 2, 1).reshape(-1, epochs.info['nchan'])
    return X
9c73b7d1380a405423570e643298cb938629604c
82,423
def getEditDistance(str1, str2):
    """Return the edit distance between two strings.

    >>> getEditDistance("abc", "abcd")
    1
    >>> getEditDistance("abc", "aacd")
    2

    If one of the strings is empty, it will return the length of the other string

    >>> getEditDistance("abc", "")
    3

    The order of strings is not important, it will return the same output when
    strings are swapped.

    >>> getEditDistance("rice", "raise")
    2
    >>> getEditDistance("raise", "rice")
    2
    """
    # if one of the strings is empty, the edit distance equals to the length of the other string
    # as all we need to do is insert all the characters from one string to other
    if len(str1) == 0:
        return len(str2)
    if len(str2) == 0:
        return len(str1)

    # neither one is empty
    # we will use wagner-fischer algorithm
    # matrix is one character bigger for each string, because we will start from 0
    # matrix[y+1][x+1] will hold the Levenshtein distance between first y chars of str1
    # and the first x chars of str2
    matrix = [[i for k in range(len(str2) + 1)] for i in range(len(str1) + 1)]

    # we want to start by putting the numbers 0 to length of the string in the first column and row
    # as the difference between any string and an empty string is the length of that string,
    # we start from 0 (no difference between two empty strings) and go up to its length
    for i in range(len(str2) + 1):
        matrix[0][i] = i

    # now that we completed the first row and column of our matrix,
    # proceed to process the rest
    for i in range(len(str2)):
        for j in range(len(str1)):
            if str2[i] == str1[j]:
                # no difference in this character, edit distance will equal to previous
                matrix[j + 1][i + 1] = matrix[j][i]
            else:
                # this char is different, get the lowest edit distance to acquire the previous string and add one
                matrix[j + 1][i + 1] = min(matrix[j][i + 1] + 1, matrix[j + 1][i] + 1, matrix[j][i] + 1)

    # as stated earlier, matrix[y+1][x+1] will hold the Levenshtein distance between first y chars of str1
    # and the first x chars of str2. So the latest cell will hold the final edit distance
    return matrix[-1][-1]
9e03ba29f26017990e131ea6485bf3885975c28d
82,431
def get_host_port(hostport_str):
    """
    Convert string in format `host:port` to host and port.
    """
    # assume argument 1 on form host:port
    host_port_args = hostport_str.split(':')
    rotctld_host = host_port_args[0]
    rotctld_port = 4533
    if len(host_port_args) > 1:
        rotctld_port = int(host_port_args[1])
    return rotctld_host, rotctld_port
298761ccf5db6178d42a18783588c9ff3510815d
82,436
import collections


def get_duplicates(iterable):
    """Return a set of the elements which appear multiple times in iterable."""
    counter = collections.Counter(iterable)
    return {key for key, count in counter.items() if count > 1}
a1c75047da431d9701201852bda3178765048a87
82,438
def rowcol2idx(r, c, shape):
    """
    Given a row, column, and matrix shape, return the corresponding index
    into the flattened (raveled) matrix.
    """
    assert len(shape) == 2
    rows, cols = shape
    return r * cols + c
3339ddde2d7593524b36a3ffffbd28ce3dfc8eac
82,447
def augment_docstr(func):
    """ Decorator to mark augmented function docstrings. """
    func.__doc__ = '%%aug%%' + func.__doc__
    return func
bccf2db8b4579440a13914525d617372438080ec
82,449
def zend_version(interp):
    """ Gets the version of the current Zend engine"""
    return interp.config.get_ini_w('zend_version')
27d502f58392a1f331ffbace68b7e1b356d772b0
82,456
def nonzero_sign(xy):
    """
    A sign function that won't return 0
    """
    return -1 if xy < 0 else 1
8e39b84f662c1fef74f17d8dcb18cc8d3a5c117a
82,457
import re


def insertAttribute(tag, attribute, newValue):
    """
    Inserts the specified attribute in the specified tag, with the value
    newValue. (The attribute is inserted as the last attribute in the tag).
    """
    # The backreference must be escaped as '\\1' in a non-raw replacement
    # string, otherwise re.sub would insert a literal chr(1) character.
    value = re.sub(r'([/])>',
                   '\n ' + attribute + '="' + newValue + '" \\1>\n ',
                   tag[1])
    tag[1] = value
    return tag
4202e3b817637d334f80b268b06028015e98dba4
82,458
def isfile(line):
    """
    Files are wrapped in FHS / FTS

    FHS = file header segment
    FTS = file trailer segment
    """
    return line and (line.strip()[:3] in ["FHS"]) or False
2e33c98f18e44913d18dbbbbed7ec6bd7887f8ba
82,462
def length_error_message(identifier, min_length=None, max_length=None):
    """Build length error message."""
    additional = []
    if min_length:
        additional.append('at least length {}'.format(min_length))
    if max_length:
        additional.append('at most length {}'.format(max_length))
    body = ', '.join(additional)
    message = '{} identifier input must be {}.'.format(identifier, body)
    return message
db6043bbc71cef2b70f1a301730333832f329c11
82,466
import hashlib


def HashName(name):
    """Returns the hash id for a name.

    Args:
        name: The name to hash.

    Returns:
        An int that is at most 32 bits.
    """
    md5hash = hashlib.md5()
    md5hash.update(name.encode('utf-8'))
    return int(md5hash.hexdigest()[:8], 16)
4059f4117c3c74880478d8453f302e5811c1f3df
82,467
def site(url, site_number=None):
    """
    Returns the site which the url is of.
    """
    if site_number is not None:
        site_number = int(site_number)
    if "panda" in url or site_number == 1:
        return "Mangapanda"
    elif "mangasee" in url or site_number == 2:
        return "Mangasee"
    else:
        raise Exception("Site Not Supported. See Help / Readme.md for Supported sites.")
36625478d5a1813f409cb4de59667b3e1eccc275
82,470
from bs4 import BeautifulSoup


def remove_html_tags(text):
    """
    Method used to remove the occurrences of html tags from the text

    Parameters:
    -----------------
    text (string): Text to clean

    Returns:
    -----------------
    text (string): Text after removing html tags.
    """
    # Initiating BeautifulSoup object soup
    soup = BeautifulSoup(text, "html.parser")
    # Get all the text other than html tags.
    text = soup.get_text(separator=" ")
    return text
6a3d247f0c2a3b784f2cbe8baf3c0c96ccc154e2
82,471
from pathlib import Path


def not_max_depth(path: Path, root: Path, depth: int):
    """Return true if depth of current path is less than max depth specified"""
    return len(path.relative_to(root).parents) <= depth
9d443b0b9c5104eff63b40fd453fdb64516180ba
82,472
def time_string(t):
    """
    Return a string of format 'hh:mm:ss', representing time t in seconds

    Result rounded to nearest second.
    """
    seconds = int(round(t))
    h, rsecs = divmod(seconds, 3600)
    m, s = divmod(rsecs, 60)
    return str(h).zfill(2) + ":" + str(m).zfill(2) + ":" + str(s).zfill(2)
3969e8b0dcdeae833475cf5e2c4ba51ceef1d797
82,474
import copy


def merge_configs(default, overwrite):
    """Recursively update a dict with the key/value pair of another.

    Dict values that are dictionaries themselves will be updated, whilst
    preserving existing keys.
    """
    new_config = copy.deepcopy(default)
    for k, v in overwrite.items():
        # Make sure to preserve existing items in
        # nested dicts, for example `abbreviations`
        if isinstance(v, dict):
            new_config[k] = merge_configs(default[k], v)
        else:
            new_config[k] = v
    return new_config
153a7e4beeda151e737284237a31e0a6bc9a606f
82,476
def wptemplate2directory(template_name, wikiprojects, directory=[]):
    """
    Convert a WikiProject template name to a path within the WikiProject
    Directory hierarchy.

    :Parameters:
        template_name : str
            The name of a template (or redirected template) used to tag an article
        wikiprojects : dict
            The wikiprojects directory tree
    :Returns:
        A directory path as a list of strings ordered from top to bottom.
        E.g. "maths rating" returns ["stem", "mathematics"]
    """
    for key in wikiprojects:
        val = wikiprojects[key]
        if 'name' in val and val['name'] == template_name:
            if 'root_url' in val and val['root_url'].endswith('Directory'):
                path_new = directory[:]
                path_new.append(key)
                path = wptemplate2directory(
                    template_name, val['topics'], path_new)
                if path is not None:
                    return path
            return directory
        if 'topics' in val:
            path_new = directory[:]
            path_new.append(key)
            path = wptemplate2directory(template_name, val['topics'], path_new)
            if path is not None:
                return path
    return None
e720803e83ac6ad965254ac5076d18cd5928c059
82,481
def platform_url(host='platform-api.newrelic.com', port=None, ssl=True):
    """Returns the URL for talking to the data collector when reporting
    platform metrics.
    """
    url = '%s://%s/platform/v1/metrics'
    scheme = ssl and 'https' or 'http'
    server = port and '%s:%d' % (host, port) or host
    return url % (scheme, server)
c9a8c920702f174d1a8a7970b2b6eb792a3f921f
82,482
import re


def is_assay_or_study(collection):
    """
    Check if a given collection matches the format of path to a study or assay
    collection.
    """
    return re.match(
        r'(assay|study)_[a-f0-9]{8}-([a-f0-9]{4}-){3}[a-f0-9]{12}',
        collection.name,
    )
427c8592b451690d34fd3854f9b9e8a79e1a8a3b
82,488
def sanitise(string, replacements={'_': '-'}):
    """
    Substitute characters that are used as separators in the pathname and
    other special characters if required
    """
    for c in replacements:
        string = string.replace(c, replacements[c])
    return string
221634ae19f2082a9556274dd24dccd2e1d5d05b
82,490
def is_widget(view):
    """
    Returns `True` if the @view is any kind of widget.
    """
    setts = view.settings()
    return (setts.get('is_widget') or setts.get('is_vintageous_widget'))
d3b5bbdb0ee1d23c53f17203f1ab2caf354f97c5
82,491
import platform

import click


def strip_style_win32(styled_output: str) -> str:
    """Strip text style on Windows.

    `click.style` produces ANSI sequences, however they were not supported
    by PowerShell until recently and colored output is created differently.
    """
    if platform.system() == "Windows":
        return click.unstyle(styled_output)
    return styled_output
581ab6062efd99764aa15ac3b4574e2c165ec642
82,493
def fatorial(n, show=False):
    """
    -> Computes the factorial of a number.

    :param n: The number to be computed.
    :param show: (optional) Whether or not to show the calculation.
    :return: The factorial of the number n.
    """
    if show == False:
        num = 1
        for i in range(n, 0, -1):
            num *= i
        return num
    elif show == True:
        num = 1
        for i in range(n, 0, -1):
            num *= i
            if i > 1:
                print(f'{i} x ', end='')
            else:
                print(f'{i} = ', end='')
        return num
ee63f9e94f6147b21a04480689c81b424e41f009
82,494
def _clean_suffix(string, suffix):
    """
    If string endswith the suffix, remove it. Else leave it alone.
    """
    suffix_len = len(suffix)

    if len(string) < suffix_len:
        # the string param was shorter than the suffix
        raise ValueError("A suffix can not be bigger than string argument.")
    if string.endswith(suffix):
        # return from the beginning up to
        # but not including the first letter
        # in the suffix
        return string[0:-suffix_len]
    else:
        # leave unharmed
        return string
3c5300976606969556af709609c6d8e9876c4a0a
82,495
def get_four_corners_from_2_corners(x1, y1, x2, y2):
    """
    Function returns all corners of a bounding box given 2 corners

    Args:
        x1, y1, x2, y2 (int) - top left and bottom right corners of box

    returns list containing all corners of box.
    """
    return [x1, y1, x1, y2, x2, y2, x2, y1]
9e099dcfe173e84ad3bc9b7a5ca0e7afc45191f9
82,496
def parse_ipmi_fru(output):
    """Parse the output of the fru info retrieved with ipmitool"""
    hrdw = []
    sys_cls = None
    sys_type = None
    for line in output:
        if len(line.strip()) == 0:
            sys_cls = None
            sys_type = None
            continue
        items = line.split(':')
        if len(items) < 2:
            continue
        if line.startswith('FRU Device Description'):
            if 'Builtin FRU Device' in items[1]:
                sys_cls = 'oem'
            else:
                sys_cls = items[1].split('-FRU')[0].strip().lower()
                if sys_cls.startswith('psu'):
                    sys_type = sys_cls
                    sys_cls = 'psu'
        else:
            value = items[1].strip()
            sys_subtype = items[0].strip()
            if sys_type:
                hrdw.append((sys_cls, sys_type, sys_subtype, value))
            else:
                items = sys_subtype.split(' ')
                hrdw.append((sys_cls, items[0], ' '.join(items[1:]), value))
    return hrdw
d5e919bef40a7fe1ca59c39acc7c2ac741421032
82,500
def get_time_space(df, time_dim, lumped_space_dims):
    """
    Get a DataFrame with the dim: [row (e.g., time), column (e.g., space)]

    Parameters
    ----------
    df : pandas.core.frame.DataFrame
        | time | lat | lon | PM2.5 |
        | 2011 |  66 |  88 |    22 |
        | 2011 |  66 |  99 |    23 |
        | 2012 |  66 |  88 |    24 |
        | 2012 |  66 |  99 |    25 |
    time_dim: time dim name (str), e.g., "time"
    lumped_space_dims: a list of the lumped space, e.g., ["lat","lon"]

    Returns
    -------
    dataframe: with [row (e.g., time), column (e.g., space)], e.g.,
        | time | loc1    | loc2    |
        |      | (66,88) | (66,99) |
        | 2011 | 22      | 23      |
        | 2012 | 24      | 25      |
    """
    return df.set_index([time_dim] + lumped_space_dims).unstack(lumped_space_dims)
a02c160aa23b69f3cc7737b855c573c43a36045e
82,501