Columns: content (string, lengths 35 to 416k), sha1 (string, length 40), id (int64, 0 to 710k).
from os.path import dirname, join, exists


def parse_requirements(fname='requirements.txt'):
    """
    Parse the package dependencies listed in a requirements file.

    CommandLine:
        python -c "import setup; print(setup.parse_requirements())"
    """
    require_fpath = join(dirname(__file__), fname)
    if exists(require_fpath):
        with open(require_fpath, 'r') as f:
            lines = [line.strip() for line in f.readlines()]
            lines = [line for line in lines if not line.startswith('#')]
            return lines
85f16a09676db95e86da95ba494c6d2d8f996387
10,532
def build_data(document, publication):
    """
    Maps the variables of pt_law_downloader to our form
    """
    return {'creator_name': publication['creator'],
            'type': publication['type'],
            'number': publication['number'],
            'text': publication['text'],
            'summary': publication['summary'],
            'date': publication['date'],
            'dre_doc_id': publication['dre_id'],
            'dre_pdf_id': publication['pdf_id'],
            'dr_pages': publication['pages'],
            'dr_series': document['series'],
            'dr_supplement': document['supplement'],
            'dr_number': document['number']}
1fc7353a288eb0a66be95786a9a6bb69139baf8d
10,533
import os


def deduplicate(data_name, data_dict, data_dir):
    """
    Return a local path for given data path

    Args:
        data_name: the basename of the target file
        data_dict: the existing mapping of local paths
        data_dir: the full path of the destination directory
    """
    n_dups = 0
    basename = data_name
    local_path = os.path.join(data_dir, basename)
    while local_path in data_dict.values():
        root, ext = os.path.splitext(basename)
        local_path = os.path.join(data_dir, f'{root}-{n_dups}{ext}')
        n_dups += 1
    return local_path
3cc8c41a424612ad6080b53f47af5d19981dc50f
10,534
def move_left(point):
    """Return a copy of `point`, moved left along the X-axis by 1.

    This function returns a tuple, not a Vector2D instance, and should only
    be used if performance is essential.  Otherwise, the recommended
    alternative is to write `point + LEFT`.
    """
    x, y = point
    return x - 1, y
4e06e6f0a7eb0f944c42c3952955441cf9b83728
10,535
import yaml


def dict_to_yaml(dict_data):
    """Return YAML from dictionary.

    :param dict_data: Dictionary data
    :type dict_data: dict
    :returns: YAML dump
    :rtype: string
    """
    return yaml.dump(dict_data, default_flow_style=False)
eba7896f63d499ef6c55b057320933dc0ff86333
10,536
def df_to_list(df):
    """Convert a pandas DataFrame's values to a list of lists.

    Args:
        df (pandas.DataFrame): A DataFrame to convert to a list of lists.

    Returns:
        list: A list of lists, one inner list per column.
    """
    return [df[column_name].values.tolist() for column_name in df.columns]
6bff41874f195783ee67a859f203f6fb20969269
10,537
def findNearestDate(date_list, date):
    """
    Find the closest datapoint of each uppermost sensor in a time window.

    Adapted from https://stackoverflow.com/a/32237949/3816498 .

    Parameters
    ----------
    date_list : array-like
        List of dates
    date : datetime
        The date, to which the nearest date in `date_list` should be found.

    Returns
    -------
    nearest_date : datetime
        Nearest date to `date` in `date_list`
    time_delta : float
        Time difference in minutes
    """
    nearest_date = min(date_list, key=lambda x: abs(x - date))
    time_delta = (nearest_date - date).total_seconds() / 60.
    return nearest_date, time_delta
431f93212e6ffeddb994e727d94db8ccfe67315c
10,538
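A quick usage sketch for findNearestDate above, with made-up datetimes:

from datetime import datetime

dates = [datetime(2020, 1, 1, 12, 0), datetime(2020, 1, 1, 12, 30)]
target = datetime(2020, 1, 1, 12, 20)
nearest, delta = findNearestDate(dates, target)
# nearest is the 12:30 entry; delta is 10.0 minutes (nearest - target)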
from functools import reduce


def prod(stuff):
    """prod(stuff): compute the product (i.e. reduce with '*') of the
    elements of 'stuff'.  'stuff' must be iterable."""
    return reduce(lambda x, y: x*y, stuff)
5ddaaaced8c187018d9a5113ea3a91a21deb6e0b
10,539
def GetIonPositions(names_and_pos, ion_input):
    """
    input: names_and_pos dict<string, Vector3>: dictionary that contains the
           position of each labelled element present.
    input: ion_input dict<string, Ionic>: dict of Ionic class which contains
           info with no radius value
    output: dict<string, Vector3>: refined dict with only the ions from the
            ion_input dict
    """
    d = names_and_pos.copy()
    for element_num in names_and_pos:
        element = ''.join([i for i in element_num if not i.isdigit()])  # removes digits from string
        if element not in ion_input:
            del d[element_num]
    return d
51cbcdd34256546bc37414dd5ec5fb06d0af0035
10,540
from typing import List
from typing import Dict


def compute_images2labels(images: List[str], labels: List[str]) -> Dict:
    """Maps all image paths to a list of labels.

    Args:
        images (List[str]): The list of image paths.
        labels (List[str]): The list of labels.

    Returns:
        Dict: The mapping between the image paths and the labels.
    """
    # Init images2labels dict
    images2labels = {}

    # Find label for each image
    for image_path in images:
        for label in labels:
            if f"/{label}/" in image_path:
                images2labels[image_path] = label

    return images2labels
1af99cd93bea7530857502665d4ce388a9d8f9ba
10,541
import difflib


def similarString(inString, inList):
    """
    check if there is an object that resembles the given string in the list

    :param inString: string to check for
    :type inString: string
    :param inList: list of strings to choose from
    :type inList: list
    :return: the string that resembles the input the most
    :rtype: string
    """
    remove = .1
    for i in range(10):
        matches = difflib.get_close_matches(inString, inList, n=3, cutoff=1.0 - (i * remove))
        if matches:
            return matches[0]
c61b9a767b0cd1fa063b1c23a45b3e4063a7fa55
10,542
def sum_counts(fname, R1=False):
    """Collect the sum of all reads for all samples from a summary count
    file (e.g. from collect_counts)"""
    count = 0
    with open(fname, 'r') as infh:
        for line in infh:
            l = line.split()
            if R1:
                if l[2] == "R1":
                    count += float(l[3])
            else:
                count += float(l[3])
    return count
6ce7e156e1100f3106836e0b34caf6750baa18e1
10,544
def parse_tags(tag_list):
    """
    >>> 'tag2' in parse_tags(['tag1=value1', 'tag2=value2'])
    True
    """
    tags = {}
    for t in tag_list:
        k, v = t.split('=')
        tags[k] = v
    return tags
962c7ba737c56b46f9783912c528e4da6e4ee145
10,545
import os.path


def relative(src, dest):
    """
    Return a relative path from src to dest.

    >>> relative("/usr/bin", "/tmp/foo/bar")
    '../../tmp/foo/bar'
    >>> relative("/usr/bin", "/usr/lib")
    '../lib'
    >>> relative("/tmp", "/tmp/foo/bar")
    'foo/bar'
    """
    if hasattr(os.path, "relpath"):
        return os.path.relpath(dest, src)
    else:
        destlist = os.path.normpath(dest).split(os.path.sep)
        srclist = os.path.normpath(src).split(os.path.sep)

        # Find common section of the path
        common = os.path.commonprefix([destlist, srclist])
        commonlen = len(common)

        # Climb back to the point where they differentiate
        relpath = [os.path.pardir] * (len(srclist) - commonlen)
        if commonlen < len(destlist):
            # Add remaining portion
            relpath += destlist[commonlen:]

        return os.path.sep.join(relpath)
b048175956a90566f7dd324821a71ace5e4ebe0b
10,546
def coerce_bool(value):
    """Coerce a string to a bool, or to None"""
    clean_value = str(value).strip().lower()
    if clean_value in ["yes", "1", "true", "t"]:
        return True
    elif clean_value in ["no", "n0", "0", "false", "f"]:
        return False
    elif clean_value in ["", "na", "n/a", "none"]:
        return None
    else:
        raise ValueError("Could not determine truthiness of value {!r}".format(value))
dd979b73717b2c86fe28cf7d1bdcc89020eda163
10,547
def count_consonants(string):
    """
    Function which returns the count of all consonants in the string "string"
    """
    consonants = "bcdfghjklmnpqrstvwxz"
    counter = 0
    if string:
        for ch in string.lower():
            if ch in consonants:
                counter += 1
    return counter
4a852b3ec9f8f660d71dde547cbffb0c25b1e209
10,548
def soft_contingency_table(resp1, resp2):
    """Compute the soft contingency table for two responsibility matrices

    Args:
        resp1 (numpy array): N x K_1 responsibility matrix - each row is a
            probability vector for one of the N items belonging to each of
            K_1 modes
        resp2 (numpy array): N x K_2 responsibility matrix - each row is a
            probability vector for one of the N items belonging to each of
            K_2 modes

    Returns:
        (numpy array): K_1 x K_2 contingency table for soft clustering -
            defined as resp1^T resp2
    """
    return resp1.T @ resp2
50ee20e05755d320fe9f130a6ca57728d1e1b5ad
10,550
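A small sketch of soft_contingency_table above with hypothetical responsibilities; since each row sums to 1, the table's total mass equals N:

import numpy as np

resp1 = np.array([[1.0, 0.0], [0.5, 0.5]])   # N=2 items, K_1=2 modes
resp2 = np.array([[0.2, 0.8], [1.0, 0.0]])   # N=2 items, K_2=2 modes
table = soft_contingency_table(resp1, resp2)
# table has shape (2, 2) and table.sum() == 2.0 == N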
def allowed_request_lot_filters(lot_filters):
    """Create a set of (name, value) pairs for all form filters."""
    filters = set()

    def recursive_search(filters):
        more_filters = set()
        for f in filters:
            more_filters.add((f['name'], f['value']))
            children = f.get('children')
            if children:
                more_filters.update(recursive_search(children))
        return more_filters

    # recursive search to account for sub-filters (i.e. sub-categories)
    for section in lot_filters:
        filters.update(recursive_search(section['filters']))

    return filters
1b265e0233e08d0cab4248f28424b5b26ad28340
10,551
def _check_if_StrNotBlank(string):
    """
    check if a string is blank/empty

    Parameters
    ----------

    Returns
    -------
    : boolean
        True if string is not blank/empty
        False if string is blank/empty
    """
    return bool(string and string.strip())
e5de1d902f8e3931d23e04c6ba825b17d90e8d1d
10,553
from math import asin, atan2, sqrt, degrees


def normal2SD(x, y, z):
    """Converts a normal vector to a plane (given as x,y,z) to a strike and
    dip of the plane using the Right-Hand-Rule.

    Input:
        x: The x-component of the normal vector
        y: The y-component of the normal vector
        z: The z-component of the normal vector
    Output:
        strike: The strike of the plane, in degrees clockwise from north
        dip: The dip of the plane, in degrees downward from horizontal
    """
    # Due to geologic conventions, positive angles are downwards
    z = -z

    # First convert the normal vector to spherical coordinates
    # (This is effectively a plunge/bearing of the normal vector)
    r = sqrt(x*x + y*y + z*z)
    plunge = degrees(asin(z/r))
    bearing = degrees(atan2(y, x))

    # Rotate bearing so that 0 is north instead of east
    bearing = 90 - bearing
    if bearing < 0:
        bearing += 360

    # If the plunge angle is upwards, get the opposite end of the line
    if plunge < 0:
        plunge = -plunge
        bearing -= 180
        if bearing < 0:
            bearing += 360

    # Now convert the plunge/bearing of the pole to the plane that it represents
    strike = bearing + 90
    dip = 90 - plunge
    if strike > 360:
        strike -= 360

    return strike, dip
6b8bcfb9444352f8722aa1742909544202fa32d9
10,554
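A quick sanity check for normal2SD above: a horizontal plane has a straight-up normal, so the dip should come out zero.

strike, dip = normal2SD(0, 0, 1)   # normal pointing straight up
# dip == 0.0 (horizontal plane); strike == 360.0, i.e. due north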
def append_result(results, result_hash, item):
    """Append to results, creating an index if needed."""
    if result_hash not in results:
        results[result_hash] = [item]
    else:
        results[result_hash].append(item)
    return results
1b2af69ad291f885a8f52ce95ba7e0d82792e835
10,556
def rotateToHome(x, y):
    """Rotate to the home coordinate frame.

    Home coordinate frame starts at (0,0) at the start of the runway and
    ends at (0, 2982) at the end of the runway. Thus, the x-value in the
    home coordinate frame corresponds to crosstrack error and the y-value
    corresponds to downtrack position.

    Args:
        x: x-value in local coordinate frame
        y: y-value in local coordinate frame
    """
    rotx = 0.583055934597441 * x + 0.8124320138514389 * y
    roty = -0.8124320138514389 * x + 0.583055934597441 * y
    return rotx, roty
4d4c5cd5a3e5186d81bff60266c99128ad8a9d51
10,557
def uniq(seq):
    """the 'set()' way (use dict when there's no set)"""
    return list(set(seq))
e8136a633fdf08d7c79e8d47b8c90dc2409e9d2f
10,558
def gen_range(start, end):
    """Return a list of the numbers between |start| and |end|.

    This filters out surrogate pairs.
    """
    return [x for x in range(start, end) if x < 0xd800 or x >= 0xe000]
009b0af854b5ad6d8a06d01a24cf08b26a644dbc
10,559
import json


def get_json_data():
    """Fixture to return the json data as a Python variable."""
    def _method(_file):
        """Fixture to return the json data as a Python variable, given a
        file location

        Args:
            _file (str): The location of the json file to input data from.

        Returns:
            dict: The data structure from the JSON file.
        """
        with open(_file) as file:
            data = json.load(file)
        return data

    return _method
8544db10f0d4558c030ffb8a1455aed91b832ac3
10,561
import requests


def get_channel_id(username, API_KEY):
    """
    i might have been entering the wrong usernames or something but,
    this thing only works half of the time
    """
    url = "https://www.googleapis.com/youtube/v3/channels"
    payload = {"forUsername": username, "part": "id,snippet", "key": API_KEY}
    channel_id_request = requests.get(url=url, params=payload)
    data = channel_id_request.json()
    channel_id = data["items"][0]["id"]
    return channel_id
da90cdec623d5c3e4ee210cc133ee9900db340f9
10,564
def average_data(df):
    """Average data over eval runs then scenario runs"""
    df = df.astype({"solved": int})
    avg_eval_df = df.groupby(["scenario", "agent", "run"]).mean().reset_index()
    eval_err = df.groupby(["scenario", "agent", "run"]).sem().reset_index()
    # run_err = avg_eval_df.groupby(["scenario", "agent"]).sem().reset_index()
    run_avg = avg_eval_df.groupby(["scenario", "agent"]).mean().reset_index()
    return run_avg, eval_err
e0f6bc8f8ee59b6b5855324baf6a2ecee4a25e2e
10,565
def _GetStarredIssues(cnxn, logged_in_user_id, services):
    """Get the set of issues that the logged in user has starred."""
    starred_iids = services.issue_star.LookupStarredItemIDs(
        cnxn, logged_in_user_id)
    return set(starred_iids)
6185371138da1e79673d7347770073a18f82b099
10,568
import pytz


def AdaptReadableDatetime(date_obj):
    """Adapts a datetime.datetime object to its ISO-8601 date/time notation."""
    try:
        date_obj = date_obj.astimezone(pytz.utc)
    except ValueError:
        pass  # naive datetime object
    return date_obj.isoformat()
feceafb58995001acdeb2285fa5782bec7cc756d
10,569
def prompt_choice(length, select_action, per_page):
    """
    Prompt the user for a choice of entry, to continue or to quit. An
    invalid choice will repeat the prompt.

    @param length: the largest choosable value
    @param select_action: description of what choosing an entry will result in.
    @param per_page: number of results to offer next. Set to 0 to hide "next" option.
    """
    prompt = 'What do you want to do? [{0}] to {1}, {2}[Q]uit: '.format(
        '1' if length == 1 else '1–{length}',
        select_action,
        '[N]ext {per_page}, ' if per_page else '')
    while True:
        choice = input(prompt.format(length=length, per_page=per_page))
        try:
            int_choice = int(choice)
        except ValueError:
            int_choice = None
        if choice.lower() == 'n' and per_page:
            return None
        elif choice.lower() == 'q':
            exit(0)
        elif int_choice and (1 <= int_choice <= length):
            return int_choice
        else:
            print('Invalid choice. Try again!')
4b957479d96e5b8c642db4e888faf92c8c9cf945
10,570
import collections


def make_map(connections):
    """Return the map of caves based on connections."""
    neighbors = collections.defaultdict(list)
    for start, end in connections:
        neighbors[start].append(end)
        neighbors[end].append(start)
    return neighbors
10f35208fa75b5ae4723d9d55e022a0fcc857e51
10,571
from typing import Tuple


def info_to_table(rows: list) -> Tuple[list, list]:
    """
    Formats raw row data into a table format that will be used with other
    render functions. This function is where column headers should be defined.

    Arguments:
        rows(list): Rows of data

    Return:
        List : List of column names
        List : Full table representation of data
    """
    columns = ["Address", "Name", "Type"]
    full_table = []
    for row in rows:
        # create table
        full_table.append([row["address"], row["name"], row["type"]])
    return columns, full_table
3f92acccb3d93aa539ef34f136c557ca3ecb324b
10,572
import os


def is_uptodate(picklefile, path_list=None, min_time=1343682423):
    """Check if the pickle file is up to date compared to a list of files.
    If no files are given, the pickle file is checked against its current
    directory."""
    if not os.path.exists(picklefile):
        return False

    if path_list is None:
        dirpath = os.path.dirname(picklefile)
        path_list = [os.path.join(dirpath, file) for file in os.listdir(dirpath)]

    assert type(path_list) == list, 'is_uptodate expects a list of files'

    pickle_date = os.path.getctime(picklefile)
    if pickle_date < min_time:
        return False

    for path in path_list:
        try:
            if os.path.getmtime(path) > pickle_date:
                return False
        except Exception:
            continue

    # all pass
    return True
57b5e5377780a043e547ceb4657f97d111d84062
10,574
def set_value(obj, value):
    """
    Set a fixed value.

    :param value: the value
    :return: the value
    """
    return value
1ecb69bebb841a8a36d752544560f87d95f07c5f
10,575
def calc_distances(x, targets):
    """within and between class distances"""
    def c_s(l):
        l.sort()
        s, r = 0, 0
        n = len(l)
        for i in range(n - 1):
            s += l[i]
            r += 2 * (n - i - 1) * l[i]
        s += l[-1]
        return (n - 1) * s - r

    within_class = 0
    for y, inclass_x in targets.items():
        within_class += c_s(inclass_x)
    within_class = 2 * within_class
    between_class = 2 * c_s(x) - within_class
    return within_class, between_class
079c7085bb275b905b7342cfca98db074f801c53
10,576
def check_symmetry_and_dim(number, dim=3):
    """
    check if it is a valid number for the given symmetry

    Args:
        number: int
        dim: 0, 1, 2, 3
    """
    valid = True
    msg = 'This is a valid group number'

    numbers = [56, 75, 80, 230]

    if dim not in [0, 1, 2, 3]:
        msg = "invalid dimension {:d}".format(dim)
        valid = False
    else:
        max_num = numbers[dim]
        if number not in range(1, max_num + 1):
            valid = False
            msg = "invalid symmetry group {:d}".format(number)
            msg += " in dimension {:d}".format(dim)

    return valid, msg
1097eb78dbd58f0e164ae5f10fdc9f88a3c00ccc
10,577
import sys


def is_test():
    """
    Detects if the application is currently running its test suite.
    """
    return 'test' in sys.argv[1]
5552a8e3d7adf1b0fc5f17c2ae56c5fa640dc4a5
10,578
import torch


def get_all_dists(x, y):
    """Get L2 distance of x from each item in y."""
    return torch.sqrt(torch.sum(torch.pow(x - y, 2), 1))
9c54cdd1887b71e872b240e87cf12cc5df9abb1e
10,579
def _get_suggestions_index(name):
    """Returns suggestions index name for a regular index name."""
    return f'df_suggestions_{name}'
4cd2294e89f05dfbefe65ff4604c43818880d6c9
10,580
def calculate_immediate_post_dominators(nodes, _pdom, _spdom):
    """
    Calculate immediate post dominators for all nodes.

    Do this by choosing n from spdom(x) such that pdom(n) == spdom(x).
    """
    _ipdom = {}
    for node in nodes:
        if _spdom[node]:
            for x in _spdom[node]:
                if _pdom[x] == _spdom[node]:
                    # This must be the only definition of ipdom:
                    assert node not in _ipdom
                    _ipdom[node] = x
        else:
            # No strict post dominators, hence also no
            # immediate post dominator:
            _ipdom[node] = None
    return _ipdom
17fb44a47a440c8c93f58262132ab0f65a9566a5
10,585
def add_two_polynomials(polynomial_1: list, polynomial_2: list) -> list:
    """
    This function expects two `polynomials` and returns a `polynomial` that
    contains their `sum`.

    :param polynomial_1: First polynomial
    :param polynomial_2: Second polynomial
    :return: A polynomial representing the sum of the two polynomials
    """
    # declaring the polynomial that will be returned (the sum)
    return_polynomial = []
    # storing the length of the shortest polynomial via the inbuilt min() function
    minimum_length = min(len(polynomial_1), len(polynomial_2))

    # adding the coefficients for every power of X up until the shortest one
    # ends and appending the sum
    for k in range(minimum_length):
        return_polynomial.append(polynomial_1[k] + polynomial_2[k])

    # figuring out which polynomial is longer and appending all of the
    # coefficients that are left
    if len(polynomial_1) > len(polynomial_2):
        for k in range(len(polynomial_2), len(polynomial_1)):
            return_polynomial.append(polynomial_1[k])
    # intentionally checking both '>' and '<' to rule out the case in which
    # they are equal
    elif len(polynomial_1) < len(polynomial_2):
        for k in range(len(polynomial_1), len(polynomial_2)):
            return_polynomial.append(polynomial_2[k])

    return return_polynomial
e2fed8be5f35f1c306b78b69c608f90668aeb2f1
10,587
from math import sqrt


def prime_def(num):
    """
    Find the largest prime divisor of a number
    :param num:
    :return:
    """
    prime_set = set(range(1, num + 1))
    # sieve out composites; the bound must include int(sqrt(num)) itself
    for i in range(2, int(sqrt(num)) + 1):
        if i in prime_set:
            prime_set -= set(range(2*i, num + 1, i))
    n = []
    for i in prime_set:
        if num % i == 0:
            n.append(i)
    return max(n)
619a4310dd9540e8354143fa2871ceddb78b4237
10,589
def dict_from_two_lists(keys: list, values: list):
    """Creates a dictionary from a list of keys and a list of values.

    Examples:
        >>> keys = ('bztar', 'gztar', 'tar', 'xztar', 'zip')
        >>> values = ('.tbz2', '.tgz', '.tar', '.txz', '.zip')
        >>> newdict = dict_from_two_lists(keys, values)
        >>> pprint(newdict)
        {'bztar': '.tbz2', 'gztar': '.tgz', 'tar': '.tar', 'xztar': '.txz', 'zip': '.zip'}

    Args:
        keys (list): Reference the keys list
        values (list): Reference the values list

    Returns:
        dict: Returns a dictionary
    """
    result = {k: v for k, v in zip(keys, values)}
    return result
3865a8e5a890dc00e69ea3feafc161f8617697ff
10,590
def hashable_index(tuple_idx):
    """Return a hashable representation of a tuple of slice objects

    We add this because the slice object in python is not hashable.

    Parameters
    ----------
    tuple_idx : tuple
        A tuple of slice/int objects

    Returns
    -------
    ret : tuple
        A hashable representation of the slice data
    """
    l = []
    for ele in tuple_idx:
        if isinstance(ele, slice):
            l.append(ele.__reduce__())
        else:
            l.append(ele)
    return tuple(l)
e83d4db426053cc64f9ffda3b938bb85395e4741
10,591
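A short usage sketch for hashable_index above; slice.__reduce__() returns the hashable (slice, (start, stop, step)) pair:

idx = (slice(0, 10, 2), 3)
key = hashable_index(idx)
# key == ((slice, (0, 10, 2)), 3), which can serve as a dict key
cache = {key: "cached result"}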
import torch


def gaussian_kernel1d(
    kernel_size: int, sigma: float, device: torch.device, dtype: torch.dtype
):
    """1D Gaussian kernel."""
    khalf = (kernel_size - 1) / 2.0
    x = torch.linspace(-khalf, khalf, steps=kernel_size, dtype=dtype, device=device)
    pdf = torch.exp(-0.5 * (x / sigma).pow(2))
    return pdf / pdf.sum()
f76fc18500160510f162e93bb68803a36ce4633a
10,594
def interrupt() -> int:
    """interrupt hparams"""
    return 2
6ba8b856ae968283db38a12222bced4217f83868
10,595
import base64


def encrypt_file(filepath):
    """Encrypt file contents to base64.

    :param filepath: the path of the file.
    """
    try:
        # if it starts with ~
        # os.path.expanduser
        with open(filepath) as inf:
            file_contents = inf.read()
        # str.encode('base64') was Python 2 only; use the base64 module
        return base64.b64encode(file_contents.encode('utf-8')).decode('ascii')
    except IOError:
        return filepath
6107418061dd26a5bb21253c376796bdec673783
10,596
def flex_add_argument(f):
    """Make the add_argument accept (and ignore) the widget option."""
    def f_decorated(*args, **kwargs):
        kwargs.pop('widget', None)
        return f(*args, **kwargs)

    return f_decorated
2a93e5af569fcf0ec51f469f0b074379d3d663ff
10,597
import argparse


def parse_args():
    """
    Obtain the simulation options from the input arguments

    Return a tuple with the following elements:
    * config_file: Relative location of the simulation configuration file
    * show_plot: If true, plots shall be generated after the simulation
    * from_file: If true, the output of the SNN is taken from a local file
      that was generated on a previous simulation
    """
    def str2bool(v):
        if isinstance(v, bool):
            return v
        if v.lower() in ('yes', 'true', 't', 'y', '1'):
            return True
        elif v.lower() in ('', 'no', 'false', 'f', 'n', '0'):
            return False
        else:
            raise argparse.ArgumentTypeError('Boolean value expected.')

    parser = argparse.ArgumentParser(
        usage="main.py [-h] [-s] [-f] [-c, --config config_file]")
    parser.add_argument("-c", "--config",
                        type=str,
                        required=True,
                        help="Relative location of the configuration file")
    parser.add_argument("-s",
                        type=str2bool,
                        default=False,
                        nargs='?',
                        const=True,
                        metavar="",
                        help="Show the plot after the simulation")
    parser.add_argument("-f",
                        type=str2bool,
                        default=False,
                        nargs='?',
                        const=True,
                        metavar="",
                        help="Fetch the output data from local file, "
                             "instead of running the SNN")
    # Get the values from the argument list
    args = parser.parse_args()
    config_file = args.config
    show_plot = args.s
    from_file = args.f
    return (config_file, show_plot, from_file)
1efa48f0917145efb0428e86002a19e83d83755a
10,598
def get_time(raw_time):
    """
    Get treated time value from the raw provided time.
    # 2020/09/05T05:21:48z
    # 2020-09-05T05:21:48z
    # dtr = dt.replace("/","-")[:-1]

    :param raw_time: <string>
    :return treated_time: <string>
    """
    treated_time = raw_time.replace("/", "-")
    return treated_time[:-1]
cccdfc8710b63c2f273fa0e8c638c5fc6e66a1b8
10,599
def get_round_from_jitemid(jitemid, cursor):
    """Take a jitemid, return a round."""
    round_jitemid_mapping = {
        0: 'br negative one',
    }
    # if jitemid is a key in the dictionary, return the value;
    # otherwise just return "UNKNOWN ROUND"
    return round_jitemid_mapping.get(jitemid, "UNKNOWN ROUND")
9242e098d31312c3181377d5bb8f4dfaf33c29ca
10,600
import torch


def kl_loss_diag(mean, logvar):
    """
    KL divergence of normal distributions with diagonal covariance and
    standard multivariate normal.

    :param mean: mean of the distribution
    :param logvar: logarithm of the covariance diagonal of the distribution
    :return: KL divergence of the given distribution and standard
        multivariate normal
    """
    result = -0.5 * torch.sum(logvar - torch.pow(mean, 2) - torch.exp(logvar) + 1, 1)
    return result.mean()
e8cedc4ccebc120a4367f9ce52ac96210beb0b5f
10,601
def add_class(add, class_):
    """Add to a CSS class attribute.

    The string `add` will be added to the classes already in `class_`, with
    a space if needed.  `class_` can be None::

        >>> add_class("foo", None)
        'foo'
        >>> add_class("foo", "bar")
        'bar foo'

    Returns the amended class string.
    """
    if class_:
        class_ += " "
    else:
        class_ = ""
    return class_ + add
b5f12ea7a5c573b65ebbd84d987a5de5090e33d0
10,602
import re


def to_lower_camel_case(str_to_convert):
    """
    This function will convert any string with spaces or underscores to
    lower camel case string

    :param str_to_convert: target string
    :return: converted string
    """
    if type(str_to_convert) is not str:
        raise TypeError("The method only take str as its input")
    str_to_convert = str_to_convert.replace("_", " ")
    tmp = re.split(r'\s|-', str_to_convert)
    return "".join([item.lower() for i, item in enumerate(tmp) if i == 0] +
                   [item.capitalize() for i, item in enumerate(tmp) if i != 0])
8bfd591fcbfcff51b463596266cba1403f2d6153
10,603
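A couple of hypothetical calls to to_lower_camel_case above:

print(to_lower_camel_case("hello_world"))      # helloWorld
print(to_lower_camel_case("My test-string"))   # myTestString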
def ordinal_suffix(n):
    """Return the ordinal suffix for a positive integer

    >>> ordinal_suffix(0)
    ''
    >>> ordinal_suffix(1)
    'st'
    >>> ordinal_suffix(2)
    'nd'
    >>> ordinal_suffix(3)
    'rd'
    >>> ordinal_suffix(4)
    'th'
    >>> ordinal_suffix(11)
    'th'
    >>> ordinal_suffix(12)
    'th'
    >>> ordinal_suffix(13)
    'th'
    >>> ordinal_suffix(21)
    'st'
    >>> ordinal_suffix(22)
    'nd'
    >>> ordinal_suffix(23)
    'rd'
    >>> ordinal_suffix(101)
    'st'
    >>> ordinal_suffix(111)
    'th'
    >>> ordinal_suffix(112)
    'th'
    >>> ordinal_suffix(113)
    'th'
    >>> ordinal_suffix(121)
    'st'
    >>> ordinal_suffix(1111)
    'th'
    >>> ordinal_suffix(1322)
    'nd'
    >>> ordinal_suffix('horse')
    ''
    """
    try:
        n = int(n)
    except Exception:
        return ''
    if n < 1:
        return ''
    elif n >= 100:
        return ordinal_suffix(n % 100)
    elif 11 <= n <= 13:
        return 'th'
    elif n % 10 in (1, 2, 3):
        return ('st', 'nd', 'rd')[n % 10 - 1]
    else:
        return 'th'
53617737aaf28c2d239301358f01d1b0cea9f6cb
10,604
def _collect_facts(resource):
    """Transform cluster information to dict."""
    facts = {
        'identifier':         resource['ClusterIdentifier'],
        'create_time':        resource['ClusterCreateTime'],
        'status':             resource['ClusterStatus'],
        'username':           resource['MasterUsername'],
        'db_name':            resource['DBName'],
        'availability_zone':  resource['AvailabilityZone'],
        'maintenance_window': resource['PreferredMaintenanceWindow'],
    }

    for node in resource['ClusterNodes']:
        if node['NodeRole'] in ('SHARED', 'LEADER'):
            facts['private_ip_address'] = node['PrivateIPAddress']
            break

    return facts
a881451ac409288b93160d4cec4b27a3547fe3f9
10,605
def leave_one_in(feature_groups):
    """For each group, return a copy of just that group

    Args:
        feature_groups (list) The feature groups to apply the strategy to

    Returns:
        A list of feature dicts
    """
    return feature_groups
5933925d909b6777f916b6bc585734dd768477b3
10,606
def _strftime(d):
    """Format a date the way Atom likes it (RFC3339?)"""
    return d.strftime('%Y-%m-%dT%H:%M:%SZ%z')
1eebf1bff9c68ba4649f1377f16b4b9feb737f01
10,608
def decr_id(id, id_remove):
    """Decrement a single id, with the aim of closing the gap at id_remove.

    The logic used is similar to that of incr_id_after.
    """
    k = len(id_remove)
    if len(id) >= k and id[:k-1] == id_remove[:k-1] and id[k-1] > id_remove[k-1]:
        return id[:k-1] + (id[k-1] - 1,) + id[k:]
    else:
        return id
51f9a2254e1736a4e6014a8714789186ee4a3c63
10,611
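A brief sketch of decr_id above on tuple ids; removing id (1, 2) shifts later siblings down by one:

print(decr_id((1, 3, 0), (1, 2)))  # (1, 2, 0)
print(decr_id((1, 1, 0), (1, 2)))  # unchanged: (1, 1, 0)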
def get_latex_permutations(rows, cols, tarray, title):
    """
    Creates a latex table as a string. The string can be cut and paste into
    the dissertation.

    :param rows: number of rows in the board.
    :param cols: number of columns in the board.
    :param tarray: 2D array of the table contents.
    :param title: Table string title.
    :return: a string of the completed LaTeX Table.
    """
    latex_str = "\\begin{sidewaystable}\n"
    latex_str += "\\centering\n"
    latex_str += "\\begin{small}\n"
    latex_str += "\\setlength{\\arrayrulewidth}{0.5pt}"
    latex_str += " % thickness of the borders of the table\n"
    latex_str += "\\setlength{\\tabcolsep}{0.75em}"
    latex_str += " % space between the text and the left/right border\n"
    latex_str += "\\arrayrulecolor{black}\n"
    latex_str += "\\begin{tabular}{|"

    # Configure the number of columns
    for _ in range(cols):
        latex_str += "c|"
    latex_str += "}\n"

    # We are now into the table contents!
    for row in range(rows):
        latex_str += "\\hline\n"
        for col in range(cols):
            cell_contents = "{}".format(tarray[row][col]).replace("\n", "")
            # Ensure the table coordinate cells are in bold font
            # and all diagonal cells are highlighted
            if col == row:
                cell_contents = "\\cellcolor{blue!25}\\textbf{" \
                                + cell_contents + "}"
            elif col == 0 or row == 0:
                cell_contents = "\\cellcolor{gray!60}\\textbf{" \
                                + cell_contents + "}"
            latex_str += " " + cell_contents + " "
            if (col + 1) == cols:
                latex_str += "\\\\\n"
            else:
                latex_str += "&"
    latex_str += "\\hline\n"
    latex_str += "\\end{tabular}\n"
    latex_str += "\\end{small}\n"
    latex_str += "\\caption{" + title + "}\n"
    latex_str += "\\label{tab:" + title.replace(" ", "") + "}\n"
    latex_str += "\\end{sidewaystable}\n"
    return latex_str
261bb367a948731748441d10afd4dcee793624b5
10,613
def merge_consecutive_timestamps(timestamps):
    """
    Merges consecutive timestamps in a list if they're less than 2 seconds apart
    Example: [(0,5), (5,10), (20,30)] gets combined into [(0,10), (20,30)]
    """
    result = []
    i = 0
    while i < len(timestamps):
        (start, end) = timestamps[i]
        # check if we're not already at the last element
        if i < len(timestamps) - 1:
            (start_next, end_next) = timestamps[i + 1]
            # merge if less than 2 seconds apart
            if abs(end - start_next) < 2:
                result.append((start, end_next))
                i += 1
            else:
                result.append((start, end))
        else:
            result.append((start, end))
        i += 1
    return result
9ec3817dd62771d7269892ae590a160ea581fa53
10,617
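Running merge_consecutive_timestamps above on the docstring example:

print(merge_consecutive_timestamps([(0, 5), (5, 10), (20, 30)]))
# [(0, 10), (20, 30)]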
def central_derivative(xs, ys):
    """central derivative at x[1]"""
    return (ys[2] - ys[0]) / (xs[2] - xs[0])
ef3e397eda004f9f2afbd2a5bd5c8c23b72755c5
10,619
import torch


def hard_sigmoid(tensor: torch.Tensor, inplace: bool = False) -> torch.Tensor:
    """
    Applies HardSigmoid function element-wise.

    See :class:`torchlayers.activations.HardSigmoid` for more details.

    Arguments:
        tensor :
            Tensor activated element-wise
        inplace :
            Whether operation should be performed `in-place`. Default: `False`

    Returns:
        torch.Tensor:
    """
    return torch.nn.functional.hardtanh(tensor, min_val=0, inplace=inplace)
5f4d87749ddca014076f46e0af6e9b3c4308ddf7
10,621
import re


def scan(file, blocksize=10000):
    """Get the number of molecules in an sd file"""
    pat = re.compile("^[$][$][$][$]", re.MULTILINE)
    text = file.read(blocksize)
    count = 0
    while text:
        g = pat.findall(text)
        count += len(g)
        if text[-1] == "$" and text[-4] != '$':
            # a "$$$$" delimiter may straddle the block boundary; keep the tail
            next = text[-6:]
            text = "".join([next, file.read(blocksize)])
        else:
            text = file.read(blocksize)
    return count
e3b1af1e65e28146d609a0c2841f195ad783b59e
10,622
import pickle


def filter_by_freq(word_list, chained_words, min_freq):
    """
    Replaces words occurring less than min_freq with the unknown tag <unk>
    and dumps the replaced words to a file (via pickle)
    """
    unk_list = []
    print("Filtering words occurring less than %d times..." % min_freq)
    for word in word_list:
        if word_list.count(word) < min_freq:
            chained_words = chained_words.replace(' ' + word + ' ', ' <unk> ')
            unk_list.append(word)
    print("Filtered %d words." % len(unk_list))
    if len(unk_list) > 0:
        unk_list_path = 'embeddings/unk_list.pkl'
        with open(unk_list_path, 'wb') as ufp:
            pickle.dump(unk_list, ufp)
        print("Saved filtered words in %s." % unk_list_path)
    word_list = chained_words.split(' ')
    return chained_words, word_list
76186e9cb0dcc494575bc00c6994601e4a891483
10,623
def display_time(seconds, granularity=1):
    """
    Turns seconds into days, hours, minutes and seconds.
    Granularity determines how many time units should be returned. EG:

    # 2 time units: days and hours
    1934815, 2 = '22 days, 9 hours'

    # 4 time units
    1934815, 4 = '22 days, 9 hours, 26 minutes, 55 seconds'
    """
    result = []
    intervals = (
        # weeks weren't necessary so I removed them
        ('days', 86400),    # 60 * 60 * 24
        ('hours', 3600),    # 60 * 60
        ('minutes', 60),
        ('seconds', 1),
    )

    for name, count in intervals:
        value = seconds // count
        if value:
            seconds -= value * count
            if value == 1:
                name = name.rstrip('s')
            result.append("{} {}".format(value, name))
    return ', '.join(result[:granularity])
d4b94ffafcdbda99526ecc906ed8379d7fc5edab
10,624
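A quick check of display_time above:

print(display_time(1934815, 2))  # 22 days, 9 hours
print(display_time(90061, 4))    # 1 day, 1 hour, 1 minute, 1 second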
def join_values(values):
    """Return the values as a space-delimited string."""
    return " ".join(str(v) for v in values)
60fd4acbafc4619c134ae67d224df408cad12ab0
10,626
def add_base32_padding(base32_input_no_padding):
    """Return a base32 string with the correct padding

    Parameters:
        base32_input_no_padding (str): base32 string without padding
    """
    result = base32_input_no_padding
    while (len(result) % 8) != 0:
        result += '='
    return result
9f98d4c705b5932171954aee3fb90c34681ea7fc
10,627
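A small sketch showing add_base32_padding above feeding the standard library decoder (using the RFC 4648 test vector for "foob"):

import base64

padded = add_base32_padding('MZXW6YQ')   # 'MZXW6YQ='
print(base64.b32decode(padded))          # b'foob'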
def magic(num_lst, n, k):
    """Apply the magic to num_lst k times"""
    a = num_lst[:]
    for j in range(k):
        b = a[1:]  # another way to "rotate" the list
        b.append(a[0])
        for i in range(n):
            a[i] = (a[i] + b[i]) % 100  # take everything modulo 100
    return a
1f8e1e766add89e0f03a6ef778fe6c1100886125
10,628
def format_number(number):
    """
    Round number to two decimals

    :param number: input number
    :return: number rounded to two decimals
    """
    return format(float(number), '.2f')
621e48e46aef43c6ede38b62a3608b8e193d7df7
10,629
def kneel_down(score_diff, timd, secs_left, dwn):
    """Return 1 if the offense can definitely kneel out the game, else return 0."""
    if score_diff <= 0 or dwn == 4:
        return 0
    if timd == 0 and secs_left <= 120 and dwn == 1:
        return 1
    if timd == 1 and secs_left <= 87 and dwn == 1:
        return 1
    if timd == 2 and secs_left <= 48 and dwn == 1:
        return 1
    if timd == 0 and secs_left <= 84 and dwn == 2:
        return 1
    if timd == 1 and secs_left <= 45 and dwn == 2:
        return 1
    if timd == 0 and secs_left <= 42 and dwn == 3:
        return 1
    return 0
2865d32484015f0e4a8f1cd28d68ec1a57847ce4
10,632
import math


def quantity_string(quantity, unit, computer_prefix=False):
    """Return a human-friendly string representing a quantity by adding
    prefixes and keeping the number of significant figures low.

    'computer_prefix' determines whether each prefix step represents 1024
    or 1000.

    Examples:
    >>> quantity_string(1024, "B", True)
    '1.0KB'
    >>> quantity_string(40000, "m", False)
    '40km'
    >>> quantity_string(0.01, "m", False)
    '0.010m'
    """
    if quantity == 0:
        return "0%s" % unit

    # Units like m, B, and Hz are typically written right after the number.
    # But if your unit is "file" or "image" then you'll want a space between
    # the number and the unit.
    if len(unit) > 2:
        space = " "
    else:
        space = ""

    if computer_prefix:
        prefixes = ["", "K", "M", "G", "T"]
        prefix_multiplier = 1024
    else:
        prefixes = ["", "k", "M", "G", "T"]
        prefix_multiplier = 1000

    divisor = 1
    for p in prefixes:
        digits = int(math.log10(quantity / divisor)) + 1
        if digits <= 3:
            format = "%%.%df%s%s%s" % (max(2 - digits, 0), space, p, unit)
            return format % (float(quantity) / divisor)
        divisor *= prefix_multiplier

    # No prefix available. Go scientific.
    return "%.2e%s%s" % (quantity, space, unit)
04aa8743e045c2aaf80115984c30f091d695de77
10,633
def force_categorical_determination(table):
    """Find string columns using a 'computationally expensive' approach"""
    source_shape = table.shape
    columns_number = source_shape[1] if len(source_shape) > 1 else 1

    categorical_ids = []
    non_categorical_ids = []
    # For every column in table make check for first element
    for column_id in range(0, columns_number):
        column = table[:, column_id] if columns_number > 1 else table
        col_shape = column.shape
        for i in column:
            # Check if element is string object or not until the first appearance
            if len(col_shape) == 2 and isinstance(i[0], str):
                # Column looks like [[n], [n], [n]]
                categorical_ids.append(column_id)
                break
            elif len(col_shape) == 1 and isinstance(i, str):
                # Column [n, n, n]
                categorical_ids.append(column_id)
                break
        if column_id not in categorical_ids:
            non_categorical_ids.append(column_id)

    return categorical_ids, non_categorical_ids
012fb261c39e2bd5106df4e88af9703821158146
10,634
import requests
import sys


def get_calls(article_name, number_of_lines=7):
    """
    returns hover data in raw format
    this is used in app callbacks to display hover summary data in a text box
    just enter article name like "Atom" or "Proton"
    """
    S = requests.Session()
    URL = "https://en.wikipedia.org/w/api.php"
    PARAMS = {
        "action": "query",
        "format": "json",
        "titles": article_name,
        "prop": "extracts",
        "exsentences": number_of_lines,
        "exlimit": "1",
        "explaintext": "1",
        "formatversion": "2",
    }
    R = S.get(url=URL, params=PARAMS)
    DATA = R.json()
    sys.stdout.flush()
    return DATA
05ba3b39b39c387af4d91b91d3dd4eb03a9cda27
10,636
def vector_to_dictionary(vector, layers):
    """
    Convert the parameter vector of a model into a dictionary used by the model

    Arguments:
        vector -- one-dimensional vector in orders: "W1", "W2", "WL", "b1", "b2", "bL"
        layers -- list of (n_units, activations) pairs that define network
                  structure, including input layer X

    Returns:
        ret -- parameter dictionary, {"W1": ..., "WL": ..., "b1": ..., "bL": ...,
               "r1": ..., "rL": ...}
    """
    ret = {}
    idx = 0

    # recover Ws first
    for l in range(1, len(layers)):
        length = layers[l][0] * layers[l - 1][0]
        ret["W" + str(l)] = vector[idx:idx + length].copy().reshape(
            (layers[l][0], layers[l - 1][0]))
        idx = idx + length

    # recover bs
    for l in range(1, len(layers)):
        length = layers[l][0]
        ret["b" + str(l)] = vector[idx:idx + length].copy().reshape(
            (layers[l][0], 1))
        idx = idx + length

    # recover rs
    for l in range(1, len(layers)):
        length = layers[l][0]
        ret["r" + str(l)] = vector[idx:idx + length].copy().reshape(
            (layers[l][0], 1))
        idx = idx + length

    return ret
33f628463546892ae8127cad25bda1e7ae19a78a
10,637
def load_text(filepath):
    """Loads text from a file and returns it as a string.

    Args:
        filepath (str): The path to the file to be loaded.

    Returns:
        text (str): The text loaded from the file.

    Examples
    --------
    >>> load_text('text.txt')
    """
    with open(filepath, encoding='utf-8') as file:
        text = file.read()
    return text
66a925216a33f8b9e7abab4b7ada6ad2716cefd6
10,638
def f_call_one_optional_kwd(a, /, *, b=2):
    """
    >>> f_call_one_optional_kwd(1)
    (1, 2)
    >>> f_call_one_optional_kwd(1, b=3)
    (1, 3)
    """
    return (a, b)
4bcbc58d90f093775c4f52dcdfce0d0deb5d6686
10,639
def unique(it):
    """Return a list of unique elements in the iterable, preserving the order.

    Usage::

        >>> unique([None, "spam", 2, "spam", "A", "spam", "spam", "eggs", "spam"])
        [None, 'spam', 2, 'A', 'eggs']
    """
    seen = set()
    ret = []
    for elm in it:
        if elm not in seen:
            ret.append(elm)
            seen.add(elm)
    return ret
8e182467eb8179c2b9168470f8b683710ad37b56
10,640
def create_read_fmt(read_id: str) -> str:
    """create a bitstruct format for any given read id up to 512 bits"""
    if len(read_id) > 64:
        raise ValueError("Read ID is too large")
    padding = 512 - len(read_id)*8
    r_fmt = "t{0}p{1}".format(len(read_id)*8, padding)
    return r_fmt + ("u3" * 150) + ("u6" * 150)
f7319ffc62b55859ed8af3e6b7e044d9481f7384
10,642
def get_data_for_tpu(tpu_name, economic_dataframe, marry_dataframe, edu_dataframe,
                     population_dataframe):
    """
    :param tpu_name: the name of the tpu
    :param economic_dataframe: the dataframe that contains median income and employment rate
    :param marry_dataframe: the dataframe which saves the marital status
    :param edu_dataframe: the dataframe that saves the education information
    :return:
    """
    # The economic dataframe contains median income and employment rate
    # median income
    median_income = list(economic_dataframe.loc[
        economic_dataframe['Small Tertiary Planning Unit Group'] == tpu_name
    ]['Median Monthly Income from Main Employment(1)'])[0]
    # employment rate
    employment_rate = list(economic_dataframe.loc[
        economic_dataframe['Small Tertiary Planning Unit Group'] == tpu_name
    ]['Labour Force Participation Rate(2)'])[0]
    # marital status
    start_row_index_marry = \
        marry_dataframe.loc[marry_dataframe['Small Tertiary Planning Unit Group'] == tpu_name].index.values[0]
    selected_tpu_dataframe = marry_dataframe.iloc[start_row_index_marry:start_row_index_marry + 6, :]
    marital_rate = int(selected_tpu_dataframe.iloc[1, 4]) / int(selected_tpu_dataframe.iloc[5, 4])
    # average population in each tpu
    average_population = population_dataframe.loc[population_dataframe['TPU'] == tpu_name, 'avg_population'].tolist()[0]
    tpu_area = population_dataframe.loc[population_dataframe['TPU'] == tpu_name, 'ShapeArea'].tolist()[0]
    # print(average_population)
    # education
    start_row_index_edu = \
        edu_dataframe.loc[edu_dataframe['Small Tertiary Planning Unit Group'] == tpu_name].index.values[0]
    selected_edu_dataframe = edu_dataframe.iloc[start_row_index_edu:start_row_index_edu + 8, :]
    diploma = selected_edu_dataframe.iloc[4, 4]
    sub_degree = selected_edu_dataframe.iloc[5, 4]
    degree = selected_edu_dataframe.iloc[6, 4]
    # if the value equals '-', it means zero
    diploma_num = 0 if diploma == '-' else int(diploma)
    sub_degree_num = 0 if sub_degree == '-' else int(sub_degree)
    degree_num = 0 if degree == '-' else int(degree)
    numerator = diploma_num + sub_degree_num + degree_num
    denominator = int(selected_edu_dataframe.iloc[7, 4])
    edu_rate = numerator / denominator
    return median_income, employment_rate, marital_rate, edu_rate, average_population, tpu_area
fe71fd15fab28684f848b80e68746fe1e3a73f51
10,643
def spawn_dates_times(df, spawn_dates=True, spawn_times=False):
    """
    Build date/times columns from a timeseries dataframe

    :param df: (pd.DataFrame) - dataframe with datetime index
    :param spawn_dates: (boolean) - whether to spawn year, month, day cols
    :param spawn_times: (boolean) - whether to spawn hour, minute, second cols
    :return df: (pd.DataFrame) - dataframe with datetime index
    """
    if spawn_dates:
        ind = df.index
        df = df.assign(year=ind.year, month=ind.month, day=ind.day)
    if spawn_times:
        ind = df.index
        df = df.assign(hour=ind.hour, minute=ind.minute, second=ind.second)
    return df
ee58b5117d65fa3f217b16973dcdc06918c5474b
10,645
def connection():
    """
    Function that provides the SQL connection to be used by the ORM.

    Returns
    -------
    (bool, SQL connection object)
        The first element of the tuple indicates whether the connection is
        to be closed after every query. The second element should provide an
        SQL connection object compliant with PEP-249.
    """
    return None, None
65b0122a0cc7b57ab553ac85a6db576e3d242de6
10,646
def calculate_F1(max, compensate=0, threshold=0.5):
    """
    Args:
        max (M,N)
    return:
        TP, FP, FN, F1
    """
    if compensate < 0:
        compensate = 0
    TP = 0
    FP = 0
    FN = 0
    for i in range(len(max)):
        if max[i] > threshold:
            TP = TP + 1
        else:
            FP = FP + 1
    FN = compensate
    # precision = TP/(TP + FP)
    # recall = TP/(TP + FN)
    F1 = 2*TP / (2*TP + FN + FP)
    return TP, FP, FN, F1
6aab529bbf1837472f0d711997b31fca0fcc3eee
10,647
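A tiny check of calculate_F1 above on hypothetical scores:

scores = [0.9, 0.8, 0.3, 0.7]
TP, FP, FN, F1 = calculate_F1(scores, compensate=1)
# TP=3, FP=1, FN=1, F1 = 2*3 / (2*3 + 1 + 1) = 0.75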
import subprocess
import logging


def subprocess_run(subprocessCMD, shell=False):
    """
    Run a subprocess command, passed as a list of arguments.
    Executes by default with shell=False, then logs STDOUT and STDERR.
    """
    procCall = subprocess.run(subprocessCMD, stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE, shell=shell)
    logging.info('----- STDOUT = \n{}'.format(procCall.stdout.decode('utf-8')))
    logging.info('----- STDERR = \n{}'.format(procCall.stderr.decode('utf-8')))
    return procCall
2a2307e133a83a5ec0c8abb100f39635242429e8
10,648
def scale_sample(xy_sample, grid):
    """
    This function is used to apply the shifts and scaling to the sample.
    """
    sample = xy_sample.copy()
    if grid.dim == 2:
        sample[:, 2] = sample[:, 2] + grid.shifts[0]*sample[:, 0] + grid.shifts[1]*sample[:, 1]
        sample[:, 0] = sample[:, 0]/grid.b0
        sample[:, 1] = sample[:, 1]/grid.b1
        return sample
    else:
        sample[:, 3] = sample[:, 3] + grid.shifts[0]*sample[:, 0] + grid.shifts[1]*sample[:, 1] + grid.shifts[2]*sample[:, 2]
        sample[:, 0] = sample[:, 0]/grid.b0
        sample[:, 1] = sample[:, 1]/grid.b1
        sample[:, 2] = sample[:, 2]/grid.b2
        return sample
3f0ae32f198ec75d112ff7d36ea2505dcd75dbea
10,649
import os


def filter_filename(dirname, include=[], exclude=[], array_id=None):
    """Filter filenames in a directory"""
    def get_array_id(filename):
        array_id = filename.split("_")[-2]
        try:
            array_id = eval(array_id)
        except:
            pass
        return array_id

    filename_collect = []
    if array_id is None:
        filename_cand = [filename for filename in os.listdir(dirname)]
    else:
        filename_cand = [filename for filename in os.listdir(dirname)
                         if get_array_id(filename) == array_id]

    if not isinstance(include, list):
        include = [include]
    if not isinstance(exclude, list):
        exclude = [exclude]

    for filename in filename_cand:
        is_in = True
        for element in include:
            if element not in filename:
                is_in = False
                break
        for element in exclude:
            if element in filename:
                is_in = False
                break
        if is_in:
            filename_collect.append(filename)

    return filename_collect
309eabb4c7e3b25e11bafd66a0cdc9a91169ad08
10,650
def parse_description(issue):
    """
    :param issue: Issue object.
    :return: roga_type: a string that should be listeria/salmonella/vtec
             (otherwise they'll get others later), and
             seq_ids: List of seqIDs which will later be written to file and
             used in ROGA report generation.
    """
    # Take whatever is on the first line, assume it's the ROGA type.
    roga_type = issue.description.split()[0].lower()
    seq_ids = list()
    lines = issue.description.split()
    # Assume that every line other than the first (that has a - in it) is a
    # seqID and append to a list of seqIDs that will get returned to be used
    # for ROGA generation.
    for i in range(1, len(lines)):
        if "-" in lines[i]:
            seq_ids.append(lines[i])
    return roga_type, seq_ids
13ea063546a327dd94eff1fca8f03d9ea7ea1476
10,651
def test_metathesis(word, anagram):
    """
    Tests if a word and an anagram are a metathesis pair

    This is only true if the words:
        I. are anagrams
        II. differ in two places
    """
    count = 0
    for para in zip(word, anagram):
        if para[0] != para[1]:
            count += 1
    return count == 2
c26090d92b526ce7d2a84138e4248c884a19f5bc
10,652
def standardize(ds, dim="time"):
    """Standardize Dataset/DataArray

    .. math::
        \\frac{x - \\mu_{x}}{\\sigma_{x}}

    Args:
        ds (xarray object): Dataset or DataArray with variable(s) to standardize.
        dim (optional str): Which dimension to standardize over (default 'time').

    Returns:
        stdized (xarray object): Standardized variable(s).
    """
    stdized = (ds - ds.mean(dim)) / ds.std(dim)
    return stdized
99de2a60e340a7ad451daec27aad028485d4aa59
10,653
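A short usage sketch for standardize above, on a hypothetical DataArray:

import numpy as np
import xarray as xr

da = xr.DataArray(np.arange(10.0), dims="time")
z = standardize(da)
# z now has mean ~0 and standard deviation ~1 along "time"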
def retrieve_past_timestamps(ref_timestamp, streams_stamps, prev_timestamp=0):
    """
    get timestamps between two annotated frames.

    Parameters
    ----------
    ref_timestamp : The timestamp of the annotated image.
    streams_stamps : Timestamps dict of the whole sequence
    prev_timestamp : The timestamp of the previously annotated image.

    Returns
    -------
    dict: The timestamps of the frames in between the two annotated frames
    """
    previous_stamps = {}
    for index in streams_stamps:
        if (streams_stamps[index] > prev_timestamp) and (streams_stamps[index] <= ref_timestamp):
            previous_stamps[index] = streams_stamps[index]
    return previous_stamps
12b5918a1273129c603f4287102677d26946c202
10,654
import smtplib


def os_error():
    """Operating system error. AKA EnvironmentError / IOError."""
    try:
        smtplib.SMTP('localhost:a_port')
    except OSError:
        return "bad port number"
2b99d466f04369ab98bdb40c7a3417dd8c089744
10,655
import requests
import os
import random


def select_random_hashtag():
    """
    Returns a random hashtag from the latest posts, or travel if none was found
    """
    r = requests.get('https://api.instagram.com/v1/users/self/media/recent?access_token={}'.format(os.environ['ACCESS_TOKEN']))
    if r.status_code == 200:
        data = r.json()
        tags = set()
        for media in data.get('data'):
            tags.update(media.get('tags'))
        return random.choice(list(tags))
    return 'travel'
914ebd4af4ec1ce92d0b5325ca1cd9dc007ccf3d
10,656
def make_range_partition(min_val, max_val):
    """
    Returns a new partitioning function that partitions keys in the range
    *[min_val:max_val]* into equal sized partitions.

    The number of partitions is defined by the *partitions* parameter
    """
    r = max_val - min_val
    f = ("lambda k_n_p: int(round(float(int(k_n_p[0]) - {0}) / {1} * (k_n_p[1] - 1)))"
         .format(min_val, r))
    return eval(f)
6b9c15f93312a913dff432f0954e409b69f97572
10,659
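A brief sketch of make_range_partition above; the returned function only reads the first two elements of its argument, here assumed to be a (key, number_of_partitions) pair:

part = make_range_partition(0, 100)
print(part(('50', 4)))   # key 50 of [0:100] with 4 partitions -> 2
print(part(('0', 4)))    # -> 0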
from re import S
from re import T


def construct_scale(root_note, scale_signature, scale_length=None):
    """Construct a musical scale from a root note

    Arguments:
    root_note -- A root note object from class Note
    scale_signature -- array of frequency ratios between consecutive notes on the scale
    scale_length -- Defaults to standard scale length. Specify when needing
                    non-standard scale length (ex.: span multiple octaves)
    """
    if not scale_length:
        # If not specified, default to standard scale length
        scale_length = len(scale_signature)
    scale_notes = [root_note]
    note = root_note
    for i in range(scale_length):
        halfstep_count = 1 if scale_signature[i % len(scale_signature)] == S \
            else 2 if scale_signature[i % len(scale_signature)] == T else 3
        note = note.get_next_step_note(halfstep_count)
        scale_notes.append(note)
    return scale_notes
555099024c4e9fc8d2b487ddc7e57138084e494f
10,660
def get_env_logname(name):
    """get_env_logname"""
    modname = name.split('.')[0].upper()
    envname = f"{modname}_LOGLEVEL"
    # print(envname)
    return envname
6fa8032ee77d94695363d976ea06a4e58f69c7f7
10,661
def get_objhash_from_object_desc(gitobjcontent):
    """returns object hash without control characters"""
    return gitobjcontent.split(" ")[1][:40]
662357569cb22bf77d6236e7c53a71f19f29823a
10,663
def completeTreeNames(tree, useBS=False):
    """
    Takes:
        - tree (ete3.Tree)
        - useBS (bool) [default = False]: uses bootstrap to name nodes

    Returns:
        (ete3.Tree): the tree, but where the nodes without a name now have
        one that corresponds to their post-order OR their bootstrap
    """
    for i, n in enumerate(tree.traverse('postorder')):
        if n.name == "":
            print(n.support)
            if useBS:
                n.name = str(int(n.support))
            else:
                n.name = str(i)
    return tree
07077fce3ea18ba40af578cd1b0768a71e7b16c8
10,664
import random


def gen_rand_lists():
    """
    Generate two lists with random numbers in [0,5], output them, compare
    their means and tell which is greater
    """
    def gen_list():
        ll = []
        for i in range(5):
            ll.append(random.randint(0, 5))
        return ll

    def calc_average(x):
        sum = 0
        for i in x:
            sum += i
        return sum/len(x)

    list1 = gen_list()
    list2 = gen_list()
    list1_average = calc_average(list1)
    list2_average = calc_average(list2)
    print("list1:", list1)
    print("list2:", list2)
    if list1_average > list2_average:
        print("Average of list1 is %.2f and bigger than %.2f of list2"
              % (list1_average, list2_average))
    elif list1_average == list2_average:
        print("Average of list1 %.2f is the same as list2 %.2f"
              % (list1_average, list2_average))
    else:
        print("Average of list2 is %.2f and bigger than %.2f of list1"
              % (list2_average, list1_average))
03195edb47f43ce17933188523235f87472667d1
10,666
import random


def get_boundary_mutation_function(minimum, maximum):
    """
    Returns a function that pushes a value to either the minimum or maximum
    allowed value for a gene; see :ref:`mutation-functions`

    :Valid For: ``"int"``, ``"[int]"``, ``"float"``, and ``"[float]"`` gene types

    :param minimum: the minimum allowed value
    :type minimum: int/float

    :param maximum: the maximum allowed value
    :type maximum: int/float

    :returns: either ``minimum`` or ``maximum`` (equally likely)
    """
    return lambda value: minimum if random.random() < 0.5 else maximum
0f3e2b6b4d69e758fb1d439d2785306da7f537e8
10,667
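A quick sketch of get_boundary_mutation_function above:

mutate = get_boundary_mutation_function(0, 10)
print(mutate(7))  # 0 or 10, each with probability 0.5 (the current value is ignored)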
def read_raw_parse(path):
    """
    Read GamParse's forum output into a list.

    :param path: path to a file containing GamParse output
    :return: a list containing the lines of the input file
    """
    with open(path, 'r') as input_handle:
        return input_handle.read().splitlines()
26926ca6a164c29681a0cf45a4114ffc5cff0fa9
10,668