Columns: content (string, lengths 35 to 416k), sha1 (string, length 40), id (int64, 0 to 710k)
import random


def evolve(pop, mut_rate, mu, lambda_):
    """
    Evolve the population *pop* using the mu + lambda evolutionary strategy

    :param pop: a list of individuals, whose size is mu + lambda.
        The first mu ones are previous parents.
    :param mut_rate: mutation rate
    :return: a new generation of individuals of the same size
    """
    pop = sorted(pop, key=lambda ind: ind.fitness)  # stable sorting
    parents = pop[-mu:]
    # generate lambda new children via mutation
    offspring = []
    for _ in range(lambda_):
        parent = random.choice(parents)
        offspring.append(parent.mutate(mut_rate))
    return parents + offspring
e2510d0ce92d0c5703b9166778f48581db4aca2f
15,007
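A quick sketch of how evolve() might be driven. The Individual class below is hypothetical (not part of the snippet above); it is assumed only to expose a fitness attribute and a mutate() method returning a new individual.

import random

class Individual:
    def __init__(self, fitness):
        self.fitness = fitness

    def mutate(self, mut_rate):
        # toy mutation: jitter the fitness by at most mut_rate
        return Individual(self.fitness + random.uniform(-mut_rate, mut_rate))

mu, lambda_ = 2, 4
pop = [Individual(random.random()) for _ in range(mu + lambda_)]
next_gen = evolve(pop, mut_rate=0.1, mu=mu, lambda_=lambda_)
assert len(next_gen) == mu + lambda_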
import re


def strip_comments(source_code):
    """
    Strips comments from source code to increase analysis accuracy.

    The function is similar to strip_quotations. It removes all comments
    from the source_code. It is also used to prevent false positives.
    It uses a regex to match and remove line comments and then block comments.

    :param source_code: the string that contains source code
    :return: same source code as source but without any comments
    """
    line_comment_regex = "\\/\\/.*\\n"
    block_comment_regex = "\\/\\*(.|[\\r\\n])*?\\*\\/"
    line_comments_removed = re.sub(line_comment_regex, "\n", source_code)
    all_comments_removed = re.sub(block_comment_regex, "\n", line_comments_removed)
    return all_comments_removed
7e3842940367caaae875ec6c24a1a40fdf7efbc0
15,008
def preprocess_data(data, label_encoder):
    """ Get input columns from raw data """
    # Remove Tree Name column
    input_data = data

    # Encode categorical variables
    for i, item in enumerate(input_data[0, :]):
        if isinstance(item, str):
            input_data[:, i] = label_encoder.fit_transform(input_data[:, i])
    input_data = input_data.astype('float32')

    # Return data
    return input_data
50f03f59a3ea98915d760f74a888780af69b1c68
15,009
def make_ratings_hash(ratings):
    """
    Make a hashtable of ratings indexed by itemId and pointing to
    the vector (genres, decade) that fully characterize an item.
    """
    rhash = {}
    # For every rating, check if the relevant item is already in the map.
    # If not, add it to the map. Key is item_id, mapped value is the vector
    # consisting of the possible genres and the decade of the movie.
    # (.iteritems() and .ix are long deprecated; .items() and .loc are the
    # modern pandas equivalents.)
    for row_indx, itemid in ratings['itemid'].items():
        if itemid not in rhash:
            itemData = ratings.loc[row_indx, 'Action':'decade']
            rhash[itemid] = itemData
    return rhash
e105f08dbdbec544dccdcd299aaf189ef64691b7
15,012
def predecessor(self, root):
    """
    One step left and then always right
    """
    root = root.left
    while root.right:
        root = root.right
    return root.val
77b22323f4623df3002b9e7d12123d4ee9a98cad
15,013
def get_rules(rules_module):
    """
    Get rule functions from module

    :param rules_module: module with rules implementations
    :return: rule functions
    """
    rules = []
    for item in dir(rules_module):
        if item.startswith("rule"):
            rules.append(getattr(rules_module, item))
    return rules
41b9428f0893a153700a19f4237453af8910a759
15,014
def format_pun_list(header, entries, footer, *, cnt=1):
    """
    Generate the management list of entries.
    """
    msg = header
    for ent in entries:
        msg += '{}) {}\n Hits: {:4d}\n\n'.format(cnt, ent.text, ent.hits)
        cnt += 1
    msg = msg.rstrip()
    msg += footer
    return msg
3393c0f01fba1cef412a5b68e1ca180af86b3693
15,015
import requests


def _download_chunk(args):
    """Download a single chunk.

    :param args: Tuple consisting of (url, start, finish) with start and
        finish being byte offsets.
    :return: Tuple of chunk id and chunk data
    """
    idx, args = args
    url, start, finish = args
    range_string = '{}-'.format(start)

    if finish is not None:
        range_string += str(finish)

    response = requests.get(url, headers={'Range': 'bytes=' + range_string})  # Actual HTTP get download request
    return idx, response.content
eb88e1200fff8d336908d2247e12c46576bb4422
15,017
import torch


def discounted_cumsum(rewards, discount):
    """Calculates the cumulative sum of discounted rewards

    Arguments:
        rewards {torch.Tensor} -- rewards
        discount {float} -- discount factor

    Returns:
        torch.Tensor -- cumulative sum of discounted rewards
    """
    discount **= torch.arange(0, rewards.shape[0])
    # .flip() requires an explicit dimension; calling it without one raises
    disc_cumsum = torch.cumsum(discount * rewards, 0).flip(0)
    return disc_cumsum
b0e2cd45c4cb6882a84784c64a20ae64995f26a6
15,019
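A hand-checked example of the helper above, with the .flip(0) fix applied:

import torch

rewards = torch.tensor([1.0, 1.0, 1.0])
# 0.5 ** [0, 1, 2] = [1.0, 0.5, 0.25]; element-wise product with rewards,
# then cumsum gives [1.0, 1.5, 1.75], flipped to [1.75, 1.5, 1.0]
out = discounted_cumsum(rewards, 0.5)
assert torch.allclose(out, torch.tensor([1.75, 1.5, 1.0]))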
def histogram(content):
    """ Takes a source_text contents of the file as a string and returns a
    histogram data structure that stores each unique word along with the
    number of times the word appears in the source text."""
    words = content.split()
    # Dictionary of word -> count (starts empty; the original carried
    # leftover test data and a substring-based membership check)
    histogram_dic = {}
    for word in words:
        if word not in histogram_dic:
            histogram_dic[word] = 1
        else:
            histogram_dic[word] += 1
    return histogram_dic
28af6161b05472fef9632bfb0df7e0672ed9359c
15,020
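With the leftover test data removed, a short usage example (counts worked out by hand):

hist = histogram("one fish two fish red fish blue fish")
assert hist == {'one': 1, 'fish': 4, 'two': 1, 'red': 1, 'blue': 1}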
def _StripPC(addr, cpu_arch):
    """Strips the Thumb bit from a program counter address when appropriate.

    Args:
        addr: the program counter address
        cpu_arch: Target CPU architecture.

    Returns:
        The stripped program counter address.
    """
    if cpu_arch == "arm":
        return addr & ~1
    return addr
9127f10cbbeb71814f6c9f30e7446d5f8233bb67
15,021
from pathlib import Path
import pickle


def fetch_data(name):
    """Fetches data with the given name.

    Data is stored in a file named name.pickle in the current directory.
    """
    filename = Path(__file__).parent / (name + ".pickle")
    with open(filename, 'rb') as f:
        return pickle.load(f)
2bc908ec29555f3d7dd4263b816cb9727c7f8f85
15,022
import random


def get_random_replicates(well_df, replicate_cardinality):
    """This function returns a list of random replicates that are not of the
    same compounds or found in the current cpd's size list
    """
    while True:
        # Randomly sample replicate names
        random_replicates = random.sample(well_df.replicate_name.tolist(), replicate_cardinality)
        # Make sure there are no duplicate perturbations in the random sample
        unique_rep_count = well_df.query("replicate_name in @random_replicates").pert_iname.nunique()
        if unique_rep_count == replicate_cardinality:
            break
    return random_replicates
294d7c431d9e80baca25cc8cb6022558874c3674
15,023
import curses


def set_window_parameters():
    """
    Function to set all the window sizes and placements on the screen.
    Returns a dictionary of all curses windows where key is a string that
    is the title of the window (eg 'twitter'), and the value is the curses
    window object.
    """
    max_y, max_x = curses.initscr().getmaxyx()

    VERTICAL_SPLIT = 0.6
    HORIZONTAL_SPLIT = 0.5
    INPUT_WINDOW_LINES = 2
    INPUT_INDENT = 1

    window_width = {
        'twitter': int(max_x * (1 - VERTICAL_SPLIT)),
        'monitor': int(max_x * VERTICAL_SPLIT),
        'rss':     int(max_x * VERTICAL_SPLIT),
        'input':   int(max_x * VERTICAL_SPLIT) - INPUT_INDENT
    }
    window_height = {
        'twitter': int(max_y),
        'monitor': int(max_y * HORIZONTAL_SPLIT) - INPUT_WINDOW_LINES,
        'rss':     int(max_y * HORIZONTAL_SPLIT),
        'input':   INPUT_WINDOW_LINES
    }
    window_row_begin = {
        'twitter': 0,
        'monitor': int(max_y * HORIZONTAL_SPLIT),
        'rss':     0,
        'input':   max_y - INPUT_WINDOW_LINES
    }
    window_column_begin = {
        'twitter': int(max_x * VERTICAL_SPLIT),
        'monitor': 0,
        'rss':     0,
        'input':   INPUT_INDENT
    }

    twitter_window = curses.newwin(
        window_height['twitter'], window_width['twitter'],
        window_row_begin['twitter'], window_column_begin['twitter'])
    rss_window = curses.newwin(
        window_height['rss'], window_width['rss'],
        window_row_begin['rss'], window_column_begin['rss'])
    monitor_window = curses.newwin(
        window_height['monitor'], window_width['monitor'],
        window_row_begin['monitor'], window_column_begin['monitor'])
    input_window = curses.newwin(
        window_height['input'], window_width['input'],
        window_row_begin['input'], window_column_begin['input'])

    window_dict = {
        'twitter': twitter_window,
        'monitor': monitor_window,
        'rss':     rss_window,
        'input':   input_window
    }
    return window_dict
9be542c065665ab22afdb692a635fa31adfa9a32
15,024
def compute_weighted_profile_average(profile, attribute):
    """
    :param profile:
    :param attribute:
    """
    return sum([getattr(entry, attribute) * weight for entry, weight in profile]) / \
        sum([weight for entry, weight in profile])
9bed1ff173c9cda9a88458ff1484d903a5ee2696
15,025
def prune(set_list):
    """
    Prunes combinations that are not identical in symbols.

    :param set_list: the list of sets containing nodes.
    :return the pruned list
    """
    symbol_set_list = [{symbol for _, _, symbol in current_set} for current_set in set_list]
    return [sl for _, sl in list(filter(lambda t: len(t[0]) == 1, zip(symbol_set_list, set_list)))]
530c6be1df860bed87540ba2080096f2c318e9bf
15,027
def domains_to_xyt(xyt, domains):
    """
    This function takes in xyt data and a list of domains and converts the
    list of domains into a list of xyt points within those domains. This list
    can then be used to graph the domains onto the entire worm path, for
    visualization.

    :param xyt: A list of xyt points.
    :param domains: A list of domains, which are themselves a list of two
        values, representing time indices that frame a period of zero
        movement in the worm path.
    :return: Three lists, each one representing values of x, y, and t within
        the given input domains. These can be zipped together to get a list
        of xyt points within the domains.
    """
    x, y, t = zip(*xyt)
    domains_x = []
    domains_y = []
    domains_t = []
    for domain in domains:
        left = domain[0]
        right = domain[1]
        domains_x.extend(x[left:right])
        domains_y.extend(y[left:right])
        domains_t.extend(t[left:right])
    return domains_x, domains_y, domains_t
56bb2614d3f612913b19c550ed042e6a635ae4fa
15,028
def replicaset_status(client, module):
    """
    Return the replicaset status document from MongoDB
    # https://docs.mongodb.com/manual/reference/command/replSetGetStatus/
    """
    rs = client.admin.command('replSetGetStatus')
    return rs
eb46cd1e28ecb1f2c6c5a222d176438677bd8e8c
15,029
def new_user_form():
    """Display an HTML form for user creation."""
    return """
        <form action='/users' method='POST'>
            Username: <input name='username' type='text'/>
            Password: <input name='password' type='password'/>
            Bio: <textarea name='bio'></textarea>
            <button type='submit'>Submit</button>
        </form>
    """
1aeb4ba99ec5aa91bdc9c064da020e3eebb6b9ed
15,032
from textwrap import dedent


def minimal_html_page(
    body: str, css: str = "", title: str = "Standalone HTML", lang: str = "en"
):
    """Return a template for a minimal HTML page."""
    return dedent(
        """\
        <!DOCTYPE html>
        <html lang="{lang}">
        <head>
        <meta charset="utf-8">
        <title>{title}</title>
        <style>
        {css}
        </style>
        </head>
        <body>
        {body}
        </body>
        </html>
        """
    ).format(title=title, lang=lang, css=css, body=body)
71397e60fefab240dbd0b173e437ef90be4b8493
15,033
def _common_strategies(choices):
    """Generate some common strategies to deal with multiple references."""
    return {'min': min(choices),
            'max': max(choices),
            'avg': sum(choices) * 1. / len(choices)
            }
ffe0070667aabe2ab8072bb39a6e452341780c41
15,034
import struct


def doubleToRawLongBits(value):
    """
    :param value: A float value
    :return: The IEEE 754 bit representation of the given double-precision
        floating-point value.
    """
    return struct.unpack('Q', struct.pack('d', value))[0]
8c5c525e481a50225a6bd99fffbd7fb0555aba79
15,035
import re


def tokenize(text, regex=r'[a-zA-Z]+'):
    """Split text into tokens using a regular expression

    :param text: text to be tokenized
    :param regex: regular expression used to match tokens using re.findall
    :return: a list of resulting tokens

    >>> tokenize('the rain in spain')
    ['the', 'rain', 'in', 'spain']
    """
    # note: the default was '[a-zA-z]+', where the A-z range wrongly
    # matches the punctuation characters between 'Z' and 'a'
    return re.findall(regex, text, flags=re.IGNORECASE)
c7daee14ec14ff4a22d42883f3b3e3827924389e
15,036
def cover_phone_number(no):
    """
    >>> cover_phone_number('01234 567 890')
    '01234 *** *** **'
    """
    result = ''
    for order, digit in enumerate(no):
        if order < 5:
            result = result + digit
        else:
            if order in (5, 8, 11):
                result = result + ' '
            result = result + '*'
    return result
d33d98e915135644119a66d39ac8f9fc80770d62
15,040
from typing import Iterable


def concatenate_lists(*list_of_lists: Iterable) -> list:
    """Combines the Iterables provided as arguments into one list.

    Examples
    --------
    Normal usage::

        concatenate_lists([1, 2], [3, 4], [5])
        # returns [1, 2, 3, 4, 5]
    """
    return [item for lst in list_of_lists for item in lst]
e630fd31888753814e3f486c02b0fc1e67f269ef
15,041
import os


def getPathList(folderFullName: str) -> list:
    """
    Returns every file under the given folder as a list.

    :param folderFullName: target path to collect file names from; an absolute path
    :return: all paths contained in the target path, as absolute paths (list)
    """
    fullNameList = []
    for dirName, subDirList, fnames in os.walk(folderFullName):
        for fname in fnames:
            fullName = os.path.join(dirName, fname)
            fullNameList.append(fullName)
    return fullNameList
ff4ee0f6a49922acd5e7cbdb5db709489e2c1454
15,042
def getAria2SpinSystems(ariaRestraint):
    """Descrn: Get the pairs of ARIA spin systems that correspond to the
               contributions to an ARIA restraint
       Inputs: Aria2 restraint object
       Output: List of 2-List of ARIA SpinSystem objects
    """
    spinSystemPairs = []
    for contribution in ariaRestraint.getContributions():
        ariaSpinSystems = contribution.getSpinSystems()
        if len(ariaSpinSystems) == 2:
            spinSystemPairs.append(list(ariaSpinSystems))
    return spinSystemPairs
22a6d717787ec74144457451b78a4651577f9619
15,043
def is_attr_pattern(text):
    r"""Attribute patterns are like '[title=Version \d.*]' """
    return text.startswith('[')
05978edb1d6589f47a96ab6cb2da4b7f0e3f2569
15,044
def test_input_data():
    """A set of fields required to validate the company form"""
    return {
        'DUNS Number': '123456789',
        'Business Name': 'Widgets Pty',
        'Secondary Name': 'Lots-a-widgets',
        'National Identification System Code': '12',
        'National Identification Number': '1234567',
        'Street Address': 'address 1',
        'Street Address 2': 'address 2',
        'City Name': 'city',
        'State/Province Name': 'county',
        'State/Province Abbreviation': '',
        'Postal Code for Street Address': 'postcode',
        'Country Code': '790',
        'Line of Business': 'agriculture',
        'Year Started': '2000',
        'Global Ultimate DUNS Number': '',
        'Out of Business indicator': 'N',
        'Legal Status': '3',  # corporation
        'Employees Total Indicator': '2',
        'Employees Total': 5,
        'Annual Sales Indicator': '2',
        'Annual Sales in US dollars': 8.00,
    }
34f1826119882a9fb1a8fadc40845a435bcdfb98
15,045
import os
import random


def generate_meta(images_dir, output_file, n_images):
    """
    Helper method for generating metadata from a folder of images.
    Generated metadata is compatible with the task specification.
    """
    images_dir = images_dir.resolve(strict=True)
    img_names = os.listdir(images_dir)
    if n_images > 0:
        img_names = random.sample(img_names, n_images)
    meta = [str(images_dir / img) + '\n' for img in img_names]
    with open(output_file, 'w') as f:
        f.writelines(meta)
    return meta
7045ce168af0db2a894183eca72fac53c92fea46
15,046
def determine_feature_dtype(X, features):
    """
    Determine if any features are categorical.
    """
    feature_names = list(X.columns)
    non_cat_features = []
    cat_features = []
    for f in features:
        if f not in feature_names:
            raise KeyError(f"'{f}' is not a valid feature.")
        if str(X.dtypes[f]) == "category":
            cat_features.append(f)
        else:
            non_cat_features.append(f)
    return non_cat_features, cat_features
39368183612f214e9b1385be0d3bc716f7eaa950
15,047
def strip_nondigits(string):
    """
    Return a string containing only the digits of the input string.
    """
    return ''.join([c for c in string if c.isdigit()])
564a05de12c61a09a6d07e13b13279908128389a
15,049
def get_top_card_symbol(hand):
    """
    :param hand: rankbit of a given hand without any repetition
    :return: the rankbit rep of the top card in the current hand
    """
    i = 0
    while hand >> i != 0:
        i += 1
    # after the loop, i equals hand.bit_length(), so the highest set bit
    # sits at position i - 1 (returning 1 << i overshoots by one bit)
    return 1 << (i - 1)
578f69f9c1b00f367670bf7d4bb95becb412cee1
15,050
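A quick check of the off-by-one fix above, worked out by hand:

assert get_top_card_symbol(0b101101) == 0b100000
assert get_top_card_symbol(0b1) == 0b1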
def parse_begins(msg):
    """Parse the guard ID out of the "begins his shift" message."""
    words = msg.split()
    return int(words[1][1:])
b07e3d741038365dddbacfdb70d219ef1bf007d8
15,052
import pickle


def load_pickle(file_name):
    """
    load a pickle object from the given pickle file

    :param file_name: path to the pickle file
    :return: obj => read pickle object
    """
    with open(file_name, "rb") as pick:
        obj = pickle.load(pick)
    return obj
e50ae7ccd4b72700e5079774c45ec91b240bc88a
15,055
def find_extremes(content_list):
    """Calculates the smallest and highest values of every position's column
    of the positions of features of a wortverbund.

    Args:
        content_list: list with features of a wortverbund and their
            positions of occurrence.

    Returns:
        smallest_values: list containing the smallest values found for a
            position's column in a "content_list".
        highest_values: list containing the highest values found for a
            position's column in a "content_list"."""
    max_length = 0
    for i in range(len(content_list)):
        if max_length < len(content_list[i][1]):
            max_length = len(content_list[i][1])
    smallest_values = [9999999] * max_length
    highest_values = [0] * max_length
    try:
        for i in range(1, max_length):
            for j in range(len(content_list)):
                if smallest_values[i] > content_list[j][1][i]:
                    smallest_values[i] = content_list[j][1][i]
                if highest_values[i] < content_list[j][1][i]:
                    highest_values[i] = content_list[j][1][i]
    except IndexError:
        pass
    return smallest_values, highest_values
75fccb7cd483bfe9e2de0440a93c77a380aef9ad
15,057
import glob
import os


def get_track_ids(annot_dir):
    """Obtains the track ids of all the files contained in the experiment.

    Parameters
    ----------
    annot_dir: str
        Path to the annotations directory where all the jams files reside.

    Returns
    -------
    track_ids: list
        List containing all the files in the experiment.
    """
    return glob.glob(os.path.join(annot_dir, "*.jams"))
39293496c792bcb429f46aab93c694e231b0d8a4
15,058
def get_t_epoch(jd):
    """
    Get the JD in julian centuries
    """
    t = (jd - 2451545.0) / 36525.
    return t
6605018efc72e37b635240180575e31fb08352b1
15,059
from typing import List
from typing import Dict
from typing import Union


def format_resources(resources: List[str],
                     resource_param: List[str],
                     kwargs: Dict[str, Union[List[str], str]]):
    """
    >>> resources = ['hello/{user_name}']
    >>> resource_param = ['user_name']
    >>> kwargs = {'user_name': "bob"}
    >>> x = format_resources(resources, resource_param, kwargs)
    >>> x == ['hello/bob']
    True

    :param resources:
    :param resource_param:
    :param kwargs:
    :return:
    """
    _rp = dict()
    for key in resource_param:
        v = kwargs.get(key)
        if isinstance(v, str):
            _rp[key] = v
    return [resource.format_map(_rp) for resource in resources]
9a50c0ee34b07cb228dda781668dd61b94ebaa67
15,060
import glob
import os


def get_present_scene_ids(dp_split):
    """Returns IDs of scenes present in the specified dataset split.

    :param dp_split: Path to a folder with datasets.
    :return: List with scene IDs.
    """
    scene_dirs = [d for d in glob.glob(os.path.join(dp_split['split_path'], '*'))
                  if os.path.isdir(d)]
    scene_ids = [int(os.path.basename(scene_dir)) for scene_dir in scene_dirs]
    scene_ids = sorted(scene_ids)
    return scene_ids
fd3661db7d80aae15635782bedcdd2ca508fbc8a
15,061
import os


def _readme():
    """Find the README.*. Prefer README.rst

    Returns:
        str: Name of README
    """
    for which in 'README.rst', 'README.md', 'README.txt':
        if os.path.exists(which):
            return which
    raise ValueError('You need to create a README.rst')
1fa683974bd542e51de344ab28db027730b47c97
15,062
def baits(path_to_file, created_baitset_id=None, build='GRCh37'):
    """Reads the bed-file and returns a list of dictionaries

    Args:
        path_to_file: path to temp file

    Returns:
        bait_dict_list(list): a list of dictionaries with the keys:
            chromosome, chr_start, chr_stop
    """
    bait_dict_list = []
    with open(path_to_file, 'r') as file:
        for line in file:
            bait = {}
            line = line.strip().split('\t')
            bait['chromosome'] = line[0]
            bait['chr_start'] = line[1]
            bait['chr_stop'] = line[2]
            if created_baitset_id:
                bait['baitset'] = [created_baitset_id]
            # create a unique id for the bait:
            # id looks like this: chr_start_stop_build
            bait['_id'] = bait['chromosome'] + "_" + bait['chr_start'] + "_" + bait['chr_stop'] + "_" + build
            bait_dict_list.append(bait)
    return bait_dict_list
8f05b295a233bd42d1baf40d3ad03477e192e425
15,064
def prepend_domain(link):
    """ Urls are directly combined as given in *args """
    top_level_domain = 'https://www.proff.no'
    return top_level_domain + link
25e599d2744100aeea71556751906dc1a9166428
15,066
def ref_str_to_tuple(ref):
    """String like ' a : b ' to tuple like ('a', 'b')."""
    return tuple(x.strip() for x in ref.split(':'))
fc2e467f054d2b53a580f1d0917d01eda9ba1727
15,067
def bin_append(a, b, length=None):
    """
    Appends number a to the left of b

    bin_append(0b1, 0b10) = 0b110
    """
    length = length or b.bit_length()
    return (a << length) | b
c2d3132532b1d9311d5b6eef94289c0763422665
15,069
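Two hand-checked calls of bin_append(); the second passes an explicit length to left-pad b:

assert bin_append(0b1, 0b10) == 0b110
assert bin_append(0b1, 0b10, length=3) == 0b1010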
def levenshtein_distance(word_1, word_2):
    """
    Calculates the levenshtein distance (= the number of letters to add/
    substitute/interchange in order to pass from word_1 to word_2)
    """
    array = [[0 for i in range(len(word_2)+1)] for y in range(len(word_1)+1)]
    for i in range(len(word_1)+1):
        array[i][0] = i
    for j in range(len(word_2)+1):
        array[0][j] = j
    for i in range(1, len(word_1)+1):
        for j in range(1, len(word_2)+1):
            cost = 0 if word_1[i-1] == word_2[j-1] else 1
            array[i][j] = min(
                array[i-1][j] + 1,
                array[i][j-1] + 1,
                array[i-1][j-1] + cost
            )
    return array[len(word_1)][len(word_2)]
ce43e60454b59c3c1323656636f457bc192a2c67
15,070
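Two classic hand-checked distances for the function above:

assert levenshtein_distance("kitten", "sitting") == 3  # 3 edits
assert levenshtein_distance("", "abc") == 3            # 3 insertions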
import inspect


def get_source_link(obj, page_info):
    """
    Returns the link to the source code of an object on GitHub.
    """
    package_name = page_info["package_name"]
    version = page_info.get("version", "master")
    base_link = f"https://github.com/huggingface/{package_name}/blob/{version}/src/"
    module = obj.__module__.replace(".", "/")
    line_number = inspect.getsourcelines(obj)[1]
    return f"{base_link}{module}.py#L{line_number}"
86748f179e44cec37efd88e1b8dc5de0c6268631
15,071
def concat_environment(env1, env2):
    """
    Concatenate two environments.

    1 - Check duplicated keys and concatenate their values.
    2 - Update the concatenated environment.

    Parameters
    ----------
    env1: dict (mandatory)
        First environment.
    env2: dict (mandatory)
        Second environment.

    Returns
    -------
    concat_env: dict
        Updated environment where the duplicated keys values are
        concatenated with ':'.
    """
    # copy env1 so the caller's dict is not mutated in place
    concat_env = dict(env1)
    for key, value in env2.items():
        if key in concat_env.keys():
            if value != concat_env[key]:
                concat_env[key] += ":" + env2[key]
        else:
            concat_env[key] = env2[key]
    return concat_env
25f7aee5a9316ab0604f2e38538a1f67a5333b08
15,072
import re


def is_path(source):
    """
    check if the supplied source is a valid path
    (this does not appear to be part of moments.path module,
    but could be relocated there)

    ** this will not match relative paths **
    """
    if re.match('/', source):
        return True
    else:
        return False
405ff439b2785c54b30877574408e65a4f36e3de
15,073
import argparse


def parse_cmdargs():
    """
    Using argparse module, get commandline arguments
    :return:
    """
    parser = argparse.ArgumentParser(description='Parse Covid19 dataset from JHU-CSSE, add '
                                                 'population data and dump combined data to '
                                                 'file in csv or json format')
    parser.add_argument('--covid_data', help='JHU_CSSE Covid19 data csv filepath', required=True)
    parser.add_argument('--pop_data', help='Population data csv filepath', required=True)
    parser.add_argument('--date', help='Date of dataset in YYYY-MM-DD format', required=False)
    parser.add_argument('--out_file', help='Output data filepath', required=True)
    parser.add_argument('--format_json', help='Dump output as json', action='store_true',
                        default=False, required=False)
    args = parser.parse_args()
    return (args.covid_data, args.pop_data, args.date, args.out_file, args.format_json)
dbe753a45e2c7fdb4c882167825635ec5e1f30b6
15,074
import itertools


def get_emin_emax(self):
    """
    Finds how much the Ek_grid has to be expanded
    above the bandwidth D of the leads.

    Parameters
    ----------
    self : Approach2vN
        Approach2vN object.
    self.funcp.emin : float
        (Modifies) Minimal energy in the updated Ek_grid.
    self.funcp.emax : float
        (Modifies) Maximal energy in the updated Ek_grid.
    """
    # (E, si, dband) = (self.qd.Ea, self.si, self.leads.dlst[0,1])
    (E, si, dmin, dmax) = (self.qd.Ea, self.si, self.funcp.dmin, self.funcp.dmax)
    lst = [dmin, dmax]

    for charge in range(si.ncharge):
        for b, bp in itertools.product(si.statesdm[charge], si.statesdm[charge]):
            lst.append(dmax - E[b] + E[bp])
            lst.append(dmin - E[b] + E[bp])

    for charge in range(si.ncharge - 2):
        for d, b in itertools.product(si.statesdm[charge + 2], si.statesdm[charge]):
            lst.append(dmin + E[d] - E[b])
            lst.append(dmax + E[d] - E[b])

    self.funcp.emax = max(lst)
    self.funcp.emin = min(lst)
    return 0
102198b413f190264e231c80f896045fc2eb3f58
15,076
import re


def substitute_pattern_with_char(s, pattern, repl_char='x'):
    r"""
    This is a little different than re.sub(). It replaces all the characters
    that match the pattern with an equal number of `repl_char` characters.

    The resulting string should be the same length as the starting string.

    >>> substitute_pattern_with_char(s='Hi there', pattern=r'[a-z]+', repl_char='x')
    'Hx xxxxx'

    >>> substitute_pattern_with_char(s='With 42 cats', pattern=r'[\d]+', repl_char='x')
    'With xx cats'

    >>> substitute_pattern_with_char(s='With 42 cats and 12 dogs', pattern=r'[\d]+', repl_char='x')
    'With xx cats and xx dogs'

    >>> substitute_pattern_with_char(s='With 42 cats and 12 dogs', pattern=r'[\d]+\s+(cat[s]?|bird[s]?)', repl_char='x')
    'With xxxxxxx and 12 dogs'
    """
    for mo in re.finditer(pattern=pattern, string=s):
        m = mo.group(0)
        s = s.replace(m, ''.join([repl_char for i in range(0, len(m))]))
    return s
98d7f3d642430a5211aa7396a3454d979057ebef
15,077
def extract_table_name(file_name: str) -> str:
    """Extract the table name from the filename

    Assumes the name of the bigquery table the data is being inserted to
    is the first word of the filename

    Examples:
        extract_table_name("properties/properties_09.csv")
        >>> "properties"
    """
    return file_name.split("/")[0]
1cac0a58325651abff1399974df52aaefa93585f
15,079
def calculate_block_ranges(scan, block_size):
    """
    :param scan: a scan object
    :param block_size: target block size in degrees
    """
    image_ranges = []
    nimages = scan.get_num_images()
    osc_range = scan.get_oscillation_range(deg=True)
    osc_width = abs(osc_range[1] - osc_range[0])
    nblocks = max(int(round(osc_width / block_size)), 1)
    nblocks = min(nblocks, nimages)
    # equal sized blocks except the last one that may contain extra images
    # to make up the remainder
    nimages_per_block = [nimages // nblocks] * (nblocks - 1) + [
        nimages // nblocks + nimages % nblocks
    ]
    start = scan.get_image_range()[0]
    for nim in nimages_per_block:
        image_ranges.append((start, start + nim - 1))
        start += nim
    return image_ranges
1ea1f8ecb6b7a3a5713a7169effcf0db2d9eac06
15,080
import random


def _randbytes(n: int) -> bytes:
    """Polyfill for random.randbytes in Python < 3.9"""
    return bytes(random.choices(range(256), k=n))
5d6b0a734774dc28d45d53cc2dfeedc8a986a8a5
15,082
def duel(Player, CPU):
    """
    flag = 0 --> YOU LOSE
    flag = 1 --> DRAW
    flag = 2 --> YOU WIN
    """
    flag = 0  # Initialize the flag
    # game values are Italian: sasso = rock, carta = paper, forbice = scissors
    if CPU.cpu == Player.p_value:
        flag = 1
    else:
        if CPU.cpu == "sasso":
            if Player.p_value == "carta":
                flag = 2
            if Player.p_value == "forbice":
                flag = 0
        if CPU.cpu == "carta":
            if Player.p_value == "sasso":
                flag = 0
            if Player.p_value == "forbice":
                flag = 2
        if CPU.cpu == "forbice":
            if Player.p_value == "sasso":
                flag = 2
            if Player.p_value == "carta":
                flag = 0
    # Check for result
    if flag == 0:
        print(f"YOU LOSE ------> PLAYER: {Player.p_value} VS CPU: {CPU.cpu}")
    elif flag == 1:
        print(f"DRAW ------> PLAYER: {Player.p_value} VS CPU: {CPU.cpu}")
    elif flag == 2:
        print(f"YOU WIN ------> PLAYER: {Player.p_value} VS CPU: {CPU.cpu}")
    return flag
9248d86e14ae0783f87dcee42e0fd9a8299f0150
15,083
def avg(arr):
    """Compute the average."""
    return sum(arr) / float(len(arr))
0da964cdb1d14154b4569d010b4caa710a30fd84
15,084
import numpy as np


def get_time_course_for_columns(time_course_data_set, col_indices=None):
    """
    Pick out a set of columns

    :param time_course_data_set:
    :param col_indices:
    :return:
    """
    if col_indices is None or None in col_indices:
        return None

    data = np.ndarray(shape=(len(time_course_data_set.data), len(col_indices)), dtype=float)
    data.fill(np.nan)  # fill(None) raises a TypeError for a float array

    for line_idx, data_line in enumerate(time_course_data_set.data):
        split_data = data_line.split()
        for idx, col_idx in enumerate(col_indices):
            # strict upper bound: col_idx == len(split_data) would be out of range
            if 0 <= col_idx < len(split_data):
                data[line_idx, idx] = float(split_data[col_idx])

    return data
1ec4570f37ad7eff10e201a934e05accea7aecbc
15,086
from typing import Any


def get_full_name(obj: Any) -> str:
    """Returns identifier name for the given callable.

    Should be equal to the import path:
        obj == import_object(get_full_name(obj))

    Parameters
    ----------
    obj : object
        The object to find the classpath for.

    Returns
    -------
    The object's classpath.
    """
    if callable(obj):
        return obj.__module__ + '.' + obj.__qualname__
    else:
        return obj.__class__.__module__ + '.' + obj.__class__.__qualname__
6f83f808f8c4d226b1d26365adc75f5cb6c4e28f
15,087
from statistics import mean
from collections import namedtuple


def infer_user_based_cf(model, train, test):
    """ Infer user CF Vanilla """
    # Users' mean ratings - (user, mean_rating)
    mean_user_ratings = train.map(lambda x: (x['user_id'], x['stars']))\
        .groupByKey()\
        .mapValues(mean).collectAsMap()
    # Format Hash table of user weights - ((user1, user2), sim)
    users_weights = model.flatMap(lambda x: [
        ((x[0], x[1]), x[2]),
        ((x[1], x[0]), x[2])
    ]).collectAsMap()
    # User neighbors - (u, {u1,u2})
    user_neighs = model\
        .flatMap(lambda x: [(x[0], x[1]), (x[1], x[0])])\
        .groupByKey()\
        .mapValues(set).collectAsMap()
    # rating's index by business
    biz_ratings = train.map(lambda x: (x['business_id'], (x['user_id'], x['stars'])))\
        .groupByKey().mapValues(dict)\
        .collectAsMap()

    # get predictions
    def _get_score(u, neighs, rates):
        num_, den_ = [], []
        for n in neighs:
            if n in rates:
                w = users_weights[(u, n)]
                ra_i = rates[n]
                _ra = mean_user_ratings.get(n, 0)
                num_.append(w * (ra_i - _ra))
                den_.append(abs(w))
        if len(den_) == 0 or sum(den_) == 0:
            return 0
        return mean_user_ratings.get(u, 0) + (sum(num_) / sum(den_))

    Rating = namedtuple("Rating", ("user", "biz"))
    preds = test.map(lambda x: Rating(x['user_id'], x['business_id']))\
        .map(lambda x: (
            x.user,
            x.biz,
            _get_score(x.user, user_neighs.get(x.user, set()), biz_ratings.get(x.biz, {}))
        ))
    _preds = preds.collect()
    return _preds
15185905fb86dcb40dfc29f0d1c58e142bb8bd42
15,088
def estimate_microturbulence(effective_temperature, surface_gravity):
    """
    Estimate microturbulence from relations between effective temperature
    and surface gravity. For giants (logg < 3.5) the relationship employed
    is from Kirby et al. (2008, ) and for dwarfs (logg >= 3.5) the
    Reddy et al. (2003) relation is used.

    :param effective_temperature:
        The effective temperature of the star in Kelvin.

    :type effective_temperature:
        float

    :param surface_gravity:
        The surface gravity of the star.

    :type surface_gravity:
        float

    :returns:
        The estimated microturbulence (km/s) from the given stellar
        parameters.

    :rtype:
        float
    """
    if surface_gravity >= 3.5:
        return 1.28 + 3.3e-4 * (effective_temperature - 6000) \
            - 0.64 * (surface_gravity - 4.5)
    else:
        return 2.70 - 0.509 * surface_gravity
bf54342e00fc61f042f183c8bbebc01005eb6b4c
15,089
from platform import python_version


def pl_python_version() -> str:  # pragma: no cover
    """
    Stored procedure that returns the database's Python interpreter version

    @return: semantic python version X.X.X
    """
    return python_version()
4c46818a35bf5b793fdfb58f0e8648efe59a777e
15,090
import yaml
# collections.OrderedDict, not typing.OrderedDict: the representer is keyed
# on the concrete class of the data being dumped
from collections import OrderedDict


def ordered_dump(data, stream, Dumper=yaml.Dumper, representer=OrderedDict, **kwds):
    """
    write data dict into a yaml file.

    :param data: input dict
    :param stream: output yaml file
    :param Dumper: yaml.Dumper
    :param representer: =OrderedDict to write in order; =dict to write in random order
    :param kwds: optional args for writing a yaml file; eg. default_flow_style=False
    :return: yaml file
    """
    class OrderedDumper(Dumper):
        pass

    def _dict_representer(dumper, data):
        return dumper.represent_mapping(
            yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
            data.items()
        )

    OrderedDumper.add_representer(representer, _dict_representer)
    return yaml.dump(data, stream, OrderedDumper, **kwds)
77fd33e7726a75d7e2f93955e96118f6ff14e1a4
15,092
def search_the_word(word_dict, search_word):
    """word_dict => a list of words to search from.
    search_word => a word to search for in the provided list.

    USAGE:
    This function is used to search for a word in a provided list of other
    words. It provides a match according to the accuracy of similar matches
    found in the list.

    RETURN:
    The function returns a dictionary of two keys:
    do_you_mean = Containing a list of possible matches to the suggested word.
    match = Contains a string value of the match found; it is an empty
    string if no matches are found.
    """
    results = {"do_you_mean": [], "match": ""}
    i = 0
    total = 0
    do_you_mean = []
    match = None
    for word in word_dict:
        """go through each word in the word dictionary"""
        # clean the words ie: remove punctuation
        word = word.strip(",.\'\"!@#$%?<>^&*()_+/*-+").lower().replace(" ", "")
        search_word = search_word.strip(",.\'\"!@#$%?><^&*()_+/*-+").lower().replace(" ", "")
        # break the dictionary word into a list of letters
        word_letters = list(word)
        # break the search word into a list of letters
        search_word_letters = list(search_word)
        try:
            for x in search_word_letters:
                """look if the i'th letter of the dict word matches the search word"""
                if x == word_letters[i]:
                    # if they match, increase the confidence score
                    total += 1
                else:
                    total = total + 0  # if no match, do nothing to the confidence score
                i += 1
        except:
            pass
        percentage_score = (total * 100) / len(search_word)  # the confidence score in percentage
        if percentage_score == 100:
            match = word
        elif (percentage_score >= 50) and (percentage_score < 100) and (len(search_word) == ((len(word) + 2) or (len(word) - 2))):
            do_you_mean.append(word)
        elif percentage_score <= 10:
            pass
        # reset the i'th and total values to check the next dictionary word
        i = 0
        total = 0
    # print the list of close words OR any match found
    ## print("do you mean :: ", do_you_mean)
    ## print("match :: ", match)
    results["do_you_mean"] = do_you_mean
    results["match"] = match
    return results
311f63fb903e63e75a99b5ebbd09de5aba3a899c
15,093
def fO2(celsius, bars=1., buffer_curve='QFM'):
    """
    Input:
        Temperature in Celsius
        Pressure in bars (default is 1)
        Buffer curve. Options are QFM (default), NNO, and IW
            QFM = quartz - fayalite - magnetite buffer (default)
            NNO = Ni-NiO
            IW = iron-wustite; Fe-FeO

    Output is log10 oxygen fugacity in bars for buffer_curve.
    Regression of data from O'Neill (1987b) from Herd, 2008.
    """
    Kelvin = celsius + 273.15
    if buffer_curve == 'QFM':
        A = -24935.0
        B = 8.489
        C = 0.0
    elif buffer_curve == 'NNO':
        A = -24525.4
        B = 8.944
        C = 0.0
    elif buffer_curve == 'IW':
        A = -27489.
        B = 6.702
        C = 0.055
    else:
        print('Only QFM, IW, and NNO supported for now')
        return False
    logfO2 = ((A / Kelvin) + B + (C * (bars - 1.0) / Kelvin))
    return logfO2
a3bf160d4e0a51aed168e1aac783b90e5278c953
15,094
def compute_graph(local, rules):
    """ Obtain a DAG given an STA """
    graph = {}
    for l in local:
        graph[l] = [r['to'] for r in rules if r['from'] == l and r['to'] != r['from']]
    return graph
94bde12c64a461a3bcb296568deccf3bc7624bd0
15,095
import os


def energy_from_log():
    """Get the total energy from the GROMACS log file"""
    if not os.path.exists('energy.log'):
        exit('energy.log did not exist - cannot obtain the energy')

    energy_filelines = open('energy.log', 'r').readlines()

    try:
        total_energy_idx = next(i for i, line in enumerate(energy_filelines)
                                if 'Total Energy' in line)
        return float(energy_filelines[total_energy_idx+1].split()[1])
    except StopIteration:
        exit('No total energy in energy.log')
2e1c11655e6a3b3df7a966a7066a3a2126164374
15,097
import re


def messagestat_formatting(messageBody):
    """
    Formats the message body for messagestat functions. This mostly
    involves replacing spaces with underscores and doubling up on
    quotations so that SQL server ignores them
    """
    messageBody = re.sub(' ', '_', messageBody)
    messageBody = re.sub("'", "''", messageBody)
    if messageBody.startswith('"') and messageBody.endswith('"'):
        messageBody = messageBody[1:]
        messageBody = messageBody[:-1]
    return messageBody
f91a8fe73b336f97be8322c6b5034013090e0807
15,099
def change_pals_col_names(pals_df):
    """
    :param pals_df: A dataframe returned from the PALS package
    :return: The pals_df with columns removed and headings changed for use on the website
    """
    pals_df.reset_index(inplace=True)
    columns = pals_df.columns
    # Drop the columns that are not required for the FlyMet page.
    drop_list = ['sf', 'exp_F', 'Ex_Cov']
    for c in columns:
        if 'p-value' in c:
            drop_list.append(c)
    pals_df.drop(drop_list, axis=1, inplace=True)
    # Rename the columns that are left for use on the website
    pals_df.rename(columns={'index': 'Reactome ID', 'pw_name': 'Pathway name',
                            'unq_pw_F': 'PW F', 'tot_ds_F': 'DS F',
                            'F_coverage': 'F Cov'}, inplace=True)
    for c in columns:
        if "ChEBI" in c:
            c_new = c.replace("ChEBI", "")
        else:
            c_new = c
        if 'comb_p' in c:
            split_c = c_new.split('/')
            col_name = split_c[0].strip()
            pals_df.rename(columns={c: col_name}, inplace=True)
    return pals_df
ff81ca9f200bc80e2037b14779d674c98e2f155d
15,100
def get_sol_ud_model_columns(df_columns):
    """Get experimental condition features."""
    sol_ud_model_columns = list(filter(lambda column_name: column_name.startswith('_rxn_')
                                       and not column_name.startswith('_rxn_v0'),
                                       df_columns))
    sol_ud_model_columns.remove('_rxn_organic-inchikey')
    return sol_ud_model_columns
ab05433a6cb05af82c4642c27fb5cb85851e88e7
15,102
def _ngrams_from_tokens(tokens, n, join=True, join_str=' '):
    """
    Helper function to produce ngrams of length `n` from a list of string tokens `tokens`.

    :param tokens: list of string tokens
    :param n: size of ngrams
    :param join: if True, join each ngram by `join_str`, i.e. return list of
        ngram strings; otherwise return list of ngram lists
    :param join_str: if `join` is True, use this string to join the parts of the ngrams
    :return: return list of ngram strings if `join` is True, otherwise list of ngram lists
    """
    if n < 2:
        raise ValueError('`n` must be at least 2')

    if len(tokens) == 0:
        return []

    if len(tokens) < n:
        # raise ValueError('`len(tokens)` should not be smaller than `n`')
        ngrams = [tokens]
    else:
        ngrams = [[tokens[i+j] for j in range(n)]
                  for i in range(len(tokens)-n+1)]

    if join:
        return list(map(lambda x: join_str.join(x), ngrams))
    else:
        return ngrams
39afd35a4e715ea3e358a81d3a557eb1f0f0310a
15,103
def parse_prior_params(bt_config, code_config, key, default, prior='pre'):
    """
    Parse parameters with priority.

    Args:
        bt_config(dict): bt config
        code_config(dict): code config
        key(string): parameter name
        default(default): default value
        prior(string): use bt_config in prior if 'pre' otherwise code_config

    Returns:
        value
    """
    if prior == 'pre':
        return bt_config.get(key, code_config.get(key, default))
    else:
        return code_config.get(key, bt_config.get(key, default))
beee29f04562173d18cc95b2b49683dee02fbca8
15,104
def map_merge_cubes(process):
    """
    """
    # intentionally empty, just for clarity
    # the only thing needed for this process is to create a new pickled
    # object from the input ones, already mapped by other functions in
    # map_processes.py
    return []
26d247c6ddb0ae217edd58666126ebb94bda0c7a
15,105
def entity_tostring(entity):
    """Converts one GNL (Google Natural Language) entity to a readable string."""
    metadata = ", ".join(['"%s": "%s"' % (key, value)
                          for key, value in entity.metadata.items()])
    mentions = ", ".join(['"%s"' % mention for mention in entity.mentions])

    return ('{name: "%s",'
            ' type: "%s",'
            ' metadata: {%s},'
            ' salience: %s,'
            ' mentions: [%s]}') % (
                entity.name,
                entity.type,
                metadata,
                entity.salience,
                mentions)
dd3e30247e36186e6eccfe1e32f8f31bf3577660
15,108
import sys
import select


def has_data_from_stdin():
    """
    Check if any data comes from stdin.

    :return: True if there are data from stdin, otherwise False
    """
    if select.select([sys.stdin, ], [], [], 0.0)[0]:
        return True
    else:
        return False
005928784fc121877cfe9f4857b95427556c8b79
15,109
import networkx


def graph_from_dict(d):
    """
    Creates a NetworkX DiGraph from a dictionary

    Parameters
    ----------
    d : dict

    Returns
    -------
    Graph: NetworkX DiGraph

    Examples
    --------
    >>> g = graph_from_dict({'a':['b'], 'b':['c', 'd'], 'c':[], 'd':[], 'e':['d']})
    """
    g = networkx.DiGraph()
    for key, children in d.items():
        for child in children:
            g.add_edge(key, child)
    return g
e0029b6018ff840bd2f314038c25f41e025600a7
15,110
def dectobin(dec_string):
    """Convert a decimal string to binary string"""
    bin_string = bin(int(dec_string))
    return bin_string[2:]
5f02507ae5e7ab855eceb7a5908347060b46a400
15,111
def underscorize(camelcased):
    """
    Takes a CamelCase string and returns a separated_with_underscores
    version of that name in all lower case.  If the name is already all in
    lower case and/or separated with underscores, then the returned string
    is identical to the original.  This function is used to take CStruct
    class names and determine the names of their handler methods.

    Here are some example conversions:

        underscorize("SomeStruct")   == "some_struct"
        underscorize("SSNLookup")    == "ssn_lookup"
        underscorize("RS485Adaptor") == "rs485_adaptor"
        underscorize("Rot13Encoded") == "rot13_encoded"
        underscorize("RequestQ")     == "request_q"
        underscorize("John316")      == "john316"
    """
    underscored, prev = "", ""
    for i, c in enumerate(camelcased):
        if (prev and not c.islower() and c != "_"
                and (prev.islower() and not c.isdigit()
                     or c.isupper() and camelcased[i+1:i+2].islower())):
            underscored += "_"
        underscored += c.lower()
        prev = c
    return underscored
b0f2622c105c09502aa984e15cf1b61ac12a608b
15,112
def best_validation_rows(log_df, valid_col='valid_accuracy', second_criterion='iterations_done'):
    """
    Takes a dataframe created by scripts/logs_to_dataframe.py and returns
    a dataframe containing the best-validation row for each log.
    """
    return log_df.sort_values([valid_col, second_criterion],
                              ascending=False).drop_duplicates(['log'])
9541adb6653a93bfe0385bd24beedf80b065dde7
15,114
def get_kwargs(names, defaults, kwargs):
    """Return wanted parameters, check remaining.

    1. Extracts parameters `names` from `kwargs`, filling them with the
       `defaults`-value if it is not in `kwargs`.

    2. Check remaining kwargs;
       - Raise an error if it is an unknown keyword;
       - Print warning if it is a keyword from another routine (verb>0).

    List of possible kwargs:

    - ALL functions: src, rec, res, aniso, epermH, epermV, mpermH, mpermV, verb
    - ONLY gpr: cf, gain
    - ONLY bipole: msrc, srcpts
    - ONLY dipole_k: freq, wavenumber
    - ONLY analytical: solution
    - ONLY bipole, loop: mrec, recpts, strength
    - ONLY bipole, dipole, loop, gpr: ht, htarg, ft, ftarg, xdirect, loop
    - ONLY bipole, dipole, loop, analytical: signal
    - ONLY dipole, analytical, gpr, dipole_k: ab
    - ONLY bipole, dipole, loop, gpr, dipole_k: depth
    - ONLY bipole, dipole, loop, analytical, gpr: freqtime

    Parameters
    ----------
    names: list
        Names of wanted parameters as strings.

    defaults: list
        Default values of wanted parameters, in same order.

    kwargs : dict
        Passed-through kwargs.

    Returns
    -------
    values : list
        Wanted parameters.
    """
    # Known keys (excludes keys present in ALL routines).
    known_keys = set([
        'depth', 'ht', 'htarg', 'ft', 'ftarg', 'xdirect', 'loop', 'signal',
        'ab', 'freqtime', 'freq', 'wavenumber', 'solution', 'cf', 'gain',
        'msrc', 'srcpts', 'mrec', 'recpts', 'strength'
    ])

    # Loop over wanted parameters.
    out = list()
    verb = 2  # get_kwargs-internal default.
    for i, name in enumerate(names):

        # Catch verb for warnings later on.
        if name == 'verb':
            verb = kwargs.get(name, defaults[i])

        # Add this parameter to the list.
        out.append(kwargs.pop(name, defaults[i]))

    # Check remaining parameters.
    if kwargs:
        if not set(kwargs.keys()).issubset(known_keys):
            raise TypeError(f"Unexpected **kwargs: {kwargs}.")
        elif verb > 0:
            print(f"* WARNING :: Unused **kwargs: {kwargs}.")

    return out
20720215c9d1e2849b519c9f6ce2d8eb074752c3
15,115
def n_mask_items(request):
    """Percentage of items to mask on init"""
    return request.param
10c1214b8dca81fde59428835302abe35fc72d23
15,118
def align(source):
    """Return a string containing the same program but aligned to the
    start of a transaction or command (in that order).

    This function does not run a complete syntax check, but raises a
    ValueError, if the source is commented incorrectly.

    Examples:
        >>> align("+67; O=0, O+66; O=0, O")
        ' O=0, O+66; O=0, O+67;'
        >>> align("0, O+66, O=")
        ' O+66, O=0,'
        >>> align("=0O")
        'O=0'
        >>> align("some (comments) in here.)(Only ")
        '(Only some (comments) in here.)'

    Raises:
        ValueError: If the source is commented incorrectly.
    """
    # It's important to align at comments first, because
    # any of ";,=+-" could be part of a comment.

    # make sure, we have a correct count of '(' and ')'
    if not source.count('(') == source.count(')'):
        raise ValueError(
            "Incorrectly commented source: count for '(':" +
            "%d, count for ')': %d." % (source.count('('), source.count(')')))

    indices = [idx for idx in range(len(source)) if source[idx] == '(']
    for start in indices:
        idx = start
        count = 0
        # Run through the source and keep track of the
        # count of opening and closing parentheses.
        # If we reach the starting index again and count is 0
        # we have found a valid starting index for the program,
        # if the count is < 0 at any point, the starting point is invalid.
        while True:
            if source[idx] == '(':
                count += 1
            elif source[idx] == ')':
                count -= 1
            if count < 0:
                break
            idx += 1
            idx %= len(source)
            if idx == start:
                break
        # found a valid start
        if count == 0:
            return source[start:] + source[:start]

    # If we reached this point, there wasn't a valid starting '('.
    if indices:
        raise ValueError(
            "Incorrectly commented source. No valid rotation exists.")

    for char in ";,":
        try:
            idx = source.index(char)
            source = source[idx + 1:] + source[:idx + 1]
            return source
        except ValueError:
            pass

    # no ";" or "," present; align at "+-="
    for char in "+-=":
        try:
            idx = source.index(char)
            source = source[idx - 1:] + source[:idx - 1]
            return source
        except ValueError:
            pass

    # Source empty? There could still be syntactically invalid programs,
    # but this is checked later...
    return source
228a1fe4bbc94e20b08828fb03e9714ca8acd6f9
15,119
import os


def mkdir_even_if_exists(path, name):
    """Creates a directory under path with a given name. If it exists,
    adds an integer number to the directory name.
    Returns the directory's full path."""
    directory = os.path.join(path, name)
    if os.path.exists(directory):
        i = 0
        while True:
            directory_numbered = directory + str(i)
            if os.path.exists(directory_numbered):
                i += 1
            elif i > 2147483646:
                raise StopIteration('too many folders with the same name')
            else:
                os.makedirs(directory_numbered)
                directory = directory_numbered
                break
    else:
        os.makedirs(directory)
    return directory
141be84f9a0aa8fb40f5044aa6607cd2eec01c73
15,120
def getMsgTime(line):
    """Parse the timestamp off the 978 message and return as a float.

    Args:
        line (str): Line containing a 978 message with a timestamp at the end.

    Returns:
        float: Timestamp of the message.

    Raises:
        Exception: If the timestamp can't be found.
    """
    payloadTimeIndex = line.find(';t=')
    if payloadTimeIndex == -1:
        raise Exception('Illegal time format')

    timeInSecs = float(line[payloadTimeIndex + 3:-1])
    return timeInSecs
860a02a28d154357d6fcc61de182fee1f4875aaa
15,121
import os


def get_student_id(username):
    """
    Obtain a user's Student ID number from the server (if tied into the WPI network).

    :param username: The user's username (WPI network username)
    :return: Student ID number
    """
    try:
        uid = os.popen('id -u ' + username).read().replace('\n', '')
        if uid not in ['', None]:
            return uid
    except:
        print('Unable to obtain id for ' + username)
    return None
1c0d99473b773a1d76ef5dec69762c660e060f17
15,122
def _counting_sort(a, max_value):
    """
    sorting positive integers less than or equal to max_value
    """
    result = []
    counter = [0] * (max_value + 1)
    for i in a:
        counter[i] += 1
    for i, count in enumerate(counter):
        result.extend([i] * count)
    return result
8d81ec58e851eab73011ee180c1fe60c36b46675
15,123
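A small worked example of the counting sort above:

assert _counting_sort([3, 1, 4, 1, 5], max_value=5) == [1, 1, 3, 4, 5]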
import os


def auto_delete_photo_on_update(sender, instance, **kwargs):
    """
    Deletes old photo from filesystem
    when Profile object is updated with new photo.
    """
    if not instance.pk:
        return False

    try:
        old_photo = sender.objects.get(pk=instance.pk).photo
    except sender.DoesNotExist:
        return False

    new_photo = instance.photo
    if old_photo != new_photo and bool(old_photo):
        if os.path.isfile(old_photo.path):
            os.remove(old_photo.path)
f6aaf0c9b35a775d14f42e696a25d06dddd7f4e0
15,124
def GetAllFields(fielddefs):
    """Extract L{objects.QueryFieldDefinition} from field definitions.

    @rtype: list of L{objects.QueryFieldDefinition}
    """
    return [fdef for (fdef, _, _, _) in fielddefs]
0a831dc1eadad01a91bfb131feae4ecc0e5cb1f2
15,125
def ordinalize(given_number: int) -> str:
    """Ordinalize the number from the given number

    Args:
        given_number (int): integer number

    Example:
        >>> ordinalize(34)
        '34th'

    Returns:
        str: string in ordinal form
    """
    suffix = ["th", "st", "nd", "rd"]
    thenum = int(given_number)
    # check % 100 so that 111, 112, 113 and so on also get "th"
    if thenum % 10 in [1, 2, 3] and thenum % 100 not in [11, 12, 13]:
        return f'{thenum}{suffix[thenum % 10]}'
    else:
        return f'{thenum}{suffix[0]}'
e0b43b3b8353e9e79d2f13a36198782a2a5dcd73
15,126
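Spot checks for the function above, including the 11/12/13 special cases that the % 100 test now also handles for numbers over 100:

assert ordinalize(1) == '1st'
assert ordinalize(23) == '23rd'
assert ordinalize(11) == '11th'
assert ordinalize(112) == '112th'  # would be '112nd' without the % 100 check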
def get_filenames(hda_dict):
    """
    Generates a list of filenames taken from the results dictionary,
    retrieved with the function request_results_list.

    Parameters:
        hda_dict: dictionary initialised with the function init, that
            stores all required information to be able to interact
            with the HDA API

    Returns:
        Returns a list of filenames for each entry stored in the
        dictionary returned by the function request_results_list.
    """
    fileName = []
    for file in hda_dict['results']['content']:
        fileName.append(file['filename'])
    return fileName
c14911b14fa2b31f061b4420875c603d8acce5c1
15,127
def get_fibonacci_ints(ints, length):
    """f(n) = f(n-1) + f(n-2)"""
    first = ints[0]
    second = ints[1]
    if length == 1:
        return [first]
    if length == 2:
        return [first, second]
    ints = get_fibonacci_ints(ints, length=length - 1)
    ints.append(ints[-1] + ints[-2])
    return ints
ff9e1691183560f12cb844af0e0a9b9f9579efd6
15,130
import numpy
import pandas


def metrics_to_pandas(metrics):
    """
    Place holder for an attempt to make a generic Pandas object for the
    metrics service. However, for this to work well, you need to know all
    the possible year keys, and so currently as it is written it won't
    result in necessarily all the entries (i.e., there are different number
    of years in 'reads' and 'citations', etc.). There is a smarter recursive
    way to do it, so please update if you have time.
    """
    # sorted() works on Python 3 too, where dict.keys() has no .sort()
    years = sorted(metrics['histograms']['publications']['all publications'].keys())
    data = {}
    tree = ['metrics']

    def expand(m, d, tree):
        try:
            for key in m:
                try:
                    d[':'.join(tree)] = numpy.array([float(m[i]) for i in years])
                except KeyError:
                    tree.append(key)
                    expand(m[key], d, tree)
        except TypeError:
            data[':'.join(tree)] = m
            tree.pop()
        return tree.pop()

    expand(metrics, data, tree)
    return pandas.DataFrame(data, index=pandas.DatetimeIndex(years))
af1ae73f2b94680adb838682d97ac926a34ec36e
15,131
def is_copula_relation(token1, token2):
    """Return True if `token1` is a copula dependent of `token2`.

    We don't want to capture cases where `token2` is an adjective, because
    we capture those in `is_predicated_adjective_relation()`.
    """
    return (
        (token1.deprel == "cop")
        and (token2.upos != "ADJ")
        and (token1.head == token2.id)
    )
35e7b19c3cf1662c09a8fd80c8099073de11bc51
15,133
def get_glass_spec_category(glass_spec_name):
    """Return the glass specification category for the given glass specification.

    Args:
        glass_spec_name(str): glass specification

    Returns:
        int: the glass specification category for the given glass specification
    """
    # Table 3: glass specification categories
    # (the keys are the Japanese spec names used as lookup values)
    table_3 = {
        '2枚以上のガラス表面にLow-E膜を使用したLow-E三層複層ガラス(日射取得型)': 6,
        '2枚以上のガラス表面にLow-E膜を使用したLow-E三層複層ガラス(日射遮蔽型)': 3,
        'Low-E三層複層ガラス(日射取得型)': 6,
        'Low-E三層複層ガラス(日射遮蔽型)': 6,
        'Low-E二層複層ガラス(日射取得型)': 3,
        'Low-E二層複層ガラス(日射遮蔽型)': 4,
        '二層複層ガラス': 2,
        '単板ガラス2枚を組み合わせたもの': 2,
        '単板ガラス': 1,
        '単板ガラスと複層ガラスを組み合わせたもの': 5,
        '単板ガラスとLow-E複層ガラスを組み合わせたもの(日射取得型)': 7,
        '単板ガラスとLow-E複層ガラスを組み合わせたもの(日射遮蔽型)': 6,
    }
    return table_3[glass_spec_name]
e5fbd29c438384931102744a97e7cd94c96a8478
15,134
import math


def calculate_coordinates(valid_dict: list) -> list:
    """
    Calculate the coordinates using trigonometry

    This takes a list of dictionaries of the format
    `[{'testuser':[6, 4, 3, 10, 5]}, {'testuser2':[1, 5, 17, 20, 6]}]`
    and then returns a list of coordinates. These coordinates can be used to
    represent an irregular polygon whose area can be calculated using the
    shoelace method.

    :param valid_dict: List of dictionaries having key-value of `{username:[score_list]}`
    :type valid_dict: list
    :returns: A list of coordinates `[(2, 0.00), (1, 3.34)]`
    :rtype: list
    """
    score_list = []
    for dictionary in valid_dict:
        score_list.append([dictionary['score'][subclass] for subclass in dictionary['score']])
    theta = (2 * math.pi) / len(score_list[0])
    coordinate_list = []
    for score in score_list:
        coordinates = []
        for index, coordinate in enumerate(score):
            angle = (theta / (2 * math.pi)) * index
            slope = math.tan(angle)
            coordinates.append((coordinate, coordinate * slope))
        coordinate_list.append(coordinates)
    return coordinate_list
b648a965c83df841778fe4030e2b9729cbac2c59
15,135
import os


def get_file(filename=None):
    """
    Returns either imported.expt or imported_experiments.json as filename

    Parameters
    ----------
    filename: str, optional
        user can provide which of the imported files to use or the program
        does the searching for you

    Returns
    -------
    str
        the actual filename of the file we will be using to extract data from
    """
    if filename is None:
        if os.path.isfile("imported.expt"):
            filename = "imported.expt"
        elif os.path.isfile("imported_experiments.json"):
            filename = "imported_experiments.json"
        else:
            print("No available file ...\nMake sure you imported the experiments using dials.import ...")
    return filename
3f66b4635ee20c37fc65ff02cc670682226ba901
15,136
def _safe_decr(line_num):
    """
    Return @line_num decremented by 1, if @line_num is not None, else None.
    """
    if line_num is not None:
        return line_num - 1
ad6092b68240f39ccba13fda44bbf8af22d126f4
15,137
def zfun(p, B, pv0, f):
    """
    Steady state solution for z without CRISPR
    """
    return p * (B - 1 / pv0) / (1 + p * (B - 1 / pv0))
aaddce7a41e0ad9e909e6f5f2bf86354f66c5be8
15,139
def _build_verbose_results(ret, show_success):
    """
    Helper function that builds the results to be returned when
    the verbose parameter is set
    """
    verbose_results = {'Failure': []}

    for tag_data in ret.get('Failure', []):
        tag = tag_data['tag']
        verbose_results['Failure'].append({tag: tag_data})

    verbose_results['Success'] = []

    for tag_data in ret.get('Success', []):
        tag = tag_data['tag']
        verbose_results['Success'].append({tag: tag_data})

    if not show_success and 'Success' in verbose_results:
        verbose_results.pop('Success')

    verbose_results['Controlled'] = []

    for tag_data in ret.get('Controlled', []):
        tag = tag_data['tag']
        verbose_results['Controlled'].append({tag: tag_data})

    if not verbose_results['Controlled']:
        verbose_results.pop('Controlled')

    return verbose_results
53f5ad2525893e7277014664555894f51c601d4d
15,143
def _parse_year(cover_display_date):
    """
    :param cover_display_date:
    :return: entry year
    """
    return cover_display_date[-4:]
c722973f8663aabe15207cc995cff5ec9aece575
15,144