content: string (39 – 9.28k chars) · sha1: string (40 chars) · id: int64 (8 – 710k)
def generate_by_embedding_model(lex, embedding_model, k=25):
    """Described in "Exploring Word Embeddings for Unsupervised Textual
    User-Generated Content Normalization", Bertaglia and Nunes (2016).

    Args:
        lex (dict): The lexicon dictionary.
        embedding_model (obj): The embedding model in word2vec format. Must be readable by gensim.
        k (int): Number of nearest neighbours to evaluate (all experiments ran with k=25).

    Returns:
        dict(str: list(str)): A list of possible corrections for each word.
    """
    cands = {}
    corrs = {}
    cands = {
        word: [
            sims[0]
            for sims in embedding_model.most_similar(word, topn=k)
            if sims[0] not in lex
        ]
        for word in lex
        if word in embedding_model
    }
    for word in cands:
        for c in cands[word]:
            if c not in corrs:
                corrs[c] = []
            corrs[c].append(word)
    return corrs
3d5ec8d9769b6cd7858d71c67d6f7daddb473218
408,310
def del_keys(src_dict, del_keys):
    """Deletes redundant_keys from conf.

    :param src_dict: a dict
    :param del_keys: a list/set/etc with key names that we want to delete.
    :return: the copy of dict without keys from del_keys.
    """
    return {key: value for key, value in src_dict.items() if key not in del_keys}
6c3b7189e47ae8bc1f713943b443fed3c8e42573
301,227
def same_record(lr, rr):
    """A test to see if two records name the same zone entry."""
    return lr["rrset_name"] == rr["rrset_name"] and lr["rrset_type"] == rr["rrset_type"]
29074052a2a503242540cdda9d6ed2c737f6fda7
141,809
def Eq(state, command, *column_values):
    """Equals filter

    Accepts one or more column-value pairs. Keep only rows where value in the
    column equals specified value.

    Example: Eq~column1~1
    """
    df = state.df()
    for c, v in state.expand_column_values(column_values):
        state.log_info(f"Equals: {c} == {v}")
        index = df[c] == v
        try:
            index = index | (df[c] == int(v))
        except:
            pass
        try:
            index = index | (df[c] == float(v))
        except:
            pass
        df = df.loc[index, :]
    return state.with_df(df)
df3297444d3c91d1b88a118593f429583e9b4e94
56,101
def types_of_elements(elements):
    """Characterises whether all the elements passed in the input list are
    'diagonals' (i.e. if elements=['11', '22', '33']), 'off-diagonals'
    (i.e. if elements=['12', '21', '42']), or 'both'
    (i.e. if elements=['11', '22', '21', '12']). String descriptions of
    'diagonals', 'off-diagonals', or 'all' may also be passed.

    Parameters
    ----------
    elements : list of str
        A list of string representations of the elements of a matrix,
        numbered with indexing starting at 1; i.e. ['11', '12', '21', '22'].
        Alternatively, the string description can also be passed, in
        accordance with the specification for the elements argument to the
        figures.complex_space_time() method.

    Returns
    -------
    str
        The characterisation of the list of elements as a whole, returning
        either 'diagonals', 'off-diagonals', or 'both'.
    None
        If the value of elements is passed as None.
    """
    # If elements is passed as None
    if elements is None:
        return None
    # If elements passed as 'all', 'off-diagonals', or 'diagonals'
    if isinstance(elements, str):
        assert elements in ['all', 'off-diagonals', 'diagonals']
        if elements == 'all':
            return 'both'
        return elements
    # If elements are passed in list form i.e. ['11', '21', ...]
    if isinstance(elements, list):
        if all([int(element[0]) == int(element[1]) for element in elements]):
            return 'diagonals'
        if all([int(element[0]) != int(element[1]) for element in elements]):
            return 'off-diagonals'
        return 'both'
    raise ValueError('Incorrect format for elements')
18ca6bd4e9f04bbfda98016f20b6b2fabb211274
382,717
import mimetypes

def get_mimetype(filename):
    """Get mimetype for local file

    filename: string representation of file's path
    """
    mime, mime_sub = mimetypes.guess_type(filename)
    if not mime:
        mime = "application/octet-stream"
    elif mime_sub:
        mime = f"{mime}+{mime_sub}"
    return mime
0090bec93a7028e9c9e2791c01fed5bd2565b091
180,981
def isotopeMaxBD(isotope):
    """Setting the theoretical max BD shift of an isotope (if 100% incorporation).

    Parameters
    ----------
    isotope : str
        name of isotope

    Returns
    -------
    float : max BD value
    """
    psblIsotopes = {'13C': 0.036, '15N': 0.016}
    try:
        return psblIsotopes[isotope.upper()]
    except KeyError:
        raise KeyError('Isotope "{}" not supported.'.format(isotope))
7fff5bc6a54034e68357af6a08e000de34d59283
43,081
from datetime import datetime

def parse_iso8601(string):
    """Parses a string such as 2020-01-15T12:29:59.432228Z and returns a
    matching datetime object.
    """
    return datetime.strptime(string, "%Y-%m-%dT%H:%M:%S.%fZ")
ec93ad26eb06fc1c67344a39baee6e7f83405fd5
467,565
def _impaired_or_not(z_score, cutoff):
    """Dichotomize z-score by applying a cutoff

    :param z_score: the z-score, i.e. performance relative to a reference population
    :param cutoff: the cut-off to decide impaired (<=) or preserved (>) on the cognitive domain
    :return: 1 if impaired, 0 if preserved
    """
    if z_score <= cutoff:
        return 1
    else:
        return 0
8d53497beca4f4f7962cf3a49086c366f4c64fea
198,030
def play_episode(env, policy, render_option, min_steps):
    """Play an episode with the given policy.

    :param min_steps: the minimum steps the game should be played
    :param env: the OpenAI gym environment
    :param policy: the policy that should be used to generate actions
    :param render_option: how the game play should be rendered
    :return: episode reward
    """
    state = env.reset()
    done = False
    episode_reward = 0.0
    step_cnt = 0
    while not done or step_cnt < min_steps:
        if render_option == 'collect':
            env.render()
        action = policy(state)
        next_state, reward, done, _ = env.step(action)
        episode_reward += reward
        state = next_state
        step_cnt += 1
    print('episode finished with reward {0}'.format(episode_reward))
    return episode_reward
4644d80df0dcbfc1ca6172ec949235ea9b2b1f0e
109,355
def selectnpop(dic, lis, i=0):
    """If `dic[lis[i]]` exists, return it after popping `lis[i]`.

    Falls back on `dic[None]`.
    """
    try:
        selection = dic[lis[i]]
    except (IndexError, KeyError):
        return dic[None]
    lis.pop(i)
    return selection
80af9794c0663c013dfc79d80dc083bc0e7bc8d4
286,695
def node_list_to_path(G, node_list):
    """Given a list of nodes, return a list of lines that together follow the
    path defined by the list of nodes.

    Parameters
    ----------
    G : networkx multidigraph
    node_list : list
        the route as a list of nodes

    Returns
    -------
    lines : list of lines given as pairs ( (x_start, y_start), (x_stop, y_stop) )
    """
    edge_nodes = list(zip(node_list[:-1], node_list[1:]))
    lines = []
    for u, v in edge_nodes:
        # if there are parallel edges, select the shortest in length
        data = min(G.get_edge_data(u, v).values(), key=lambda x: x['length'])
        # if it has a geometry attribute
        if 'geometry' in data:
            # add them to the list of lines to plot
            xs, ys = data['geometry'].xy
            lines.append(list(zip(xs, ys)))
        else:
            # if it doesn't have a geometry attribute,
            # then the edge is a straight line from node to node
            x1 = G.nodes[u]['x']
            y1 = G.nodes[u]['y']
            x2 = G.nodes[v]['x']
            y2 = G.nodes[v]['y']
            line = [(x1, y1), (x2, y2)]
            lines.append(line)
    return lines
861f2155ebf2a57b4b266fe347d688052c1bcb89
502,598
def get_suspecious_items(_items):
    """Items with libraryCatalog==Zotero

    These items are suspicious, because they were imported from pdf files and
    maybe Zotero did not import the metadata properly.

    :param _items: Zotero library items
    :type _items: list containing dicts
    :returns: list containing dicts
    """
    list_catalog_zotero = []
    for item in _items:
        if "libraryCatalog" in item["data"]:
            catalog = item["data"]["libraryCatalog"]
            if catalog == "Zotero":
                list_catalog_zotero.append(item)
    return list_catalog_zotero
d067dc2756630b814552b461927c31530cb67e4f
636,843
def stable_softminus(x):
    """Calculating log(1 - exp(x)) (x < 0) in a numerically stable way"""
    # negative normal range
    y1 = (1 - x.exp()).log()
    # negative zero
    y2 = (- x - x.pow(2)/2 - x.pow(3)/6).log()
    # negative infinity
    y3 = - x.exp() - (2*x).exp()/2 - (3*x).exp()/3
    switch2 = (1 - x.exp()) == 0
    switch3 = (1 - x.exp()) == 1
    switch1 = ~switch2 ^ switch3
    y1[~switch1] = 0
    y2[~switch2] = 0
    y3[~switch3] = 0
    y = y1 + y2 + y3
    return y
da6f017da496c3f49158c90073faab30dd2b7af2
183,093
def to_alnum(string):
    """Get rid of non-alphanumeric characters except underscores"""
    return ''.join(char for char in string if char.isalnum() or char == '_')
21a8179d98bd6ba6668aabe6b945751ed7821f9f
351,840
def get_bbox_inside_image(label_bbox: list, image_bbox: list) -> list:
    """Corrects label_bbox so that all points are inside image bbox.

    Returns the corrected bbox.
    """
    xA = max(label_bbox[0], image_bbox[0])
    yA = max(label_bbox[1], image_bbox[1])
    xB = min(label_bbox[2], image_bbox[2])
    yB = min(label_bbox[3], image_bbox[3])
    corrected_label_bbox = [xA, yA, xB, yB]
    return corrected_label_bbox
c14d06796d1668d06d39ffec6f66e1bc6929b677
683,308
def get_X_Y(**cosmo):
    """The fraction of baryonic mass in hydrogen and helium.

    Assumes X_H + Y_He = 1.

    You must specify either 'X_H', or 'Y_He', or both.
    """
    if 'X_H' in cosmo and 'Y_He' not in cosmo:
        X_H = cosmo['X_H']
        Y_He = 1. - X_H
    elif 'Y_He' in cosmo and 'X_H' not in cosmo:
        Y_He = cosmo['Y_He']
        X_H = 1. - Y_He
    else:
        X_H = cosmo['X_H']
        Y_He = cosmo['Y_He']
    return X_H, Y_He
02268d861543ba0bc7deaafa3c31d0cc50231112
670,645
import json

def parse_user_parameters(params_str):
    """Parses a given UserParameters string.

    A UserParameters string must be a JSON object.

    :type params_str: str
    :param params_str: UserParameters string to be parsed.
    :rtype: dict
    :return: parsed user parameters.
    :raises JSONDecodeError: if ``params_str`` is not a valid JSON text.
    :raises ValueError: if ``params_str`` is not a JSON object.
    """
    params = json.loads(params_str)
    if not isinstance(params, dict):
        raise ValueError(f'UserParameters must be a JSON object not {type(params)}')
    return params
2e464aba708b2249323c3eec4855d72e8900eac3
76,104
def replacer(svgFile, toReplace, newData):
    """Searches through SVG file until it finds a toReplace, once found,
    replaces it with newData
    """
    for count in range(0, len(svgFile)):
        # Check if the current line in the SVG file has the required string
        found = svgFile[count].find(toReplace)
        if not (found == -1):
            # We know exact location on the line that Name and Twitter are
            location = (svgFile[count]).find(toReplace)
            # Grab part of line before the searched for string
            partone = (str(svgFile[count])[:location])
            # Grab part of line after the searched for string
            parttwo = (str(svgFile[count])[(location + len(toReplace)):])
            svgFile[count] = partone + newData + parttwo
            break
    return svgFile
36cc896bb12e9e8d9539b072986ea4c255912a34
680,728
def _invoice_or_bill_status_to_classname(invoice_or_bill):
    """Return the appropriate css classname for the invoice/bill status"""
    if not invoice_or_bill.pass_full_checking():
        checks = invoice_or_bill.full_check()
        for c in checks:
            if c.level == c.LEVEL_ERROR:
                return 'danger'
        return 'warning'
    if invoice_or_bill.is_fully_paid():
        return 'success'
    elif invoice_or_bill.is_partially_paid():
        return 'info'
    else:
        return ''
673e7ed4fb10fc10154efd2f9072cc8b92ae6a43
424,128
def output_notebook(name: str, input: str) -> str:
    """Generate name for output notebook.

    If an output name is given it is returned as it is. Otherwise, the name of
    the input notebook will have the suffix ``.ipynb`` replaced by
    ``.out.ipynb``. If the input notebook does not have a suffix ``.ipynb``
    the suffix ``.out.ipynb`` is appended to the input notebook name.

    Parameters
    ----------
    name: string
        User-provided name for the output notebook. This value may be None.
    input: string
        Name of the input notebook.

    Returns
    -------
    string
    """
    if name:
        return name
    return input[:-6] + '.out.ipynb' if input.endswith('.ipynb') else input + '.out.ipynb'
1e56dd877888b591345f67d7e2537716d6b07ba9
229,983
def _py_not(a):
    """Default Python implementation of the "not_" operator."""
    return not a
f259dff4c26846543851f470e2b168f75e3e3fd1
254,665
def _get_h3_range_lst(h3_min, h3_max):
    """Helper to get H3 range list."""
    return list(range(h3_min, h3_max + 1))
f179d2ed880a87cf7a8ef37972b805ced4406a1e
313,370
def get_slice(img, ori, slc):
    """Extract one slice from a 3D numpy.ndarray image.

    :param img: np.array. input image to extract slice.
    :param ori: int. orientation to extract slice. 1. sagittal. 2. coronal. 3. axial.
    :param slc: int. slice number to extract.
    :return: extracted 2D slice: np.array.
    """
    if ori == 1 and 0 <= slc < img.shape[0]:
        return img[slc, :, :]
    elif ori == 2 and 0 <= slc < img.shape[1]:
        return img[:, slc, :]
    elif ori == 3 and 0 <= slc < img.shape[2]:
        return img[:, :, slc]
    else:
        raise Exception('Invalid orientation or slice number')
dcf03eec0d16c68f55f701b2d312dbf4fa946ee7
697,503
def rest_api_port_number(port_generator):
    """Unique port for the REST API server."""
    return next(port_generator)
5c4343d4dd44321acada2b155bb9294a98154d83
446,678
def classname(value, options=''):
    """Returns classname.

    If options is given, checks if class name is in there before returning,
    else it returns an empty string.
    """
    cn = value.__class__.__name__.lower()
    if not options or cn in options:
        return cn
    return ''
396fd12ec4de7516c99f7ca4dea601d18119b525
386,273
def count_tracks_by_artist(df, artist):
    """returns count of tracks given dataframe and artist name;
    dataframe has to have `name_artist` and `track_id` columns"""
    mask = df["name_artist"] == artist
    return df.loc[mask, "track_id"].count()
cac9d343e1293981f5173d4ec151d4e25dc121c6
207,090
def calculate_new_average(avg, N, new_val):
    """Calculate new average given a new value and an existing average.

    Args:
        avg: The old average value.
        N: The old number of data points averaged over.
        new_val: The new value to recalculate the average with.

    Returns:
        The new average value.
    """
    return (avg * N + new_val) / (N + 1)
d810f590a3a46b16019e78861384ad754bc13ba0
378,999
def stringify(sentence, vocab):
    """Given a numericalized sentence, fetch the correct word from the vocab
    and return it along with token indices in the new sentence

    Example:
        sentence : "1 2 3"
        vocab : { 1 : "Get", 2 : "me", 3 : "water" }
        return value : [
            ["Get", [0, 3]],
            ["me", [4, 6]],
            ["water", [7, 12]],
        ]
    """
    return_val = []
    length_so_far = -1
    for idx in sentence.strip().split(" "):
        beg_index = length_so_far + 1
        word = vocab[int(idx)]
        end_index = beg_index + len(word)
        length_so_far = end_index
        return_val.append([word, [beg_index, end_index]])
    return return_val
ed66d167ca03a22b5a249710dbb652358218ed4e
393,817
def strftime(datetime, formatstr):
    """Uses Python's strftime with some tweaks"""
    return datetime.strftime(formatstr).lstrip("0").replace(" 0", " ")
413d552bb95a25d87eff2fd7400bf1114ee5d4a2
389,115
def calculate_zf_NAP(df, z_id):
    """Assign the zf with respect to NAP to each layer of a DataFrame.

    :param df: Original DataFrame.
    :param z_id: (float) Elevation with respect to the NAP of my field.
    :return: DataFrame with zf_NAP column.
    """
    df["zf_NAP"] = z_id - df["zf"]
    return df
90421d94043d5c53c34b5934897d84948b6fec36
300,611
def select_year(movie_list: list, year: int):
    """Select movies filmed in a specific year and returns a list of tuples
    (name, location)
    """
    n_year_movies = []
    for movie in movie_list:
        if year in movie:
            n_year_movies.append((movie[0], movie[-1]))
    print(f"Selected {len(n_year_movies)} movies/shows filmed in {year}.")
    return n_year_movies
95aa1c4a9fa6e163f6df734648a8caa077fdbdd4
674,991
import random

def get_random_int() -> str:
    """Returns random int number"""
    number = str(random.randint(-2147483648, 2147483648))
    return number
b4b24c8cd746038b6571e7c80b882ebdc2392786
138,982
from typing import Callable
from datetime import datetime

def on_date(month: int, day: int) -> Callable[[datetime], bool]:
    """Filter that allows events that match a specific date in any year

    Args:
        - month (int): the month as a number (1 = January)
        - day (int): the day as a number

    Returns:
        - Callable[[datetime], bool]: a filter function
    """
    def _filter_fn(dt: datetime) -> bool:
        return (dt.month, dt.day) == (month, day)
    return _filter_fn
4f2c166a12505b1d1e0e9aec55e06e0e94d5efc7
634,167
from pathlib import Path

def _getname(infile):
    """Get file names only.

    Examples
    --------
    >>> _getname("drop/path/filename.txt")
    'filename.txt'
    >>> _getname(["drop/path/filename.txt", "other/path/filename2.txt"])
    ['filename.txt', 'filename2.txt']
    """
    if isinstance(infile, (list, tuple)):
        return [Path(f).name for f in infile]
    return Path(infile).name
2384a747a72ea28f107610bc4aa609096366c433
620,289
def _pad_b64(b64):
    """Fix padding for base64 value if necessary"""
    pad_len = len(b64) % 4
    if pad_len != 0:
        missing_padding = (4 - pad_len)
        b64 += '=' * missing_padding
    return b64
b07e96004946573bef11543b6b6983e5b265276a
529,986
def remove_terms(pos_synsets, remove_dict, issynset=False):
    """Remove synsets from the part of speech synset dictionary based on the word.

    :param pos_synsets: dictionary mapping pos to list of bi-tuples, where first
        item is the word and second item is the synset
    :type pos_synsets: dict
    :param remove_dict: dictionary mapping pos to list of words to remove
    :type remove_dict: dict
    :param issynset: is remove_dict referring to words or synsets
    :type issynset: bool
    :returns: dictionary of pos_synsets after removal
    :rtype: dict
    """
    out_dict = {}
    for pos, synsets in pos_synsets.items():
        remaining_synsets = []
        for synset in synsets:
            if issynset:
                if not (synset[1].name() in remove_dict[pos]):
                    # print(synset[1])
                    remaining_synsets.append(synset)
            else:
                if not (synset[0] in remove_dict[pos]):
                    remaining_synsets.append(synset)
        out_dict[pos] = remaining_synsets
    return out_dict
cf4f65584f16fe5a3105ecc78b696af0281dbfe7
466,046
def check_bracket_sequence(bracket_sequence: str) -> bool:
    """Checks bracket sequence is correct or not.

    :param bracket_sequence: string sequence of brackets
    :return: True if sequence is correct, e.g. each open bracket has its
        closed variant, else False
    """
    opening_brackets = ['(', '[', '{']
    closing_brackets = [')', ']', '}']
    stack = []
    for element in bracket_sequence:
        if element in opening_brackets:
            stack.append(element)
        else:
            # a closing bracket with nothing left on the stack cannot match
            if not stack:
                return False
            if opening_brackets.index(stack[-1]) != closing_brackets.index(element):
                return False
            stack.pop(-1)
    if len(stack):
        return False
    return True
18374afcfe9e8ba562e9099a55ec5d2dac5df4e1
556,188
def get_filtered_atom_list(struct):
    """Filters out unwanted atoms in PDB structure.

    Removes heteroatoms from atoms when finding neighbor residues.

    Parameters
    ----------
    struct : Bio.PDB structure
        biopython structure object

    Returns
    -------
    atom_list : list
        list of atoms after filtering
    """
    atom_list = []
    for model in struct:
        for chain in model:
            for residue in chain:
                # ignore hetero residues
                if not (residue.get_full_id()[3][0] == ' '):
                    continue
                # add residue atoms
                atom_list.extend(residue.get_list())
    return atom_list
b839067b026a346332f6a5b3220ca010f1d4cca5
470,270
def test_func(context):
    """OCID thread function for testing purposes.

    Parameters
    ----------
    context : dict
        The thread context.

    Returns
    -------
    dict
        The new context.
    """
    if 'fname' not in context:
        raise ValueError("fname must be defined in the context")
    if 'counter' not in context:
        raise ValueError("counter must be defined in the context")
    with open(context['fname'], "w+") as f:
        f.write("hello %d\n" % context['counter'])
    context['counter'] += 1
    return context
6c24a8c6601e9d975900217f626b58e87f6cac29
188,844
from unittest.mock import Mock
from unittest.mock import patch
from typing import cast

def timer() -> Mock:
    """Mock our periodic timer"""
    with patch("edge.edge.PeriodicTimer", autospec=True) as mock:
        return cast(Mock, mock.return_value)
07ab8821f7bacc53ab11e0d8a37cc03c24218253
569,627
import warnings

def upsert_into(self, table):
    """Deprecated. Use `upsert_into_on_conflict_do_nothing` instead"""
    warnings.warn(
        "`upsert_into` is deprecated. Please use `upsert_into_on_conflict_do_nothing`",
        DeprecationWarning,
    )
    return self.upsert_into_on_conflict_do_nothing(table)
d08217f8bf2160836898177edd40cb1ed3d00836
397,229
import re

def getmatches(datafilelist, regex=None):
    """Takes list of search strings + regex. Returns a list of match objects"""
    if not regex:
        regex = re.compile(
            r"""
            (?P<ftype>[A-Z0-9]{5})  # band type of data file
            _[a-z]+                 # sat id
            _d(?P<date>\d{8})       # acq date
            _t(?P<time>\d{7})       # granule start time UTC
            _e\d+                   # granule end time UTC
            _b(?P<orbit>\d+)        # orbit number
            _c\d+                   # file creation date/time
            _\w+.h5                 # more stuff
            """,
            re.X
        )
    return [regex.search(filename) for filename in datafilelist]
99f7684e9fae3a3beed5a2a26f28ea2fe3ab9f14
421,892
def groupby(iterable, key=None):
    """Group items from iterable by key and return a dictionary where values
    are the lists of items from the iterable having the same key.

    :param key: function to apply to each element of the iterable. If not
        specified or is None key defaults to identity function and returns
        the element unchanged.

    :Example:

    >>> groupby([0, 1, 2, 3], key=lambda x: x % 2 == 0)
    {True: [0, 2], False: [1, 3]}
    """
    groups = {}
    for item in iterable:
        if key is None:
            key_value = item
        else:
            key_value = key(item)
        groups.setdefault(key_value, []).append(item)
    return groups
dccd69a0ffdc30e6d8b3950f14e3e2649e4c4409
657,456
def divisible_by_5(s):
    """Return a comma-separated string of the binary numbers in `s` that are divisible by 5."""
    return ','.join([x for x in s.split(',') if int(x, 2) % 5 == 0])
46ab315bd313790e64e057081049736b675606ab
554,213
def interval_cover(I):
    """Minimum interval cover

    :param I: list of closed intervals
    :returns: minimum list of points covering all intervals
    :complexity: O(n log n)
    """
    S = []
    for start, end in sorted(I, key=lambda v: (v[1], v[0])):
        if not S or S[-1] < start:
            S.append(end)
    return S
df312f84af73cb1fa04b2d83e8d7133a0b0bfa00
387,506
def get_table_title(line):
    """Get table name.

    Args:
        line(str): output
    """
    if ":" in line:
        table_title = line.split(":")[0]
        return table_title
    else:
        return None
bfb9e3db2b95e8e087492ff9c6c5e6a9f3284ec6
541,541
def clean_cases(text: str) -> str:
    """Makes text all lowercase.

    Arguments:
        text: The text to be converted to all lowercase.

    Returns:
        The lowercase text.
    """
    return text.lower()
35f2c5ec1088f9e2fe0bba7a3f2f3f4b82333925
539,715
import io

def parse_prompt_file(prompt_file):
    """Parse the UltraSuite prompt file.

    First line contains the prompt and the second contains the date and time.

    :param prompt_file:
    :return:
    """
    with io.open(prompt_file, mode="r", encoding='utf-8', errors='ignore') as prompt_f:
        return [prompt_f.readline().rstrip(), prompt_f.readline().rstrip()]
f93030d61d03d44890fa3333454fa2b124056363
480,427
def build_sprint(operational_year, operational_quarter, sprint_length, sprint_index):
    """Helper function to build Sprint name for JIRA.

    :param String operational_year:
    :param String operational_quarter:
    :param String sprint_length:
    :param String sprint_index:
    :return: Formatted Sprint name
    :rtype: String
    """
    return 'Y%s-Q%s-L%s-S%s' % (
        operational_year,
        operational_quarter,
        sprint_length,
        sprint_index
    )
a3b6ec2c0bf370b0b4d08eb3b5e0a3cbba2386b6
217,210
def shorten_task_names(configs, common_task_prefix):
    """Shorten the task names of the configs by removing the common task prefix."""
    new_configs = []
    for config in configs:
        config.task_name = config.task_name.replace(common_task_prefix, "")
        new_configs += [config]
    return new_configs
b73ce8be7adece6da1f6ed218f3d78c52c5b1d94
625,522
def make_date_string(date_tuple):
    """Accepts a date tuple (year, month, day) and returns a formatted string
    of the form YYYY-MM-DD."""
    year, month, day = date_tuple
    return "%4d-%02d-%02d" % (year, month, day)
f9b7728ffe39eff38f3fb27783de70a8a84c36af
528,555
def _make_intro_dict(request):
    """make poet's introduction dict

    :param request: wsgi request
    :return: dict
    """
    param = request.args
    return {
        'poet': param.get('poet', '李白'),
        'dynasty': param.get('dynasty', '唐')
    }
0de7636f774880517527867094095dee4b7ca1d7
582,706
def win(game_df, verbose=False):
    """if game_df.iloc[-1].status == 'GAME_END', adds win loss label columns

    Args:
        game_df (pd.DataFrame)
        verbose (Bool): print dataframe after

    - takes the last row
    - checks if the status is 'GAME_END'
    - then adds new columns for the winner of the game based on the score
    """
    case = ""
    last_row = game_df.iloc[-1, :]
    status = last_row.status
    ret = None
    if status == "GAME_END":
        if last_row.a_pts > last_row.h_pts:
            a_win = True
            h_win = False
            case = f"away {last_row.a_team} win"
        elif last_row.a_pts < last_row.h_pts:
            a_win = False
            h_win = True
            case = f"home {last_row.h_team} win"
        else:
            case = "game tie"
            a_win = False
            h_win = False
        game_df["a_win"] = a_win
        game_df["h_win"] = h_win
        ret = game_df
    else:
        case = "no game end status"
    if verbose:
        print(case)
    return ret
25691da33aa6c3cffeb0ca7443448e4f0967a0d4
155,410
import re

def __replace_all(repls: dict, input: str) -> str:
    """Replaces from the string **input** all the occurrences of the keys
    element of the dictionary **repls** with their relative value.

    :param dict repls: dictionary containing the mapping between the values
        to be changed and their appropriate substitution;
    :param str input: original string.
    :return: *(str)*, string with the appropriate values replaced.
    """
    return re.sub('|'.join(re.escape(key) for key in repls.keys()),
                  lambda k: repls[k.group(0)], input)
dedfce66d06122d88b9f7ca799480c0d4a0af956
622,981
from pathlib import Path

def list_of_files(tmpdir_factory):
    """Create a dummy list of files for s3 list objects by date."""
    tmp_file = tmpdir_factory.mktemp("tmp").join("temp_text_file_1.txt")
    tmp_file2 = tmpdir_factory.mktemp("tmp").join("temp_text_file_2.txt")
    tmp_file3 = tmpdir_factory.mktemp("tmp").join("temp_text_file_3.txt")
    for file in [tmp_file, tmp_file2, tmp_file3]:
        file.write_text("botree test!", encoding="utf-8")
    return [Path(tmp_file), Path(tmp_file2), Path(tmp_file3)]
415554c4aa7dd0983562d1de1a79d45cb7fb5430
412,585
def get_doc(collection, doc_id):
    """Retrieve a Firestore doc, with retries to allow Function time to trigger"""
    doc = collection.document(doc_id).get()
    if doc.exists:
        return doc
37d97f47f1c556cb7009e23d1c04f46c67902a49
32,343
import torch

def swish(x):
    """Swish activation function, from Ramachandran, Zoph, Le 2017.
    "Searching for Activation Functions"
    """
    return x * torch.sigmoid(x)
f630c0851e1afe0ac72ff2f5dd06d787fa556194
598,510
def create_histogram_dict(words_list):
    """Return a dictionary representing a histogram of words in a list.

    Parameters:
        words_list(list): list of strings representing words

    Returns:
        histogram(dict): each key a unique word, values are number of word appearances
    """
    histogram = dict()
    words = histogram.keys()
    for word in words_list:
        if word.lower() not in words:
            histogram[word.lower()] = 1
        else:
            histogram[word.lower()] += 1
    return histogram
1da2d02b888cc5eca5e40dca2814a11bdb69cebb
514,275
def realtag(element):
    """Strip namespace poop off the front of a tag."""
    try:
        return element.tag.rsplit('}', 1)[1]
    except IndexError:
        # rsplit returns a single-element list when there is no namespace
        return element.tag
8db0494b0651b78d7437cdd88301ace4005f3971
495,198
def find_and_assign(current_function, order_list, l_in, start_index):
    """Assigns the values in front of the monomial to the appropriate element
    of transform matrix L

    Parameters
    ----------
    current_function : Poly
        i-th Lie derivative
    order_list : List[tuple]
        Order of monomials
    l_in : csr_matrix
        Coefficient matrix which maps monomials to polynomials
    start_index : int
        start point of Lie derivative in the linear matrix L

    Returns
    -------
    linear matrix L
    """
    # Get dimension of current Lie derivative
    n = len(current_function)
    # iterate over each element of Lie derivative
    for i in range(0, n):
        # Get monomials of Lie derivative
        monomials_i = current_function[i].monoms()
        # Get coefficients of monomials
        coefficients_i = current_function[i].coeffs()
        # Iterate over each monomial
        for j in range(0, len(monomials_i)):
            # Find the corresponding column number for the monomial
            col_number = order_list.index(monomials_i[j])
            # Update element of linear matrix L
            l_in[start_index + i, col_number] += coefficients_i[j]
    return l_in
3c65ef92952556575f14b7b9fd3463af93c9aa9f
347,814
def check_invalid(string, *invalids, defaults=True):
    """Checks if input string matches an invalid value"""
    # Checks string against inputted invalid values
    for v in invalids:
        if string == v:
            return True
    # Checks string against default invalid values, if defaults=True
    if defaults == True:
        default_invalids = ['INC', 'inc', 'incomplete', 'NaN', 'nan', 'N/A', 'n/a', 'missing']
        for v in default_invalids:
            if string == v:
                return True
    # For valid strings
    return False
6e9e20beebe8e0b0baed680219fd93453d7f4ce3
706,167
import logging

def data_compare(real_out, expect_out, atol=0.001, rtol=0.001):
    """Compare the output between the real and the expect.

    :param real_out: the real output
    :type real_out: list
    :param expect_out: the expected output, or the benchmark
    :type expect_out: list
    :param atol: the absolute error, defaults to 0.001
    :type atol: float, optional
    :param rtol: the relative error, defaults to 0.001
    :type rtol: float, optional
    :return: return the error count and the error ratio
    :rtype: [type]
    """
    error_count = 0
    if len(real_out) != len(expect_out):
        raise ValueError("The size of real_out and expect_out must be equal.")
    for n in range(len(real_out)):
        if abs(real_out[n] - expect_out[n]) > atol or \
                abs(real_out[n] - expect_out[n]) / abs(expect_out[n]) > rtol:
            logging.warning("pos: {}, real_out: {}, expect_out: {}, diff: {} "
                            .format([n], real_out[n], expect_out[n],
                                    real_out[n] - expect_out[n]))
            error_count += 1
    return error_count, error_count / len(real_out)
4c7b048d64f83cd189885ba08a37f0d937cd91a5
308,848
import torch

def invSquare(input, axis=1):
    """Apply inverse square normalization on input at certain axis.

    Parameters
    ----------
    input: Tensor (N*L or rank>2)
    axis: the axis to apply the normalization

    Returns:
        Tensor with inverse square normalization applied on that dimension.
    """
    input_size = input.size()
    trans_input = input.transpose(axis, len(input_size) - 1)
    trans_size = trans_input.size()
    input_2d = trans_input.contiguous().view(-1, trans_size[-1])
    square_2d = input_2d ** (-2)
    sum_square_2d = torch.sum(square_2d, 1, keepdim=True)
    square_norm_2d = square_2d / sum_square_2d
    square_norm_nd = square_norm_2d.view(*trans_size)
    return square_norm_nd.transpose(axis, len(input_size) - 1)
ca7c7702ab1d36cf2ae474dbf10981bf1dab7f4b
73,062
import textwrap
import re

def unwrap(text: str) -> str:
    """Unwraps multi-line text to a single line"""
    # remove initial line indent
    text = textwrap.dedent(text)
    # remove leading/trailing whitespace
    text = text.strip()
    # remove consecutive whitespace
    return re.sub(r"\s+", " ", text)
107c192765c798216ccc49b972c8d0fa49c4a470
82,714
def apply_weighting(object_to_be_weighted, weights):
    """Replicate the components of object_to_be_weighted using the
    corresponding weights to define the number of replicates.

    Args:
        object_to_be_weighted: could be a list or an array
        weights: a list of integers

    Returns:
        the transformed object
    """
    zipped = zip(object_to_be_weighted, weights)
    weighted_object = []
    for item in zipped:
        for j in range(item[1]):
            weighted_object.append(item[0])
    return weighted_object
51161b4ed6e6540390487c40613838083f00fd3b
691,675
def adam_args(parser, dbeta1=0.99, dbeta2=0.99, depsilon='1e-8',
              dbeta1_fb=0.99, dbeta2_fb=0.99, depsilon_fb='1e-8'):
    """Training options for the Adam optimizer

    Args:
        parser (argparse.ArgumentParser): argument parser
        (....): Default values for the arguments

    Returns:
        The created argument group, in case more options should be added.
    """
    agroup = parser.add_argument_group('Training options for the '
                                       'Adam optimizer')
    agroup.add_argument('--beta1', type=float, default=dbeta1,
                        help='beta1 training hyperparameter for the adam '
                             'optimizer. Default: %(default)s')
    agroup.add_argument('--beta2', type=float, default=dbeta2,
                        help='beta2 training hyperparameter for the adam '
                             'optimizer. Default: %(default)s')
    agroup.add_argument('--epsilon', type=str, default=depsilon,
                        help='epsilon training hyperparameter for the adam '
                             'optimizer. Default: %(default)s')
    agroup.add_argument('--beta1_fb', type=float, default=dbeta1_fb,
                        help='beta1 training hyperparameter for the adam '
                             'feedback optimizer. Default: %(default)s')
    agroup.add_argument('--beta2_fb', type=float, default=dbeta2_fb,
                        help='beta2 training hyperparameter for the adam '
                             'feedback optimizer. Default: %(default)s')
    agroup.add_argument('--epsilon_fb', type=str, default=depsilon_fb,
                        help='epsilon training hyperparameter for the adam '
                             'feedback optimizer. Default: %(default)s')
    return agroup
5e85c476c00ed824f6b4392bfb5f6a36d8fc93ab
147,331
def sanitizeStr(data):
    """Escape all characters that would trigger an error.

    Parameters
    ----------
    data: str
        the str to sanitize

    Returns
    -------
    str
        The sanitized data.
    """
    data = " ".join(data.split())
    new_msg = []
    for letter in data:
        if letter in ['"', "\\"]:
            new_msg.append("\\")
        new_msg.append(letter)
    return "".join(new_msg)
6fcd1455a01997d526cfd178d98ee3e9eca3c888
28,915
import ast

def subscript(value: ast.expr, idx: ast.expr) -> ast.Subscript:
    """Generate an ast subscript operation"""
    return ast.Subscript(value=value, slice=idx, ctx=ast.Load())
976d08a2c320f66e5c8afe09c2ef26d45fceabac
163,794
def mesh_size(mesh, unit_length):
    """Return the maximum extent of the mesh along any of the x/y/z axes."""
    coords = mesh.coordinates()
    max_extent = max(coords.max(axis=0) - coords.min(axis=0))
    return max_extent * unit_length
52f80c17df7be749742a7c99f779cef8734da65d
304,926
def check_number(x, lower=None, upper=None):
    """This function returns True if the first argument is a number and it
    lies between the next two arguments. Otherwise, returns False.

    :param x: Value to be checked
    :type x: number
    :param lower: minimum value accepted
    :type lower: number
    :param upper: maximum value accepted
    :type upper: number
    :return: if 'x' check pass
    :rtype: bool
    """
    if x is None:
        return False
    lower = lower if lower is not None else float("-inf")
    upper = upper if upper is not None else float("inf")
    is_number = isinstance(x, float) or isinstance(x, int)
    return is_number and (x >= lower and x <= upper)
cfd16e876b2dc111679ceb3d49164f2718d73825
419,213
import pkgutil
import importlib

def scan_for_agents(do_registration=True):
    """Identify and import ocs Agent plugin scripts. This will find all
    modules in the current module search path (sys.path) that begin with the
    name 'ocs_plugin\_'.

    Args:
        do_registration (bool): If True, the modules are imported, which
            likely causes them to call register_agent_class on each agent
            they represent.

    Returns:
        The list of discovered module names.
    """
    items = []
    for modinfo in pkgutil.iter_modules():
        if modinfo.name.startswith('ocs_plugin_'):
            items.append(modinfo.name)
            if do_registration:
                importlib.import_module(modinfo.name)
    return items
0052fafa9dc3936f64b502b8e4b9384835551445
325,548
def is_pow2(n):
    """Check whether a number is power of 2

    Parameters
    ----------
    n: int
        A positive integer number

    Returns
    -------
    bool
    """
    return n > 0 and ((n & (n - 1)) == 0)
    # Solution 2
    # return math.log2(n) % 1 == 0
6f386513eb4931339faf61c2552a3a8b7cde3053
639,692
def plug_parent(plug):
    """Return ``plug`` parent, if it has one, None otherwise.

    Args:
        plug (MPlug): Plug we want the parent of.

    Returns:
        MPlug, None:

    Example:
        >>> modifier = OpenMaya.MDagModifier()
        >>> node = OpenMaya.MFnDagNode(modifier.createNode('transform'))
        >>> modifier = modifier.doIt()
        >>> translate = node.findPlug('translate', False)
        >>> translate_x = node.findPlug('translateX', False)
        >>> plug_parent(translate) == None
        True
        >>> plug_parent(translate_x) == translate
        True
    """
    if plug.isChild:
        return plug.parent()
    elif plug.isElement:
        return plug.array()
    return None
c4b32e13fd50d07b70db8db5ef3b7ca033034119
287,542
def get_simple_rl_task_names(task_names: list) -> list:
    """Simplifies the task name.

    For each task name in the provided list, this function splits the names
    using an underscore as delimiter, then returns the last element in the
    split list as the simplified name.

    Args:
        task_names (list): The list of task names to simplify.

    Returns:
        list: The list of simplified task names.
    """
    simple_names = []
    for t in task_names:
        splits = str.split(t, "_")
        simple_names.append(splits[-1].lower())
    return simple_names
83c6b658d8ff5e19c330910c1184befcd1b57ff1
571,075
import requests

def censusvar(src, year, var):
    """Download information on a list of variables from Census API.

    Args:
        src (str): Census data source: 'acs1' for ACS 1-year estimates, 'acs5' for ACS 5-year estimates,
            'acs3' for ACS 3-year estimates, 'acsse' for ACS 1-year supplemental estimates, 'sf1' for SF1 data.
        year (int): Year of data.
        var (list of str): Names of Census variable.

    Returns:
        dict: Dictionary with keys 'concept' (overall concept the variable falls under),
        'label' (variable label), and 'predicateType' (variable type).

    Examples::

        censusdata.censusvar('sf1', 2010, ['P0010001'])  # Returns information on the variable P0010001 from the 2010 Census SF1.
    """
    assert src == 'acs1' or src == 'acs3' or src == 'acs5' or src == 'acsse' or src == 'sf1'
    ret = dict()
    for v in var:
        if src == 'acsse' or src == 'sf1' or v[0] == 'B':
            tabletype = ''
        elif v[0] == 'S':
            tabletype = 'subject/'
        elif v[:2] == 'DP':
            tabletype = 'profile/'
        elif v[:2] == 'CP':
            tabletype = 'cprofile/'
        elif v[0] == 'C':
            tabletype = ''
        else:
            raise ValueError(u'Unknown table type for variable {0}!'.format(v))
        if (src == 'acs1' or src == 'acs5' or src == 'acsse') and year >= 2010:
            presrc = 'acs/'
        elif src == 'acs3':
            presrc = 'acs/'
        elif src == 'sf1':
            presrc = 'dec/'
        else:
            presrc = ''
        r = requests.get('https://api.census.gov/data/{year}/{presrc}{src}/{tabletype}variables/{v}.json'.format(
            src=src, year=year, v=v, tabletype=tabletype, presrc=presrc))
        try:
            data = r.json()
        except:
            raise ValueError(u'Unexpected response (URL: {0.url}): {0.text} '.format(r))
        try:
            assert data['name'] == v
        except AssertionError:
            raise AssertionError(u'JSON variable information does not include key "name"', data)
        expectedKeys = ['group', 'label', 'limit', 'name', ]
        try:
            assert [k for k in sorted(data.keys()) if k != 'attributes' and k != 'concept' and k != 'predicateType'] == expectedKeys
        except AssertionError:
            print(u'JSON variable information does not include expected keys ({0} and possibly attributes, concept, predicateType) or includes extra keys: '.format(expectedKeys), data)
        try:
            # Concept, predicate type not provided for all years; default to empty if not provided
            ret[v] = [data.get('concept', ''), data['label'], data.get('predicateType', '')]
        except KeyError:
            raise KeyError(u'JSON variable information does not include expected keys: ', data)
    return ret
433c3676860b232a803b8c5b0aff6fffad55dab4
244,800
import torch

def get_numerical_characteristics(data):
    """Return the 4 numerical characteristics of the data:

    1. mean
    2. std: standard deviation
    3. skewness
    4. kurtosis

    :param data: the data
    :return: 1-D tensor of shape torch.Size([4])
    """
    mean = torch.mean(data)
    diffs = data - mean
    var = torch.mean(torch.pow(diffs, 2.0))
    std = torch.pow(var, 0.5)
    z_scores = diffs / std
    # Skewness measures the direction and degree of asymmetry of the data distribution.
    # Definition: skewness is the third standardized moment of the sample.
    skewness = torch.mean(torch.pow(z_scores, 3.0))
    # excess kurtosis, should be 0 for Gaussian
    # Kurtosis characterizes how peaked the probability density curve is at the mean.
    # If kurtosis > 3, the peak is sharper than that of a normal distribution.
    kurtoses = torch.mean(torch.pow(z_scores, 4.0)) - 3.0
    # reshape(1, ): turn each scalar into a tensor of shape torch.Size([1])
    final = torch.cat((mean.reshape(1, ), std.reshape(1, ),
                       skewness.reshape(1, ), kurtoses.reshape(1, )))
    return final
9ea636b38f98d83304753a9e68712d1f4e81edd5
593,955
from pathlib import Path

def extract_params(s3_filepath):
    """Extract the month, year, and periodicity from the bucket path."""
    prefix = "tcmbapublicconsultation/"
    start_index = s3_filepath.find(prefix) + len(prefix)
    relative_path = Path(s3_filepath[start_index:])
    folders = relative_path.parts
    year = folders[0]
    period = folders[1]
    month = None
    if period == "mensal":
        month = folders[2]
    return {"year": year, "period": period, "month": month}
ef055d3292defd1b5f38c9ba9dae5961d68628be
386,269
def clamp(x, inf=0, sup=1):
    """Clamps x in the range [inf, sup]."""
    return inf if x < inf else sup if x > sup else x
42c178afc0bdfc02fd31fe3f211f23cc04b40d2e
701,901
def is_empty_line(line):
    """Return true iff line contains only whitespaces or is empty.

    Preconditions: None

    @type line: str
    @rtype: bool
    """
    return len(line.lstrip()) == 0
22de3b36f2246ecc2333cc971bc72bc0369c99e7
446,553
def get_anion_neighbors(site, structure, radius, anions, get_distance=False):
    """Gets neighboring anions of sites

    Args:
        :param site: (Site) target site to find anion neighbors of
        :param structure: (Structure) structure that contains target site
        :param radius: (float) radius to which anion neighbors should be looked for
        :param anions: (List of Strings) list of species considered anions
        :param get_distance: (boolean) whether or not to get distance between cation and anion
    :return: (List of either Sites or [Site, float]) list of either anion
        neighboring sites or [Site, float] if get_distance is True
    """
    anion_neighbors = []
    neighbors = structure.get_neighbors(site, radius)
    for neighbor in neighbors:
        if neighbor[0].species_string in anions and neighbor[1] < radius:
            if get_distance:
                anion_neighbors.append(neighbor)
            else:
                anion_neighbors.append(neighbor[0])
    return anion_neighbors
e9473f77ee2b5006e79503ebb14890c0e85904cf
78,193
import math

def MCC_calc(TP, TN, FP, FN):
    """Calculate MCC (Matthews correlation coefficient).

    :param TP: true positive
    :type TP : int
    :param TN: true negative
    :type TN : int
    :param FP: false positive
    :type FP : int
    :param FN: false negative
    :type FN : int
    :return: MCC as float
    """
    try:
        result = (TP * TN - FP * FN) / \
            (math.sqrt((TP + FP) * (TP + FN) * (TN + FP) * (TN + FN)))
        return result
    except ZeroDivisionError:
        return "None"
bb11c6eac895692221ed444af64762831ffd92d9
384,070
def create_track_dict(loved_track):
    """Method that creates a dictionary for a given LovedTrack object from
    LastFM API in such format: {'artist': artist-name, 'title': track-title}"""
    return {'artist': loved_track.track.artist.get_name(),
            'title': loved_track.track.title}
3f2090e077cfbcbeaf561a0779c156502fb0960d
159,557
import itertools

def test_product_combinator(container_, repeat_times):
    """cartesian product, equivalent to a nested for-loop

    product('ABCD', repeat=2)
    AA AB AC AD BA BB BC BD CA CB CC CD DA DB DC DD
    """
    return list(itertools.product(container_, repeat=repeat_times))
647a390bb8effe08132a6e506fdf1aca69289e7f
161,585
def get_data_center(hostname):
    """Guess data center from Keeper server hostname

    hostname(str): The hostname component of the Keeper server URL

    Returns one of "EU", "US", "US GOV", or "AU"
    """
    if hostname.endswith('.eu'):
        data_center = 'EU'
    elif hostname.endswith('.com'):
        data_center = 'US'
    elif hostname.endswith('govcloud.keepersecurity.us'):
        data_center = 'US GOV'
    elif hostname.endswith('.au'):
        data_center = 'AU'
    else:
        # Ideally we should determine TLD which might require additional lib
        data_center = hostname
    return data_center
43a36cf6edb559361101a6444d9346b50254af0f
417,542
def krueger12_eta(lpc):
    """Ratio of 56Ni to total iron-peak yield from Krueger+ 2012

    Fitting formula for K12 central density results. Based on looking at
    iron-peak yields from Khokhlov, Mueller & Hoflich 1993, I assign a flat
    prior eta = 0.9 below a central density of 1e+9 g/cm^3. Could probably do
    better by incorporating other results e.g. from the MPA group
    (Seitenzahl+ 2013).

    Input lpc (log10 of central density), output eta = MNi/(MNi + MFe).
    """
    pc9 = 1e-9 * 10**lpc
    return min(0.95, 0.95 - 0.05*pc9), max(0.025, 0.03*pc9)
96f87a9c490b0ad0feff6859399977bc58f6b48a
36,928
def has_length(dataset):
    """Checks if the dataset implements __len__() and it doesn't raise an error"""
    try:
        return len(dataset) is not None
    except TypeError:
        # TypeError: len() of unsized object
        return False
c39ebe97b05f00badc1290be64a11dfdbb28579a
380,629
def inverse(dict_):
    """Return the inverse of dictionary dict_.

    (I.e. return a dictionary with keys that are the values of dict_, and
    values that are the corresponding keys from dict_.)

    The values of dict_ must be unique.
    """
    idict = dict([(value, key) for key, value in dict_.items()])
    if len(idict) != len(dict_):
        raise ValueError("Dictionary has no inverse (values not unique).")
    return idict
c814540835f4da3601824e42b0596068a25dd461
554,371
def create_url(event_id: int) -> str:
    """Return the correct link to the event by its id

    :param event_id: id of the event
    :return: link to the event
    """
    return f'https://olimpiada.ru/activity/{event_id}'
afa3443a158d00b9480adb358a16fc37125ec9de
101,282
import re

def natural_key(string):
    """Key to use for "natural sorting".

    Based on https://blog.codinghorror.com/sorting-for-humans-natural-sort-order/

    Args:
        string: The input string to produce key for

    Returns:
        key to use for natural sorting in conjunction with Python's built-in sort.
    """
    return [int(c) if c.isdigit() else c for c in re.split(r'(\d+)', string)]
638ecb3a110dd00521dc1e773e4e1654db2ecbcd
212,570
def aumentar(p=0, tax=0):
    """-> Increase the initial value by a percentage defined by the user.

    :param p: initial value
    :param tax: rate value (percentage)
    :return: initial value plus the rate
    """
    res = p + (p * tax / 100)
    return res
75b8fbaddbc7bfa464a47c8f2d0a38ff49242151
427,397
def clean_col(col):
    """This function cleans the column names and adds the year information"""
    if col == 'Unnamed: 0':
        # Special case for country column
        return 'Country'
    elif col[0] == 'U':
        # Take care for the annual summary column
        return 'Total, {}'.format(str(int(col.split(':')[1]) // 17 + 2014))
    elif (len(col.split('.')) == 1) & (col[0] != 'U'):
        return col + ', 2015'
    elif col.split('.')[1] == '1':
        return col.split('.')[0] + ', 2016'
    elif col.split('.')[1] == '2':
        return col.split('.')[0] + ', 2017'
    elif col.split('.')[1] == '3':
        return col.split('.')[0] + ', 2018'
    elif col.split('.')[1] == '4':
        return col.split('.')[0] + ', 2019'
    elif col.split('.')[1] == '5':
        return col.split('.')[0] + ', 2020'
0ee72161fafacf0aa7c9d7cc5d1d40ad11ede33e
419,042
def is_number(s):
    """Checks if the given string is an int or a float.

    Numbers with thousands separators (e.g. "1,000.12") are also recognised.
    Returns true if the string contains only digits and punctuation, e.g. 12/23
    """
    try:
        float(s)
        is_float = True
    except ValueError:
        is_float = False
    is_only_digits_or_punct = True
    for ch in s:
        if ch.isalpha():
            is_only_digits_or_punct = False
            break
    return is_float or is_only_digits_or_punct
5d17e85f7c0fd53fa184774bb05976457c05853b
260,394
import time
from datetime import datetime
import pytz

def parse_datetime(dt_str, format):
    """Create a timezone-aware datetime object from a datetime string."""
    t = time.strptime(dt_str, format)
    # t[:6] is (year, month, day, hour, minute, second); t[6] is the weekday,
    # not the microsecond, so it must not be forwarded to datetime()
    return datetime(t[0], t[1], t[2], t[3], t[4], t[5], 0, pytz.UTC)
fbe341216c52db7f09add26ff9575caa44de1cb8
155,046
def get_parent_position(position: int) -> int:
    """heap helper function get the position of the parent of the current node

    >>> get_parent_position(1)
    0
    >>> get_parent_position(2)
    0
    """
    return (position - 1) // 2
f9c282f608033b2eb2604d981bf7c8a141e1beab
276,080
from functools import reduce

def getitems(obj, items, default=None):
    """Get items from obj

    :param obj: object
    :param items: string or iterable object
    :param default: default value
    :return: obj item value if existed, otherwise default value
    """
    if isinstance(items, str):
        items = (items,)
    try:
        return reduce(lambda x, i: x[i], items, obj)
    except (IndexError, KeyError):
        return default
94828589d01481a5d7d1cddc1abd6972d11656e9
457,975
def isHtml(res):
    """Determines whether or not a response dictionary contains raw HTML"""
    return res["error"] is None \
        and res["status"] == 200 \
        and "text/html" in res["type"]
6c3e0bde091989c1faa42390d7964aa3ebfb1973
551,953
def remove_cover_attachment_previews(cover_attachment):
    """Remove the previews element from cover_attachment. It contains a lot
    of data. Then, return.
    """
    del cover_attachment["previews"]
    return cover_attachment
26ba731ae29cabae958d89bb1d912218b41154c6
527,713
def change2(line, old, new):
    """replace line with new if line.startswith(old)"""
    if line.startswith(old):
        return new + "\n"
    return line
6ce0288a1dc592f66cb89d9cd1d23f2288d6e5e5
180,937
import filecmp

def files_match(file1: str, file2: str):
    """Compares two files, returns True if they both exist and match."""
    # filecmp.cmp does not invoke any subprocesses.
    return filecmp.cmp(file1, file2, shallow=False)
bdb4053d7b10284315b59e5e751ea0e04d8fa4d7
359,935