content: string (lengths 35 – 416k)
sha1: string (length 40)
id: int64 (0 – 710k)
import math


def arclen(angle, radius, rad=False):
    """Calculates the size of an arc of a circle"""
    if rad:
        angle = math.degrees(angle)
    return (angle / 360) * (2 * math.pi * radius)
c94e3a0f838a4ee635da4997da3ac89867d03366
700,051
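A minimal usage sketch for arclen above; a 90° arc of a unit circle has length π/2, in both degree and radian modes:

    import math
    assert math.isclose(arclen(90, 1), math.pi / 2)
    assert math.isclose(arclen(math.pi / 2, 1, rad=True), math.pi / 2)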
import torch


def precision(label_pred, label_gt) -> float:
    """ Computes the precision """
    with torch.no_grad():
        prediction_bin = torch.argmax(label_pred, dim=1)
        TP = torch.mul(prediction_bin, label_gt).sum()
        FP = torch.mul(prediction_bin, 1 - label_gt).sum()
        PC = float(TP) / (float(TP + FP) + 1e-6)
    return PC
039e4c938d5ef6fd27ecda1d1bf027863b973060
700,052
def get_links(soup, artist):
    """Get the complete link for each song of the artist"""
    links = []
    for td in soup.find_all('td'):
        if "tal" in td.get('class', []):
            links.append('https://www.lyrics.com' + td.find('a')['href'])
    return links
0587286e3bd15abdee0403745b5fc6d5af81ce5c
700,053
import hashlib


def hash_seqs(sequences):
    """
    Generates hexdigest of Sha1 hash for each sequence in a list of
    sequences. This function is useful for generating sequence specific
    identifiers that allow for easier comparison of features from multiple
    sequencing runs or sequence processing runs.
    """
    new_sequences = list()
    for seq in sequences:
        # get sequence string and encode using UTF-8 for consistency
        seq = seq.encode('UTF-8')
        # get sha1 hash of sequence
        hash_ = hashlib.sha1()
        hash_.update(seq)
        hash_hex = hash_.hexdigest()
        new_sequences.append(hash_hex)
    return new_sequences
35c3291a58ebc7e053250f7234faacd0356f7df5
700,055
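A brief usage sketch of hash_seqs with made-up input sequences; each result is, by construction, the 40-character SHA-1 hex digest of the UTF-8 bytes:

    import hashlib
    ids = hash_seqs(["ACGT", "TTGA"])
    assert ids[0] == hashlib.sha1("ACGT".encode("UTF-8")).hexdigest()
    assert all(len(h) == 40 for h in ids)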
def get_paths(link, nb):
    """
    Generate a list containing all URLs

    Args:
        link [str]: Base HTML link
        nb [int]: Number of pages using the HTML link

    Returns:
        url [str]: List containing all URLs
    """
    url = []
    for si in range(2000, 2020):
        for ti in range(1, nb + 1):
            result = link + str(si) + "-" + str(si + 1) + "&teamId=" + str(ti)
            url.append(result)
    return url
8fd0a947eeb5435f0df48dc928feb3a10786c2cc
700,056
def heaviside(x, bias=0):
    """
    Heaviside function Theta(x - bias): returns 1 if x >= bias, else 0

    :param x: input value
    :param bias: threshold
    :return: 0 or 1
    """
    indicator = 1 if x >= bias else 0
    return indicator
b325a862cbc2cac97b8e4808c6d77b54a0f1d643
700,057
def biggest_differences_words(prunedTable):
    """
    Finds the words that are most different from their most frequent
    alternative across each semantic dimension

    Parameters
    ----------
    prunedTable : a data frame
        The data frame representing arousal, valence, and dominance ratings
        for words and their most frequent alternatives across time within a
        paragraph

    Returns
    -------
    a dictionary mapping from a semantic dimension to row indexing
    information about the word with the greatest difference for that
    dimension
    """
    prunedTable = prunedTable.assign(
        absADiff=(prunedTable['WordA'] - prunedTable['AltA']).abs(),
        absVDiff=(prunedTable['WordV'] - prunedTable['AltV']).abs(),
        absDDiff=(prunedTable['WordD'] - prunedTable['AltD']).abs())
    biggestDifferencesWords = {
        'Arousal': prunedTable.loc[prunedTable['absADiff'].idxmax()],
        'Valence': prunedTable.loc[prunedTable['absVDiff'].idxmax()],
        'Dominance': prunedTable.loc[prunedTable['absDDiff'].idxmax()]}
    return biggestDifferencesWords
2b39a717fbdf7d823a381ff3320e2ac487f65ec3
700,058
def build_help_text(command_class):
    """Generate help text from a command class."""
    command = command_class()
    parser = command.create_parser({})
    return parser.format_help()
3fc8491e37db2f0b96144ad0b34723ceb71a51ca
700,060
import io


def read_names(f):
    """Read names, specified one per line, from a file."""
    return (line.strip() for line in io.open(f, 'r', encoding='utf-8'))
98724005ef5c647aa31205bc8afd07da50ece002
700,061
def format_cols_2digit(df, skip_last_col=True):
    """Formats a dataframe's columns so that numbers are always two-digits
    (padded with 0)

    Parameters
    ----------
    df : pandas DataFrame
        Input DataFrame.
    skip_last_col : bool
        A special case, where the very last column contains text, rather
        than a number, and should be excluded.

    Returns
    -------
    reformatted_cols : list
        The DataFrame columns, reformatted with padded 0 to make 2 digits.
    """
    if skip_last_col:
        # reformat the columns to be padded stringnumbers, excluding the
        # last column (e.g. "Contains_Data")
        reformatted_cols = ["%02d" % col for col in df.columns[:-1]]
        # add last column back to list
        reformatted_cols.append(df.columns[-1])
    else:
        # reformat all the columns to be padded stringnumbers
        reformatted_cols = ["%02d" % col for col in df.columns]
    return reformatted_cols
5c9bc98ed6298f8f3d181432320cc69dc4c30ea2
700,062
def ec_filter(query_list, hit_list, ec_number):
    """Return the set of queries whose hit annotation matches ec_number."""
    passed_query = []
    for query, anno in zip(query_list, hit_list):
        # annotation looks like ">EC:a.b.c|...": extract the EC digits
        anno_ec_number = anno.split("|")[0].replace(">EC:", "").split(".")
        # keep the query only if the EC number is complete and matches
        if len(anno_ec_number) >= 3 and ec_number == "_".join(anno_ec_number):
            passed_query.append(query)
    return set(passed_query)
b8b66f19f79dc60cd2d044f4bcfdd46d3a3908b7
700,063
def reverse_complement_no_loops(seq):
    """Return the reverse Watson-Crick complement of a sequence without loops"""
    # Initialize rev_seq to a lowercase seq
    rev_seq = seq.lower()
    # Substitute bases
    rev_seq = rev_seq.replace('t', 'A')
    rev_seq = rev_seq.replace('a', 'T')
    rev_seq = rev_seq.replace('g', 'C')
    rev_seq = rev_seq.replace('c', 'G')
    return rev_seq[::-1]
2a94d38827cbda95272776b0bea652af9d30b64d
700,064
from collections import OrderedDict


def update(data_df, cal_dict, param, bound, start, end):
    """Update calibration times for the given parameter and boundary"""
    if param not in cal_dict["parameters"]:
        cal_dict["parameters"][param] = OrderedDict()
    if bound not in cal_dict["parameters"][param]:
        cal_dict["parameters"][param][bound] = OrderedDict()
    cal_dict["parameters"][param][bound]["start"] = start
    cal_dict["parameters"][param][bound]["end"] = end
    return cal_dict
f8c63a528ad6533938f215179c17bd236eefeb67
700,065
def viaCombusta(obj):
    """ Returns True if an object is in the Via Combusta. """
    return 195 < obj.lon < 225
531ab5b5725bf3ed6dc8c8ff99c3980b3533e558
700,066
def label_name(condition):
    """Format a condition name into a display label."""
    label = condition.replace("_", " ").split(".")
    label[0] = label[0].capitalize()
    return " ".join(label)
06ea2caf9bb881aa016ef3f74999fe376d30cb05
700,067
def copresence(acc, w1, w2):
    """Returns 1 if a pair of figures is on stage at the same time, and 0
    otherwise."""
    return int(acc + w1 + w2 > 0)
3056b25df4a59bc421a3aec3d33e25db8ccb98bd
700,068
def key_line(cfgmap, key):
    """Get the line number of `key` in `cfgmap`."""
    if not hasattr(cfgmap, "key2line"):
        key2line = {}
        for cfgkey in cfgmap:
            key2line[cfgkey] = getattr(cfgkey, "line", -1)
        cfgmap.key2line = key2line
    return cfgmap.key2line.get(key, -1)
94649a30f949e868610426642699e9ac43b644be
700,069
def gen_bool_parse(val):
    """Convert a string, as a human might type it, to a boolean.

    Unrecognized values raise an exception.
    """
    val = val.strip()
    if not val:
        return False
    try:
        return bool(int(val))
    except ValueError:
        pass
    ch = val[0]
    if ch in {'t', 'T', 'y', 'Y'}:
        return True
    if ch in {'f', 'F', 'n', 'N'}:
        return False
    raise ValueError('"%s" does not look like a boolean' % (val,))
3c1944c7633f329848569c6cb29af11e145dfa5c
700,070
import glob


def _expand_glob_path(file_roots):
    """
    Applies shell globbing to a set of directories and returns the expanded
    paths
    """
    unglobbed_path = []
    for path in file_roots:
        try:
            if glob.has_magic(path):
                unglobbed_path.extend(glob.glob(path))
            else:
                unglobbed_path.append(path)
        except Exception:
            unglobbed_path.append(path)
    return unglobbed_path
f765e3063f098d2bf185df619783b428b192b37a
700,071
def nth_triangle_number(n):
    """ Compute the nth triangle number """
    return n * (n + 1) // 2
76ebd412200a04ae8a1bf5c4d18122db01cee526
700,073
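A quick sanity check for nth_triangle_number; the values follow directly from the closed form n(n+1)/2:

    assert nth_triangle_number(1) == 1
    assert nth_triangle_number(4) == 10   # 1 + 2 + 3 + 4
    assert nth_triangle_number(100) == 5050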
import re


def check_ignore(item, ignores=[]):
    """
    Take a string (item) and see if any of the strings in the ignores list
    match the item; if so, ignore it.
    """
    ignore = False
    for i in ignores:
        if i and re.search(i, str(item)):
            # print("ignoring item: %s for ignore: %s" % (item, i))
            ignore = True
    return ignore
0d31b2ef2ddbe48a4de7f743c412b1a72a19b774
700,074
def all_segments(N):
    """Return (start, end) pairs of indexes that form segments of a tour of
    length N"""
    return [(start, start + length)
            for length in range(N, 2 - 1, -1)
            for start in range(N - length + 1)]
d1b70d4f52c930e97ff82920abbf49f5b6d0af56
700,075
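A small illustration of all_segments; for N = 4 the segments come out longest-first, each as a half-open (start, end) index pair:

    assert all_segments(4) == [(0, 4), (0, 3), (1, 4), (0, 2), (1, 3), (2, 4)]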
def normalize_mac(mac):
    """Remove '-' and ':' characters and lowercase the MAC string.

    :param mac: MAC address to normalize.
    :return: Normalized MAC address string.
    """
    return mac.replace('-', '').replace(':', '').lower()
9ba40e96f01274eb764f47078e22de34dddfd024
700,076
import torch


def epe(input_flow, target_flow):
    """
    End-point-error computation
    Args:
        input_flow: estimated flow [Bx2xHxW]
        target_flow: ground-truth flow [Bx2xHxW]
    Output:
        Averaged end-point-error (value)
    """
    return torch.norm(target_flow - input_flow, p=2, dim=1).mean()
ff68a331c1f3323585c6a351b4a3da50209ab9b9
700,077
def install_command(package):
    """
    Build the install command.

    :param package: package to install
    :return: the full install command
    """
    cmd_ = "pacman -S --noconfirm {0}".format(package)
    return cmd_
55f8a67a2abebc3204af1f5a1d2e390df69ef999
700,078
from typing import Iterable


def filter_array(func, arr: Iterable) -> list:
    """
    Filters the arr using the given function. The function must return True
    or False depending on whether the element should be part of the result.
    """
    res = list()
    for el in arr:
        if func(el):
            res.append(el)
    return res
53e1db35e1876475efa1427aefc4b6728d97087e
700,079
def slices(series: str, length: int) -> list:
    """slices - a.k.a. grouped slices

    :param series: str:
    :param length: int:
    :returns: A list of grouped slices of n length from a string
    """
    if length not in range(len(series) + 1):
        raise ValueError(f'Length {length} not in range for this series')
    return [series[tailcut - length:tailcut]
            for tailcut in range(length, len(series) + 1)]
53a15a0b6322a22b95fc8943fbd7546da4419a77
700,080
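A brief usage sketch for slices; each window of the requested length is returned in order:

    assert slices("49142", 3) == ["491", "914", "142"]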
def s2human(time):
    """Convert a time in seconds into a human-readable string"""
    for delay, desc in [(86400, 'd'), (3600, 'h'), (60, 'm')]:
        if time >= delay:
            return str(int(time / delay)) + desc
    return str(int(time)) + "s"
a2d2264fde357534e52444b754de81398eeacea7
700,081
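A few illustrative calls to s2human; the value is truncated to the largest matching unit and the remainder is dropped:

    assert s2human(90061) == "1d"
    assert s2human(3700) == "1h"
    assert s2human(59) == "59s"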
def get_short_name(username):
    """ Look up the displayed uin based on the openid """
    return username
e1062fb0b4d58c052ce60abc9081ec6bb211c006
700,082
from typing import List
from typing import Dict


def group(answers: List[str]) -> Dict[int, List[str]]:
    """
    Group answers by word length.

    :param answers:
    :return:
    """
    answers.sort()  # sort answers
    answers_group = {}
    for item in answers:
        n = len(item)
        # define key value if it doesn't exist
        if n not in answers_group:
            answers_group[n] = []
        answers_group[n].append(item)
    return answers_group
8ffec0f8e1a1c32d8f28d4364cf9afac6c3e544f
700,083
from sys import version


def get_version():
    """
    Returns the version string of the running Python interpreter.
    """
    return version
2dfa42ac9eae9e590b5e6401a78ab8a281b1a454
700,084
def min_edit_distance(word1, word2):
    """
    :type word1: str
    :type word2: str
    :rtype: int
    """
    n = len(word1)
    m = len(word2)
    # one of the strings is empty
    if n * m == 0:
        return n + m
    # DP table
    D = [[0] * (m + 1) for _ in range(n + 1)]
    # initialize boundary states
    for i in range(n + 1):
        D[i][0] = i
    for j in range(m + 1):
        D[0][j] = j
    # compute all DP values
    for i in range(1, n + 1):
        for j in range(1, m + 1):
            left = D[i - 1][j] + 1
            down = D[i][j - 1] + 1
            left_down = D[i - 1][j - 1]
            if word1[i - 1] != word2[j - 1]:
                left_down += 1
            D[i][j] = min(left, down, left_down)
    return D[n][m]
8504dcb903176e745ac24babd05b5e91af9088ca
700,085
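A worked check of min_edit_distance on a classic pair; "horse" → "ros" needs three edits (replace h→r, delete r, delete e):

    assert min_edit_distance("horse", "ros") == 3
    assert min_edit_distance("", "abc") == 3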
def Ql_from_Qi_Qe(Qi, Qe):
    """ 1/Ql = 1/Qi + 1/Qe """
    Ql = 1 / (1 / Qi + 1 / Qe)
    return Ql
500ea9877d13f70ac8f51bd6add9ffc792ee7de0
700,086
def _depth_to_percentile_normalized_disp(depth):
    """This performs the same steps as normalize_depth_for_display from the
    SfMLearner repository, given the default options. This treats every
    image in the batch separately.
    """
    disp = 1 / (depth + 1e-6)
    disp_sorted, _ = disp.flatten(1).sort(1)
    idx = disp_sorted.shape[1] * 95 // 100
    batch_percentiles = disp_sorted[:, idx].view(-1, 1, 1, 1)
    disp_norm = disp / (batch_percentiles + 1e-6)
    return disp_norm
74cf3a7d04e7dbed59860eef4c1abcbc6f27b33b
700,087
def i_priority_node(g, i):
    """
    Returns all nodes of priority i in game graph g.
    :param g: the game graph.
    :param i: the requested priority.
    :return: a list of nodes of priority i in g.
    """
    nodes = g.nodes  # Nodes from g
    # get all node indexes in node tuple (index, (node_player, node_priority))
    # when node_priority is i
    return [k for k, v in nodes.items() if v[1] == i]
4d81fab7c7ea7ac75d21dfa36735b1e9a8981444
700,088
import warnings


def make_sklearn_compat(op):
    """This is a deprecated method for backward compatibility and will be
    removed soon"""
    warnings.warn(
        "sklearn_compat.make_sklearn_compat exists for backwards "
        "compatibility and will be removed soon",
        DeprecationWarning,
    )
    return op
da6881d324549258cb185be6cadbcb2e795ea655
700,089
def sum_path(G, path):
    """ Calculate the sum of the edge weights along `path` """
    sum_weight = 0
    for i in range(len(path) - 1):
        n1, n2 = path[i], path[i + 1]
        sum_weight += G[n1][n2]['weight']
    return sum_weight
324c9d99c609da742ab71ad43714ec02d4f4d78c
700,090
def split_doc(d):
    """Split a document into sentences and save the sentences to a list.

    Args:
        d: a document
    Returns:
        final_d: a list of sentences
    """
    d = d.strip().split(".")  # split document by "." into sentences
    final_d = []
    for s in d:
        if s != "":  # ignore the sentence if it is empty
            final_d.append(s.strip())
    return final_d
85726c04edbc94ec28e737050c0e508b54b59e5e
700,091
def build_history_object(metrics):
    """ Builds history object """
    history = {"batchwise": {}, "epochwise": {}}
    for metric in metrics:
        history["batchwise"][f"training_{metric}"] = []
        history["batchwise"][f"validation_{metric}"] = []
        history["epochwise"][f"training_{metric}"] = []
        history["epochwise"][f"validation_{metric}"] = []
    return history
1062b05b6ec5fb0126b85eb06270ccbd0cc2d468
700,092
def is_ccw(signed_area):
    """Returns True when a ring is oriented counterclockwise

    This is based on the signed area:
        > 0 for counterclockwise
        = 0 for none (degenerate)
        < 0 for clockwise
    """
    if signed_area > 0:
        return True
    elif signed_area < 0:
        return False
    else:
        raise ValueError("Degeneracy: No orientation based on area")
bd0e0d92913dcb1c895c36c6e724e454e3658a6d
700,093
def igv_test_tracks(igv_public_track):
    """Returns a list with test tracks for igv.js"""
    return [igv_public_track]
58b1de343f6b9aa4d4274fd9a67fd40021d774ec
700,095
import argparse


def parse_args():
    """
    Build arguments
    :return args: input arguments
    """
    parser = argparse.ArgumentParser(description="FLAC to wav arguments.")
    parser.add_argument("--wav_dir", type=str,
                        default="../../data/LibriSpeech/wav",
                        required=False,
                        help="LibriSpeech wav directory")
    args = parser.parse_args()
    return args
f3c5e8f3636ce91be3042feae8e385ebd2b25ce0
700,096
def from_row_num_to_track_id(df, row_num):
    """ df must have a 'track_id' column """
    return df.iloc[row_num].track_id
db34b53be6a74e8a0fa20394f014346dc8d654d0
700,097
def age_window_hit(by_predicted, by_truth):
    """
    Calculates the window for a given truth and checks if the prediction
    lies within that window
    :param by_predicted: the predicted birth year
    :param by_truth: the true birth year
    :return: True if by_predicted is within the m-window of by_truth
    """
    m = -0.1 * by_truth + 202.8
    return int(by_truth - m) <= by_predicted <= int(by_truth + m)
0d5903d21006f2651114affa9179cfc063b25f1d
700,098
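A quick numeric illustration of age_window_hit; for a true birth year of 1990 the half-width is m = -0.1*1990 + 202.8 = 3.8, so predictions from 1986 through 1993 count as hits:

    assert age_window_hit(1986, 1990)        # int(1990 - 3.8) = 1986
    assert age_window_hit(1993, 1990)        # int(1990 + 3.8) = 1993
    assert not age_window_hit(1985, 1990)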
import sqlite3


def get_con_cur(db_filename):
    """Returns an open connection and cursor associated with the sqlite
    database associated with db_filename.

    Args:
        db_filename: (str) the filename of the db to which to connect

    Returns: a tuple of:
        - an open connection to the sqlite database
        - an open cursor associated with the connection
    """
    con = sqlite3.connect(db_filename)
    cur = con.cursor()
    return (con, cur)
5b99bb2df4f5a59a89d842f125a04252b86aab38
700,099
def small_straight(dice):
    """Score the given roll in the 'Small Straight' category."""
    if sorted(dice) == [1, 2, 3, 4, 5]:
        return sum(dice)
    else:
        return 0
4b88652b32efd49d5d4247ce88584011a43a0b10
700,100
def format_internal_tas(row):
    """Concatenate TAS components into a single field for internal use."""
    # This formatting should match formatting in
    # dataactcore.models.stagingModels concatTas
    tas = ''.join([
        row['allocation_transfer_agency'] if row['allocation_transfer_agency'] else '000',
        row['agency_identifier'] if row['agency_identifier'] else '000',
        row['beginning_period_of_availa'] if row['beginning_period_of_availa'].strip() else '0000',
        row['ending_period_of_availabil'] if row['ending_period_of_availabil'].strip() else '0000',
        row['availability_type_code'].strip() if row['availability_type_code'].strip() else ' ',
        row['main_account_code'] if row['main_account_code'] else '0000',
        row['sub_account_code'] if row['sub_account_code'] else '000'
    ])
    return tas
0a1db8f1958d3ee1f06b323f9d00de66814e2a6b
700,102
def form(self, lab="", **kwargs):
    """Specifies the format of the file dump.

    APDL Command: FORM

    Parameters
    ----------
    lab
        Format:

        RECO - Basic record description only (minimum output) (default).

        TEN - Same as RECO plus the first ten words of each record.

        LONG - Same as RECO plus all words of each record.

    Notes
    -----
    Specifies the format of the file dump (from the DUMP command).
    """
    command = f"FORM,{lab}"
    return self.run(command, **kwargs)
68c2ec60889bac22a8f97789acb1586c41c60a06
700,103
def percent(values, p=0.5):
    """Return a value a fraction of the way between the min and max values
    in a list."""
    m = min(values)
    interval = max(values) - m
    return m + p * interval
80d3d291122d42e8b9936c4ef994e9ca1a7e98b5
700,104
from pathlib import Path
import sys


def check(database):
    """
    Checks if the selected database exists.

    Args:
        database: name of the database.

    Returns:
        database: name of the database.
    """
    if Path(database).is_file():
        # Database found.
        return database
    else:
        # Database not found.
        sys.exit('\n' + database + ' does not exist.\n')
d1c8bc7f191b343d70d86853a644a380c4786978
700,105
def fns2dict(*functions) -> dict:
    """
    Returns a dictionary of function name -> function, given functions as
    *arguments.

    Return:
        Dict[str, Callable]
    """
    return {f.__name__: f for f in functions}
7ddfc5b5a99d016e13e66e4521d9f60b34051505
700,107
def cudify(x, use_cuda):
    """
    Args:
        x: input Tensor
        use_cuda: boolean
    """
    if use_cuda:
        return x.cuda()
    else:
        return x
98987fadd057597c1396f50b3c0456edfb443507
700,108
def maxabs(vals):
    """convenience function for the maximum of the absolute values"""
    return max([abs(v) for v in vals])
ec79fe4de1aa658b40a7495f484b26493e5d8fc2
700,109
def get_row_sql(row):
    """Get the SQL to create a column from a row in PROC CONTENTS."""
    postgres_type = row['postgres_type']
    if postgres_type == 'timestamp':
        postgres_type = 'text'
    return row['name'].lower() + ' ' + postgres_type
4efecaefa8b79bdeec7447138586cc93268c54df
700,110
def resolve_relative_path(filename):
    """
    Returns the full path to the filename provided, taken relative to the
    current file, e.g. if this file was file.py at /path/to/file.py and the
    provided relative filename was tests/unit.py then the resulting path
    would be /path/to/tests/unit.py
    """
    r = __file__.rsplit("/", 1)  # poor man's os.path.dirname(__file__)
    head = r[0]
    if len(r) == 1 or not head:
        return filename
    return "%s/%s" % (head, filename)
447df7fb94dbb3a0796c5207a99062b04dfbbf50
700,111
import torch


def normalize(x: torch.Tensor) -> torch.Tensor:
    """Normalizes a vector with its L2-norm.

    Args:
        x: The vector to be normalized.

    Returns:
        The normalized vector of the same shape.
    """
    norm = x.pow(2).sum(1, keepdim=True).pow(1.0 / 2)
    out = x.div(norm)
    return out
f34e664a565953e46c9cb18cc66fce0dd9903bde
700,112
def marginal_effect(cm_dict, reference, protected):
    """ Calculates the marginal effect as a percentage difference between a
    reference and a protected group: reference_percent - protected_percent.
    Prints intermediate values. Tightly coupled to cm_dict.

    :param cm_dict: Dict of confusion matrices containing information about
        reference and protected groups.
    :param reference: Name of reference group in cm_dict as a string.
    :param protected: Name of protected group in cm_dict as a string.
    :return: Marginal effect value.
    """
    # reference group summary
    reference_accepted = float(cm_dict[reference].iat[1, 0] +
                               cm_dict[reference].iat[1, 1])  # predicted 0's
    reference_total = float(cm_dict[reference].sum().sum())
    reference_percent = 100 * (reference_accepted / reference_total)
    print(reference.title() + ' accepted: %.2f%%' % reference_percent)

    # protected group summary
    protected_accepted = float(cm_dict[protected].iat[1, 0] +
                               cm_dict[protected].iat[1, 1])  # predicted 0's
    protected_total = float(cm_dict[protected].sum().sum())
    protected_percent = 100 * (protected_accepted / protected_total)
    print(protected.title() + ' accepted: %.2f%%' % protected_percent)

    # return marginal effect
    return reference_percent - protected_percent
4c7ff2e6fa9746bd9b0bd152c2dba25ea3d358a9
700,113
def Sigma_functional_form(func_type='well-behaved'):
    """ Get line with the correct functional form of Sigma(w) """
    if func_type == 'power law':
        form = r'$\Sigma(w)=w^{-\frac{1}{\sigma}}$'
    elif func_type == 'truncated':
        form = (r'$\Sigma(w)'
                r'=\big{(}\frac{1}{w}+B\big{)}^{-B d_f+C}'
                r'exp\big{(}\frac{d_f}{w}\big{)}$')
    elif func_type == 'well-behaved':
        form = (r'$\Sigma(w)=w^{B d_f-C}'
                r'exp\big{(}\frac{d_f}{w}+B C w\big{)}$')
    elif func_type == 'pitchfork':
        form = (r'$\Sigma(w)=w^{B d_f}'
                r'(1+Bw^2)^{-\frac{B d_f}{2}}'
                r'exp\big{(}\frac{d_f}{2w^2}+\frac{C}{w}'
                r'+\sqrt{B}CArcTan[\sqrt{B}w]\big{)}$')
    else:
        print('functional form requested not recognized')
        form = 'unknown functional form'
    return form
691aca26f2611835fc4870cb2dd09a40c0b155e4
700,114
import uuid


def label(project):
    """Label fixture for project label API resource tests."""
    _id = uuid.uuid4().hex
    data = {
        "name": f"prjlabel{_id}",
        "description": f"prjlabel1 {_id} description",
        "color": "#112233",
    }
    return project.labels.create(data)
61d9ca8e6a9c909f3bc97135796a2cf03de99b35
700,115
import math


def calculer_distance(point1, point2):
    """Compute the distance between the two points."""
    diffx = math.pow(point1['x'] - point2['x'], 2)
    diffy = math.pow(point1['y'] - point2['y'], 2)
    return math.sqrt(diffx + diffy)
d4513f387ccd56f9ed535b392f6a6fa36ea2a81d
700,117
import torch


def triu(input_, k=0):
    """Wrapper of `torch.triu`.

    Parameters
    ----------
    input_ : DTensor
        Input tensor
    k : int, optional
        Offset to main diagonal, by default 0
    """
    return torch.triu(input_._data, k)
07d9a370e6a33eb2998d0fb4f0c97940f7e0595e
700,118
import re


def camel_case_to_underscore(name):
    """Converts string from camel case notation to underscore.

    :param name: String to convert to underscore.
    :type name: string
    :return: A string converted from camel case to underscore.
    :rtype: string
    """
    s1 = re.sub(r'(.)([A-Z][a-z]+)', r'\1_\2', name)
    return re.sub(r'([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
741753a4033c4ff08af3a55c5b600b3c08d46c8f
700,121
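A couple of representative conversions for camel_case_to_underscore; the second substitution handles acronym boundaries such as "HTTPResponse":

    assert camel_case_to_underscore("CamelCase") == "camel_case"
    assert camel_case_to_underscore("getHTTPResponseCode") == "get_http_response_code"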
def vfid_set(session, vfid):
    """Assign a new VFID to a session

    :param session: dictionary of session returned by :func:`login`
    :param vfid: new VFID to be assigned to the session
    :return: "Success" on completion
    """
    session['vfid'] = vfid
    return "Success"
00f17adefa2d24bfcd6a1e1f1a24acfe88873dab
700,123
def import_list(filepath):
    """imports list from a file, takes a filepath, returns a list"""
    txt = open(filepath, "r")
    shuffled = txt.read().splitlines()
    txt.close()
    return shuffled
548866597e0d9899ecdd536c55ef7f9f8ce24688
700,124
def bool_list_item_spec(bool_item_spec):
    """A specification for a list of boolean items."""
    return {
        'my_bools': {
            'required': True,
            'items': bool_item_spec
        }
    }
8bb609015004b6eb12d182b07731368b107ec602
700,125
def total_seconds(td):
    """Python 2.7 adds a total_seconds method to timedelta objects.
    See http://docs.python.org/library/datetime.html#datetime.timedelta.total_seconds

    This function is taken from
    https://bitbucket.org/jaraco/jaraco.compat/src/e5806e6c1bcb/py26compat/__init__.py#cl-26
    """
    try:
        result = td.total_seconds()
    except AttributeError:
        result = (td.microseconds +
                  (td.seconds + td.days * 24 * 3600) * 10**6) / 10**6
    return result
49db7ae90dd2d2d775716b86cab044964217079b
700,126
def is_valid_ip(ip: str) -> bool:
    """Checks if an IP address is valid

    Examples:
        >>> assert is_valid_ip('12.255.56.1')
        >>> assert not is_valid_ip('1.1.1')
    """
    octets = ip.split(".")
    if not octets or len(octets) != 4:
        return False
    return all(map(lambda octet: octet in map(str, range(256)), octets))
ded3fa902b869ef8320247a0cfa47f39032c38d6
700,127
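A few more checks of is_valid_ip exercising the octet-range test; because it compares string octets against str(0)..str(255), leading-zero forms are rejected too:

    assert is_valid_ip("192.168.0.255")
    assert not is_valid_ip("256.1.1.1")   # 256 is out of range
    assert not is_valid_ip("1.2.3.04")    # "04" does not match str(4)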
import argparse


def build_parser():
    """Build argument parser."""
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    # Required args
    parser.add_argument("--in_gct_path", "-i", required=True,
                        help="path to input gct file")

    # Optional args
    parser.add_argument("--in_gct2_path", "-i2",
                        help="path to second gct file")
    parser.add_argument("--out_name", "-o", default="steep_output.gct",
                        help="what to name the output similarity file")
    parser.add_argument("--similarity_metric", "-s", default="spearman",
                        choices=["spearman", "pearson"],
                        help="similarity metric to use for comparing columns")
    parser.add_argument("--verbose", "-v", action="store_true", default=False,
                        help="whether to increase the # of messages reported")
    return parser
1955ad29db2d6970db3d69e19efa0fecac63940e
700,128
import os
import re


def get_version(paths=None):
    """
    paths: a VERSION file containing the long version is checked for in
    every directory listed in paths.
    """
    if paths is None:
        # by default, get version for myself
        pwd = os.path.dirname(__file__)
        root = "%s/.." % pwd
        paths = [root, pwd]

    if not isinstance(paths, list):
        paths = [paths]

    long_version = None
    short_version = None
    branch_name = None

    # if in any of the paths a VERSION file exists, we use the long version
    # in there.
    for path in paths:
        try:
            filename = "%s/VERSION" % path
            with open(filename) as f:
                line = f.readline()
            line = line.strip()
            pattern = re.compile(r'^\s*(?P<long>(?P<short>[^-@]+?)(-[^@]+?)?(?P<branch>@.+?)?)\s*$')
            match = pattern.search(line)
            if match:
                long_version = match.group('long')
                short_version = match.group('short')
                branch_name = match.group('branch')
                break
        except Exception:
            # ignore missing VERSION file -- this is caught below
            pass

    if long_version:
        return short_version, long_version, branch_name

    # check if any one worked ok
    if long_version is None:
        raise RuntimeError("Cannot determine version from %s" % paths)
5e9515b86f42e4dc39ab7f8ca2426bd949e1f9eb
700,129
import numpy


def arcsec(val):
    """ Inverse secant """
    return numpy.arccos(1. / val)
9b232ed81368a5abd2e7f340eaca697491f48074
700,130
def is_type_of(value, other):
    """Type check"""
    return isinstance(value, other)
b9c21df5cf75ec86e941182d553eaae2fec2eb38
700,131
def class_fullname(obj):
    """Returns the full class name of an object"""
    return obj.__module__ + "." + obj.__class__.__name__
a7b5915e15122664943a181a48d3f52dff232c88
700,132
from typing import List


def split_users(grouped_data: List[str]) -> List[List[str]]:
    """ Group user groups into votes per person """
    return [value.split(" ") for value in grouped_data]
ea61cedff0441a420ccbcb32584bf294348ac15a
700,133
def strxor(a, b):
    """ Performs the xor operation over the whole of strings a and b,
    knowing which of the two is longer """
    # xor on two strings of different lengths
    if len(a) > len(b):
        return "".join([chr(ord(x) ^ ord(y)) for (x, y) in zip(a[:len(b)], b)])
    else:
        return "".join([chr(ord(x) ^ ord(y)) for (x, y) in zip(a, b[:len(a)])])
c97b7eba2ce53c9bd136ebfabc762a5e733c3c4a
700,134
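A tiny demonstration of strxor; XOR-ing a string with itself yields NUL characters, and the result is truncated to the shorter input:

    assert strxor("ab", "ab") == "\x00\x00"
    assert len(strxor("abcd", "ab")) == 2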
def get_model_ref(data, name_weights):
    """
    Returns model reference if found by model name and model weights pair.
    Returns None otherwise.

    data - list of tuples (model_name, model_weights_path, model_ref)
    """
    for x in data:
        if name_weights == x[:2]:
            return x[2]
    return None
8525d77c018ec696619161afb3fbb0342ff46a27
700,135
def parse_punishment(argument):
    """Converts a punishment name to its code"""
    punishments = {
        "none": 0,
        "note": 1,
        "warn": 1,
        "mute": 2,
        "kick": 3,
        "ban": 4
    }
    return punishments[argument.lower()]
9ca9ad052c5636dd58f1b375296137de8b55712b
700,136
import re


def varnames2matlab(name, tmodel):
    """
    Transforms reaction variable pairs from
    `('ACALD','ACALD_reverse_xxxxx')` to `('F_ACALD','R_ACALD_reverse')` if
    it is a reaction, else leaves it as is

    :return:
    """
    reverse_regex = re.compile(r'(.+_reverse)_[a-f0-9]{5}')
    new_name = name
    if new_name in tmodel.reactions:
        new_name = 'F_' + new_name
    else:
        test = reverse_regex.match(new_name)
        if test:
            new_name = 'R_' + test.groups()[0]
    return new_name
d7dbf917b6d84d41fc825e912cd8817a22d02eba
700,137
def get_total_fish(days):
    """ Returns a list with the total number of fish present on each of the
    last 8 days, up to and including day `days` """
    countdowns = {
        0: 1, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0, 7: 0, 8: 0,
    }
    fish_count = []
    for i in range(days + 1):
        # count all fish, including those spawned the previous day (timer 8)
        todays_fish = sum(countdowns.values())
        # every fish with timer 0 spawns a new fish
        new_fish = countdowns[0]
        # shift every timer down by one day
        for key in range(8):
            countdowns[key] = countdowns[key + 1]
        countdowns[6] += new_fish  # parents reset to 6
        countdowns[8] = new_fish   # offspring start at 8
        if i >= days - 7:
            fish_count.append(todays_fish)
    return fish_count
ac2bb8d317ee6b9a7077429e55d0f2bf26f99ad2
700,138
import configparser


def get_api_config(filename):
    """
    Attempt to pull in the Twitter app API key and secret. If the key and
    secret don't exist, prompt for them.

    Arguments:
        filename -- name of the config file to try and parse

    Returns:
        config_api_store -- contains the Twitter API key and secret
    """
    config_api_store = {}
    config_twitter_api = configparser.ConfigParser()
    config_twitter_api.read(filename)
    # Try and find the API key and secret in the config file
    try:
        config_api_store["CONSUMER_KEY"] = config_twitter_api['DEFAULT']['CONSUMER_KEY']
        config_api_store["CONSUMER_SECRET"] = config_twitter_api['DEFAULT']['CONSUMER_SECRET']
    # If we can't find them, prompt for them and write them into the
    # configuration file
    except KeyError:
        print("Visit https://apps.twitter.com/ to create an application and "
              "acquire these values (API key and API secret)")
        config_api_store["CONSUMER_KEY"] = input("Please enter a valid twitter app API key: ")
        config_api_store["CONSUMER_SECRET"] = input("Please enter a valid twitter app API secret: ")
        api_config_file = configparser.ConfigParser()
        api_config_file['DEFAULT'] = {
            'CONSUMER_KEY': config_api_store["CONSUMER_KEY"],
            'CONSUMER_SECRET': config_api_store["CONSUMER_SECRET"]
        }
        with open(filename, 'w') as configfile:
            api_config_file.write(configfile)
    return config_api_store
8723e77f2cc30b9f102d141dd46b66a147ee67ef
700,139
def minspantree_helper(matrix, start_node, priority_queue, final_tree):
    """Recursive helper function for question3().

    inputs: adjacency dictionary, value of start node, priority queue, and
        final adjacency dict
    output: final adjacency dict"""
    # iterate through edges of start node
    for tup in matrix[start_node]:
        # if edge destination is already in final tree, don't add it to
        # the priority queue; otherwise insert it keeping the queue sorted
        # by descending weight
        if final_tree and tup[0] in final_tree.keys():
            continue
        elif not priority_queue:
            priority_queue.append((start_node, tup[0], tup[1]))
        else:
            i = 0
            while i < len(priority_queue) and priority_queue[i][2] > tup[1]:
                i += 1
            priority_queue.insert(i, (start_node, tup[0], tup[1]))
    # remove minimum edge from priority queue
    min_edge = priority_queue.pop()
    # add edge to final tree
    if start_node not in final_tree:
        final_tree[min_edge[0]] = [(min_edge[1], min_edge[2])]
        final_tree[min_edge[1]] = [(min_edge[0], min_edge[2])]
    else:
        final_tree[min_edge[0]].append((min_edge[1], min_edge[2]))
        final_tree[min_edge[1]] = [(min_edge[0], min_edge[2])]
    # remove edges from priority queue which travel to nodes already in the tree
    counter = 0
    while counter < len(priority_queue):
        if priority_queue[counter][1] in final_tree.keys():
            del priority_queue[counter]
        else:
            counter += 1
    # return final tree if finished
    if len(matrix) == len(final_tree):
        return final_tree
    # if not, continue recursively with the new edge's destination as the start node
    else:
        return minspantree_helper(matrix, min_edge[1], priority_queue, final_tree)
9f0b56d7f2e60c269744ef265e531154cc87600a
700,141
def get_blue():
    """ create a blue (friendly) actor """
    # return name of actor and grazing speed
    return 'Boar', 2
d0776c362ff841dfdd635f089d6dac0c81d13993
700,142
def make_data_row(url, item_name, scrape_datetime, soup_result_item):
    """ Returns dictionary """
    # Dates and times
    scrape_date = scrape_datetime.date().isoformat()
    scrape_weekday = scrape_datetime.strftime('%A')
    scrape_time = scrape_datetime.time().isoformat()

    # Price data
    product_size = soup_result_item.get("data-displayname")
    sku_id = soup_result_item.get("data-sku")
    autodelivery_price = soup_result_item.get("data-adprice")
    standard_price = soup_result_item.get("data-price")
    sold_out = soup_result_item.get("data-issoldout")

    # Altogether
    data_row = {
        'url': url,
        'scrape_date': scrape_date,
        'scrape_weekday': scrape_weekday,
        'scrape_time': scrape_time,
        'product_name': item_name,
        'product_size': product_size,
        'sku_id': sku_id,
        'autodelivery_price': autodelivery_price,
        'standard_price': standard_price,
        'sold_out': sold_out,
    }
    return data_row
356c76bd3c6b86b711c9964f2fe14b629942b3c2
700,143
def const(a, b):
    """``const :: a -> b -> a``

    Constant function.
    """
    return a
1b3e03d98ab495d1795d3e89d0a57728b1dcef47
700,144
def add_tages_ratio(uid, userid_grouped, flag):
    """ Add the ratio of tags """
    if flag == 0:
        return -1

    df = userid_grouped[uid]
    if df.shape[0] == 0:
        return -1
    else:
        return 1.0 * df[df['tags'] == ['None']].shape[0] / df.shape[0]
2ea37f3ddc16654eb1a840ae4327e5acde8cbe39
700,145
import torch


def sharpness(predictions: list, total=True):
    """
    Calculate the mean size of the intervals, called the sharpness (lower
    is better)

    Parameters
    ----------
    predictions : list
        - predictions[0] = y_pred_upper, predicted upper limit of the
          target variable (torch.Tensor)
        - predictions[1] = y_pred_lower, predicted lower limit of the
          target variable (torch.Tensor)
    total : bool, default = True
        - When total is set to True, return overall sharpness
        - When total is set to False, return sharpness along the horizon

    Returns
    -------
    torch.Tensor
        The sharpness, which depending on the value of 'total' is either a
        scalar (overall sharpness) or 1d-array over the horizon, in which
        case it is expected to increase as we move along the horizon.
        Generally, lower is better.
    """
    assert len(predictions) == 2
    y_pred_upper = predictions[0]
    y_pred_lower = predictions[1]
    if total:
        return torch.mean(y_pred_upper - y_pred_lower)
    else:
        return torch.mean(y_pred_upper - y_pred_lower, dim=0)
16c4fa826e9ffd4a42a3c987fc9fe6767feb9ebb
700,146
import os


def find_last_checkpoint_version(path_to_logs: str):
    """Sort the log directory to pick the last timestamped checkpoint filename."""
    def get_time_from_version_name(name: str):
        # name format example `version_16-10-2020_08-12-48`
        timestamp = name[6:]
        return timestamp

    ckpt_versions = os.listdir(path_to_logs)
    if len(ckpt_versions) == 0:
        return None
    ckpt_versions.sort(key=get_time_from_version_name)

    ckpt_dir = os.path.join(path_to_logs, ckpt_versions[-1], "checkpoints/")
    ckpt_epochs = os.listdir(ckpt_dir)
    if len(ckpt_epochs) == 0:
        return None
    ckpt_epochs.sort(key=lambda x: int(x[6:].split(".")[0]))  # e.g. epoch=2.ckpt
    return os.path.join(ckpt_dir, ckpt_epochs[-1])
9a00bbc794aff0202c661286c4cc8b0d42193a8b
700,147
def unique(new_cmp_dict, old_cmp_dict):
    """Return a list of the values in new_cmp_dict whose keys do not appear
    in old_cmp_dict"""
    newkeys = set(new_cmp_dict)
    oldkeys = set(old_cmp_dict)
    unique = newkeys - oldkeys
    unique_ldict = []
    for key in unique:
        unique_ldict.append(new_cmp_dict[key])
    return unique_ldict
d67d356185b44718e3be788e37340b97a29df352
700,148
import click


def common_gateway_options(func):
    """Supply common gateway options."""
    func = click.option(
        "-v",
        "--protocol_version",
        help="Protocol version of the gateway.",
        default="2.2",
        show_default=True,
    )(func)
    func = click.option(
        "-s", "--persistence", help="Turn on persistence.", is_flag=True
    )(func)
    return func
7611356364201f357623a873f8e35a37cbf4ff9a
700,149
import asyncio


async def _createServer(host, port):
    """
    Create an async server that listens on host:port, reads a client
    request and puts the value into a future that can then be used for
    checks

    :return: reference to the server and the future for the request
    """
    indicator = asyncio.Future()

    async def _handle(reader, writer):
        raw = await reader.readline()
        request = raw.decode("utf-8")
        indicator.set_result(request)

    server = await asyncio.start_server(_handle, host, port)
    return server, indicator
bbd21ede887ae93ba8127aa1bb0a9ff4264b8399
700,150
def interval(*intervals):
    """Decorate a function to be called by the bot every *n* seconds.

    :param int intervals: one or more duration(s), in seconds

    This decorator can be used multiple times for multiple intervals, or
    multiple intervals can be given in multiple arguments. The first time
    the function will be called is *n* seconds after the bot was started.

    Plugin functions decorated by ``interval`` must only take
    :class:`bot <sopel.bot.Sopel>` as their argument; they do not get a
    ``trigger``. The ``bot`` argument will not have a context, so functions
    like ``bot.say()`` will not have a default destination.

    There is no guarantee that the bot is connected to a server or in any
    channels when the function is called, so care must be taken.

    Example::

        from sopel import plugin

        @plugin.interval(5)
        def spam_every_5s(bot):
            if "#here" in bot.channels:
                bot.say("It has been five seconds!", "#here")

    """
    def add_attribute(function):
        function._sopel_callable = True
        if not hasattr(function, "interval"):
            function.interval = []
        for arg in intervals:
            if arg not in function.interval:
                function.interval.append(arg)
        return function
    return add_attribute
4d580cea0853228896c8dec2308a243376c56770
700,151
def _exclude_swift_incompatible_define(define):
    """A `map_each` helper that excludes a define if it is not Swift-compatible.

    This function rejects any defines that are not of the form `FOO=1` or
    `FOO`. Note that in C-family languages, the option `-DFOO` is
    equivalent to `-DFOO=1` so we must preserve both.

    Args:
        define: A string of the form `FOO` or `FOO=BAR` that represents an
            Objective-C define.

    Returns:
        The token portion of the define if it is Swift-compatible, or
        `None` otherwise.
    """
    token, equal, value = define.partition("=")
    if (not equal and not value) or (equal == "=" and value == "1"):
        return token
    return None
9ce87f52f8829636364e2671f59a0eb9e66f5a9b
700,152
def commands_almost_equal(command1: str, command2: str, delta: float = 1.0) -> bool:
    """Check if two commands are almost equal.

    Almost equal means we allow numerical parts in the commands to differ
    by the defined delta.

    Args:
        command1 (str): first command.
        command2 (str): second command.
        delta (float): allowed numerical delta.

    Returns:
        bool: if the two commands are almost equal.
    """
    if command1 == command2:
        return True
    command1_parts, command2_parts = command1.split(), command2.split()
    if len(command1_parts) != len(command2_parts):
        return False
    for part1, part2 in zip(command1_parts, command2_parts):
        if part1 == part2:
            continue
        try:
            part1_int = int(part1)
            part2_int = int(part2)
            if abs(part1_int - part2_int) > delta:
                return False
        except ValueError:
            return False
    return True
4d75afadb2b6db5911a227d205c81f4cbfdc7f01
700,153
import re


def hex_color_code(value: str):
    """
    Hex color validator

    Example Result: [#00ff00, #fff]
    """
    _hex_color_pat = r'#(?:[A-Fa-f0-9]{6}|[A-Fa-f0-9]{3})'
    return re.findall(_hex_color_pat, value)
760db0bd1b729b62171b6964d1615546e32dbe52
700,154
import re
import logging


def parse(fqdn):
    """Parses an M-Lab FQDN into its constituent parts.

    Args:
        fqdn: str, an M-Lab FQDN e.g., ndt-iupui-mlab1-den05.mlab-oti.measurement-lab.org

    Returns:
        dict representing the constituent parts.
    """
    # This regex *should* match all valid M-Lab domain names, for both nodes
    # and experiments, for both v1 and v2 names. It makes use of non-capturing
    # groups denoted by '(?:)'. What is interesting is that you can specify
    # capturing groups inside of non-capturing groups.
    regex = r'(?:([a-z]+)(?:[.-]([a-z]+))?[.-])?(mlab[1-4])[.-]([a-z]{3}[0-9ct]{2})(?:\.(mlab-[a-z]+))?\.(.*)$'
    matches = re.match(regex, fqdn)
    if not matches or len(matches.groups()) != 6:
        logging.error('Failed to parse FQDN: %s', fqdn)
        return {}

    parts = list(matches.groups())
    fqdn_parts = {
        'experiment': parts[0],
        'org': parts[1],
        'machine': parts[2],
        'site': parts[3],
        'project': parts[4],
        'domain': parts[5],
    }
    return fqdn_parts
a05a7125b1818668dc681be460a326c2a5a2f065
700,155
def buffer_type(request):
    """
    Fixture that yields types that support the buffer protocol.
    """
    return request.param
afc79bf3ac5bfeb53fe9cb8de707b2f0a93ae6f8
700,156
def minargmin(sequence):
    """Returns the minimum value and the first index at which it can be
    found in the input sequence."""
    best = (None, None)
    for (i, value) in enumerate(sequence):
        if best[0] is None or value < best[0]:
            best = (value, i)
    return best
cf66ccd0dc76d3530fe7b2503bb3ed3b31c7ba61
700,157
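A short check of minargmin; ties resolve to the first index because the comparison is strict:

    assert minargmin([3, 1, 2, 1]) == (1, 1)
    assert minargmin([]) == (None, None)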
def django_id_to_cloudsearch(s):
    """
    Convert haystack ids to legal cloudsearch index field names
    """
    return s.replace('.', '__')
e70a5961b5189b1177b5ba2adeac3287ca2fc091
700,158
def _is_eqsine(opts):
    """
    Checks to see if the 'eqsine' option is set to true

    Parameters
    ----------
    opts : dict
        Dictionary of :func:`pyyeti.srs.srs` options; can be empty.

    Returns
    -------
    flag : bool
        True if the eqsine option is set to true.
    """
    if "eqsine" in opts:
        return opts["eqsine"]
    return False
3515a75eb2c0976198700e1fe068cd15b0017d8f
700,159
from typing import Callable
from typing import List


def generate_definition(cls: Callable) -> List[str]:
    """Generates a function signature from a pydantic class object"""
    # Fetch parameters
    params = cls.__annotations__
    return [
        f"{name}: {data_type},"
        if "Optional" not in data_type
        else f"{name}: {data_type} = None,"
        for name, data_type in params.items()
    ]
2e0a40875f78eb07733fa94fbadbd1d5ee06f2c7
700,160
import itertools


def _limited_walk(node, app):
    """Walk the tree like preorder, expand nodes iff placement is feasible."""
    if node.check_app_constraints(app):
        return itertools.chain(
            [node],
            *[_limited_walk(child, app) for child in node.children]
        )
    else:
        return [node]
663a107264e33115f0cbacf1e791db4b662db991
700,161