content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def nearest_point(points, point, exclude_index=None, exclude_elements=None):
    """Return the candidate in *points* nearest to *point*.

    Args:
        points (list): Candidate points.
        point (Point): Reference point.
        exclude_index (list): Indices to skip.
        exclude_elements (list): Point objects to skip.

    Returns:
        int, Point: index and point of the nearest candidate, or
        (-1, None) when every candidate is excluded.
    """
    best_idx, best_pt = -1, None
    best_dist = float("+inf")
    for idx, candidate in enumerate(points):
        # Skip excluded indices and excluded point objects.
        if exclude_index and idx in exclude_index:
            continue
        if exclude_elements and candidate in exclude_elements:
            continue
        dist = candidate.distance_to(point)
        if dist < best_dist:
            best_dist = dist
            best_idx, best_pt = idx, candidate
    return best_idx, best_pt
718dc0d566b4d95245c29e0a7fc9e4a08aff42fb
31,388
from typing import Dict
import os


def load_by_prefix(prefix: str) -> Dict[str, str]:
    """Load env variables and filter by prefix.

    The prefix plus one separator character is stripped from each key.
    """
    skip = len(prefix) + 1
    return {
        name[skip:]: value
        for name, value in os.environ.items()
        if name.startswith(prefix)
    }
9a7ca57e45a8ec83e0ffe36547d325c87c5c189d
31,389
def vertical_velocity_from_pass_diagnostics(diagnostic):
    """Compute vertical velocity for a chain_pass diagnostic row.

    vertical velocity = vertical distance / cumulative time.
    Returns None for chains of a single pass.
    """
    vertical_distance = diagnostic[5]
    pass_count = diagnostic[7]
    elapsed_time = diagnostic[9]
    if pass_count <= 1:
        return None
    return float(vertical_distance) / elapsed_time
8d9c0d49d9efd97870ad188c0ca8ad144a2ceb40
31,390
import os
import hashlib


def create_hash(password):
    """Derive a salted PBKDF2-HMAC-SHA256 key for storage.

    https://nitratine.net/blog/post/how-to-hash-passwords-in-python/

    :param password: plain password to be stored
    :return: (salt_hex, key_hex) tuple to be stored in the db
    """
    salt = os.urandom(32)  # Remember this
    key = hashlib.pbkdf2_hmac(
        'sha256',  # The hash digest algorithm for HMAC
        password.encode('utf-8'),
        salt,
        100000,
    )
    return salt.hex(), key.hex()
e1eec581d446fd7a3cfdfc484ba2b8a3d76ca8dd
31,391
import os


def filter_files(path, string):
    """Return the names of files in *path* that contain *string*.

    :param path: directory to list
    :param string: substring each returned name must contain
    :raises ValueError: if the directory cannot be listed
    """
    try:
        listing = os.listdir(path)
    except OSError as exc:
        # Preserve the historical error type, but stop using a bare
        # ``except`` (which also swallowed KeyboardInterrupt/SystemExit)
        # and keep the original cause attached for debugging.
        raise ValueError("Error in upy.contrib.tree.menu @ filter_files()") from exc
    return [name for name in listing if string in name]
b2b15927ffdc02660dec32d1aec19125b251b6d7
31,392
def adjust_time(times, time_zone):
    """Shift each "HH:MM" entry in *times* by *time_zone* hours.

    :param times: list of time blocks ("HH:MM" strings, or falsy for free slots)
    :param time_zone: number of hours to offset the availability
    :return: time-zone-shifted times list; empty entries are kept as u'',
        entries whose hour part is not an integer are dropped
    """
    new_times = []
    for time in times:
        if not time:
            new_times += [u'']
            continue
        time_ary = time.split(":")
        # The int() conversion must live inside the try block: the original
        # code converted outside it, so its ValueError handler was unreachable
        # and malformed hours crashed instead of being skipped.
        try:
            new_hour = str(int(time_ary[0]) + time_zone)
        except ValueError:
            continue
        new_times += [":".join([new_hour, time_ary[1]])]
    return new_times
c4570307ec43030884e5d6f7888fce5691a39ba0
31,393
def is_pkcs7_padded(binary_data):
    """Return whether *binary_data* ends with valid PKCS#7 padding.

    :param binary_data: bytes-like message to check
    :return: True when the last byte is a valid pad length and every byte
        in that range equals it.
    """
    # Empty input cannot be padded (the original indexed [-1] and crashed).
    if not binary_data:
        return False
    pad_len = binary_data[-1]
    # A valid pad length is 1..len(data); the original let pad_len == 0
    # select the whole buffer via data[-0:], which is data[0:].
    if pad_len == 0 or pad_len > len(binary_data):
        return False
    padding = binary_data[-pad_len:]
    return all(byte == pad_len for byte in padding)
6b462f979e4bb1ae2a4176d4c3e9d8378bd52e6d
31,394
from typing import Sequence from typing import List def _rescale(val: Sequence[float], low: float, high: float) -> List[float]: """ Rescales a list of confidence value between 0 and 1 to an interval [low, high]. Args: val (float): List of values in interval (0,1) low (float): Lower bound of rescaling interval high (float): Upper bound of rescaling interval Returns: Rescaled value (float). """ return [(high - low) * x + low for x in val]
60722f288fac88bae035aec7bc3cc73b96c282bb
31,395
def remove_pdb_and_cdr(df, pdbid, cdr):
    """Drop rows matching both *pdbid* (lower-cased) and *cdr* from the db.

    Returns the new DataFrame.
    """
    # De Morgan of the original ~(cdr-match & pdbid-match).
    keep = (df['cdr'] != cdr) | (df['pdbid'] != pdbid.lower())
    return df[keep]
eafeead11b06f9600d4e75b514f953dd03b0b2ae
31,396
def get_handler_name(message):
    """Validate a message's 'type' and derive the handler name for it.

    Raises ValueError when the type is missing or starts with an underscore.
    """
    if "type" not in message:
        raise ValueError("Incoming message has no 'type' attribute")
    msg_type = message["type"]
    if msg_type.startswith("_"):
        raise ValueError("Malformed type in message (leading underscore)")
    # Handler names use underscores where the type uses dots.
    return msg_type.replace(".", "_")
1e0e19db61de993df4c01466551daf075a7fe60a
31,397
def element_selectionnable(elem, liste_tags):
    """Return True if the OSM element carries every tag key in *liste_tags*.

    :param elem: OSM element (node, way, relation)
    :type elem: xml.etree.ElementTree.Element
    :param liste_tags: tag keys ('k' attributes) the element must have
    :type liste_tags: list of str
    :return: True when the element is selectable, False otherwise
    :rtype: bool
    """
    present = [child.get('k') for child in elem.findall('tag')]
    return all(tag in present for tag in liste_tags)
a075b368bf385514c32341ee87884495a196d204
31,398
def _good_args(value: dict) -> bool:
    """ Non-restrictive; checks for default args, but allows more. """
    # Validates an OpenC2-style "args" dict: a handful of well-known keys
    # plus arbitrary NSID extension keys whose values must be dicts.
    if not isinstance(value, dict):
        return False
    # An empty args dict is rejected.
    if not len(value):
        return False
    # response_requested, when present, must be one of the known values.
    if value.get('response_requested', 'none') not in ['none', 'ack', 'status', 'complete']:
        return False
    cant_have_all = ['start_time', 'stop_time', 'duration']
    # The three timing args may appear in any pair, but never all together.
    if all(key in value.keys() for key in cant_have_all):
        return False
    for key in cant_have_all:
        # Default of 1 (an int) makes absent keys pass the type check.
        # NOTE(review): isinstance(..., int) also accepts bool — confirm
        # whether True/False should be rejected here.
        check_me = value.get(key, 1)
        if not isinstance(check_me, int):
            return False
    # Any other keys in this dictionary need to be NSID's, with
    # their value being a dictionary.
    basic = [*cant_have_all, 'response_requested']
    if not all(isinstance(value[nsid_key], dict)
               for nsid_key in value.keys()
               if nsid_key not in basic):
        return False
    return True
b8449e87c48b9ffd5741dfc4fcb3ad943f57b781
31,399
def _atom_mapping(atoms: list) -> dict: """ Maps atoms to a dictionary. :param atoms: a list of atoms :return: a dictionary mapping """ atom_dict = dict() for atom in atoms: atom_dict[atom[1]] = { "size": atom[0] } return atom_dict
388485a5e974eb9b11b226b16c9c6d56f2191542
31,400
import hashlib
import pickle


def input_data_fingerprint(input_data):
    """Return the MD5 hex digest of the pickled input.

    Used for duplicate detection of input data.
    """
    digest = hashlib.md5(pickle.dumps(input_data))
    return digest.hexdigest()
d841e7d1ecbba938ada996294769ab7c6743f480
31,401
def find_last_sublist(list_, sublist):
    """Find the last occurrence of *sublist* inside *list_*.

    Returns:
        Index where the sublist starts, or None if there is no match.
    """
    width = len(sublist)
    # Walk candidate start positions from right to left.
    for start in range(len(list_) - width, -1, -1):
        if list_[start] == sublist[0] and list_[start:start + width] == sublist:
            return start
    return None
cd3efb83eaeceb5b3705115b6ee57aeb5628db64
31,403
def _fractional_tune(tune: float) -> float: """ Return only the fractional part of a tune value. Args: tune (float): tune value. Returns: The fractional part. """ return tune - int(tune)
42b2cc45c3fa071c0ea78343fb3d6152ecacbbf1
31,405
def _twos_comp(val, bits): """ Convert an unsigned integer in 2's compliment form of the specified bit length to its signed integer value and return it. """ if val & (1 << (bits -1)) != 0: return val -(1 << bits) return val
c0a56d49adc0f8b17920bb6878ed2602836122b4
31,406
def select_random_from_category(conn, cat):
    """
    Obtains a random fortune from a given category.

    :param conn: A handle to the database connection. NOTE(review): indexed
        with conn[1], so this looks like a (connection, cursor) pair —
        confirm against callers.
    :param cat: The category ID.
    :return: The text of the fortune.
    """
    # ORDER BY RANDOM() LIMIT 1 picks a single random row (SQLite syntax);
    # the parameter is passed positionally to avoid SQL injection.
    conn[1].execute("SELECT data FROM fortunes WHERE category = ? ORDER BY " +
                    "RANDOM() LIMIT 1", (str(cat),))
    # Raises IndexError if the category holds no fortunes.
    return conn[1].fetchall()[0][0]
622de6c17c718df1d60d124f98003eefdaeefdba
31,407
import sys


def userinput(prompttext=""):
    """
    Get the input of the user via a universally secure method

    prompttext: The text to display while receiving the data. The default is "".
    """
    # Python 2's input() eval()s the typed text, which is unsafe; hence the
    # version split between input() (py3) and raw_input() (py2).
    if sys.version_info > (3, 0):
        # Python 3 code in this block
        return input(str(prompttext))
    else:
        # Python 2 code in this block
        return raw_input(str(prompttext))
a24cab0a1d0cb60dadd1fb02c6ad9f65f9553855
31,408
def get_prefix(n, factor=1024, prefixes=None):
    """Get magnitude prefix for number.

    Repeatedly divides *n* by *factor* until it fits, returning the scaled
    value and the matching SI-style prefix.
    """
    if prefixes is None:
        prefixes = ('',) + tuple('kMGTPEZY')
    value = n
    last = len(prefixes) - 1
    # Iterative form of the original tail recursion.
    for position, prefix in enumerate(prefixes):
        if abs(value) < factor or position == last:
            return value, prefix
        value = value / factor
6d66bbe1642b8711484489f3be878196c763607e
31,410
from typing import Tuple


def process_arguments(arguments: str) -> Tuple[str, str, int]:
    """Split the !weather argument string into (state, location, future).

    Defaults to QLD, Brisbane and 0 when parts are not given.
    """
    tokens = arguments.split(" ") if arguments else []

    # A trailing (optionally signed) number is the "future" offset.
    future = 0
    if tokens and tokens[-1].lstrip('-+').isnumeric():
        future = int(tokens.pop())

    if not tokens:
        return "QLD", "Brisbane", future

    known_states = ["NSW", "ACT", "NT", "QLD", "SA", "TAS", "VIC", "WA"]
    if tokens[0].upper() in known_states:
        state = tokens.pop(0).upper()
    else:
        state = "QLD"
    return state, " ".join(tokens), future
8bd887bab7d3bbc002973e4a327610933130021a
31,411
def read_input_file(filename: str):
    """Parse a tab-separated NER file into sentences and a label-index map.

    Each line holds a word in the first column followed by its label (named
    entity) in the second; sentences are separated by empty lines. Each
    sentence becomes a list of (word, label) tuples.

    :param filename: Name of the file
    :return: List of sentences, map of label to index ('O' is always 0)
    """
    sentences = []
    current = []
    label2idx = {'O': 0}
    next_label_idx = 1
    with open(filename, 'r', encoding='utf-8') as handle:
        for raw_line in handle:
            stripped = raw_line.strip()
            if stripped == "":
                # Blank line ends the current sentence (if any).
                if current:
                    sentences.append(current)
                    current = []
                continue
            fields = stripped.split('\t')
            word, label = fields[0], fields[1]
            current.append((word, label))
            if label not in label2idx:
                label2idx[label] = next_label_idx
                next_label_idx += 1
    # Flush a trailing sentence that has no terminating blank line.
    if current:
        sentences.append(current)
    return sentences, label2idx
c44e19aafb8b2e1a58b96275bd794cb270b9ad76
31,412
import json


def dataframe_to_dict(df):
    """
    convert a pandas DataFrame (or series) to a list of columns ready for
    conversion to JSON

    Each entry is {"name": ..., "values": ...}; the first entry is always the
    index. Round-tripping through to_json/json.loads normalizes values (e.g.
    dates become ISO strings) so the result is JSON-serializable.
    """
    # assume df is a pd.DataFrame if it contains "columns", else it is a pd.Series
    columns = []
    if hasattr(df, "columns"):
        # data frame
        columns.append({"name": df.index.name, "values": json.loads(df.to_json(orient="split", date_format="iso"))["index"]})  # index
        for col in df.columns:
            # columns
            columns.append({"name": col, "values": json.loads(df[col].to_json(orient="values", date_format="iso"))})
    else:
        # series
        series = df
        columns.append({"name": series.index.name, "values": json.loads(series.to_json(orient="split", date_format="iso"))["index"]})  # index
        columns.append({"name": series.name, "values": json.loads(series.to_json(orient="values", date_format="iso"))})  # values
    return columns
a00b18fcd1d2502e70cd213af0910a48aa6a546d
31,413
def bsc(n):
    """ count the bits set in n"""
    count = 0
    remaining = n
    # Test each of the n.bit_length() low-order bits.
    for _ in range(n.bit_length()):
        count += remaining & 1
        remaining >>= 1
    return count
d44dfc3495a293d5f98f053c279813318cb8906a
31,414
def group_months(x):
    """
    A binning function to reduce the number of months patterns are released.

    Months 1-2 -> 1, 3-4 -> 2, 5-6 -> 3, 7-8 -> 4, 9-12 -> 5; anything
    >= 13 is returned unchanged.
    """
    if x < 3:
        return 1
    if x < 5:
        return 2
    if x < 7:
        return 3
    if x < 9:
        return 4
    if x < 13:
        return 5
    return x
b4bb4a86a3403e3ce897c2d1a49209873279c11f
31,415
def path_velocity(path):
    """
    No velocity consideration, assume uniform time.

    :param path: 2D array-like with each row being the coordinates visited
    :return: The array of adjacent row-wise differences (len(path)-1 rows)
    """
    later, earlier = path[1:], path[:-1]
    return later - earlier
f5e08101abc69632ed659917e03d945f54330c01
31,417
def zip_base(x, y):
    """Dot product of x and y via zip.

    Baseline timing note: 1000 loops, best of 3: 734 µs per loop.
    """
    total = 0
    for a, b in zip(x, y):
        total += a * b
    return total
fe7ebb09efc66914bc9f0c43d79d0287de63d65d
31,418
def isWinner(board):
    """Looks at `board` and returns either '1' or '2' if there is a winner
    or 'tie' or 'no winner' if there isn't. The game ends when a player has
    24 or more seeds in their mancala or one side of the board has 0 seeds
    in each pocket."""
    side_one = sum(board[p] for p in 'ABCDEF')
    side_two = sum(board[p] for p in 'GHIJKL')
    game_over = (board['1'] >= 24 or board['2'] >= 24
                 or side_one == 0 or side_two == 0)
    if not game_over:
        return 'no winner'
    # Game is over: the player with the larger score wins.
    if board['1'] > board['2']:
        return '1'
    if board['2'] > board['1']:
        return '2'
    return 'tie'
ff0b34f3e4072e72533482600124d4be0ad0ba31
31,419
def z_score_vec(data_df, window=14):
    """
    Standardized deviation of the prices in a window.

    :param data_df: Unstacked for one symbol only; must contain a 'Close'
        column.
    :param window: An integer number of days to look back.
    :return: A pandas Series with the values of z-score for all the valid
        dates (NaN for the first window-1 rows).
    """
    close = data_df['Close']
    # z-score of the last element of each rolling window against that
    # window's mean and sample std (ddof=1).
    # NOTE(review): x[-1] relies on positional indexing of the window object
    # passed to apply; on newer pandas with raw=False this is label-based
    # for Series — consider passing raw=True. Confirm target pandas version.
    return close.rolling(window=window, center=False).apply(lambda x: (x[-1] - x.mean()) / x.std(ddof=1))
849bb2226661527f6ea8f4c98b3b8935831fd8d2
31,420
def get_from_nested_dict(nested_dicts, path):
    """
    Retrieves a value from a nested_dict by walking a '/'-separated path.

    Args:
        path (str): e.g. "a/0/b" — dict keys are used verbatim, list
            segments are converted to int indices.
    """
    node = nested_dicts
    for segment in path.split("/"):
        # Exact type checks (not isinstance) are kept from the original.
        if type(node) is dict:
            node = node[segment]
        elif type(node) is list:
            node = node[int(segment)]
    return node
4edb3fdfade39954ea47d125e141c9fdacb847e8
31,421
from typing import Any def _strip_db_indexing_key(obj: dict[str, Any]): """ Strip MongoDB's ObjectId key `_id`. """ return {key: obj[key] for key in obj if key != "_id"}
5c63742bb4f8e2bd8a3844eb549468a33dd5df7c
31,422
def is_ambiguous_align(tags, multi_align_tag):
    """Return whether the read aligns to multiple locations.

    The multi_align_tag depends on the mapper. For bowtie2 it is XS."""
    return any(tag[0] == multi_align_tag for tag in tags)
fa70ff74d57215b74ccd7afd10ca381f7a1c5762
31,423
def html_table(table, cellspacing="10", style='"width:500px"'):
    """Build an HTML table from a list of rows of cell strings.

    :param table: list of rows, each a list of cell texts
    :param cellspacing: value of the table's cellspacing attribute
    :param style: unused; kept for backward compatibility
    :return: the HTML markup as a string
    """
    body = ""
    for row in table:
        cells = "".join(
            "".join(['\t<td>', cell, '</td>\n']) for cell in row)
        body = "".join([body, '<tr>\n', cells, '\n<tr>\n'])
    return "".join(['<table cellspacing="', str(cellspacing), '" >\n',
                    body, '\n</table>'])
b3404a43b7ca20fe83f97892595d53de90a8987d
31,424
import math


def entropy(ps):
    """Calculates the Shannon entropy (log base 2) of the distribution *ps*.

    Zero probabilities contribute nothing (0 * log 0 is treated as 0).
    """
    total = 0.0
    for p in ps:
        if p == 0:
            continue
        total -= p * math.log(p, 2)
    return total
609e0e2f03579c8ce39f274116f69ca177eabccc
31,425
import pickle
import copy


def test_info_preserved_pickle_copy_init(mixin_cols):
    """
    Test copy, pickle, and init from class roundtrip preserve info.

    This tests not only the mixin classes but a regular column as well.
    """
    # NOTE(review): mixin_cols is a fixture supplying columns keyed by
    # name; 'i' and 'm' are assumed to exist — confirm against the fixture.
    def pickle_roundtrip(c):
        # Serialize and deserialize to exercise __reduce__/__setstate__.
        return pickle.loads(pickle.dumps(c))

    def init_from_class(c):
        # Re-construct via the column's own class.
        return c.__class__(c)

    attrs = ('name', 'unit', 'dtype', 'format', 'description', 'meta')
    for colname in ('i', 'm'):
        m = mixin_cols[colname]
        # Set every info attribute so the roundtrip has something to lose.
        m.info.name = colname
        m.info.format = '{0}'
        m.info.description = 'd'
        m.info.meta = {'a': 1}
        for func in (copy.copy, copy.deepcopy, pickle_roundtrip, init_from_class):
            m2 = func(m)
            for attr in attrs:
                assert getattr(m2.info, attr) == getattr(m.info, attr)
7ada630bb71ed3f0f7d156bde892bd7696f87762
31,427
def get_ray_index_for_grid_point(ray, grid_idx, n_depth_pts):
    """Given a ray and a particular point on the physical grid, return the
    index along that ray corresponding to that point.

    Rays with negative mu index forward; otherwise the ordering along the
    ray is reversed.
    """
    if ray.mu < 0:
        return grid_idx
    return n_depth_pts - grid_idx - 1
9d1573812c4d66ef53cf0640a8c1e6602b170772
31,428
def cB_to_zipf(cB):
    """
    Convert a word frequency from centibels to the Zipf scale
    (see `zipf_to_freq`).

    The Zipf unit is simply the bel with a different zero point:
    add 900 centibels, then divide by 100.
    """
    shifted = cB + 900
    return shifted / 100
c71a2a7b7417480cc00855852cf447e4741317b0
31,429
def GetLongestMatchOption(searchstr, options=None, ignore_case=True):
    """ Get longest matched string from set of options.
        params:
            searchstr : string of chars to be matched
            options   : array of strings that are to be matched
        returns:
            [] - array of matched options. The order of options is same as
                 the arguments. An exact match returns just that option;
                 empty array is returned if searchstr does not match any
                 option.
        example:
            subcommand = GetLongestMatchOption('Rel', ['decode', 'enable', 'reload'], ignore_case=True)
            print subcommand # prints ['reload']
    """
    # options=None instead of options=[]: a mutable default is shared
    # across calls and can be corrupted by any caller that mutates it.
    if options is None:
        options = []
    needle = searchstr.lower() if ignore_case else searchstr
    found_options = []
    for option in options:
        candidate = option.lower() if ignore_case else option
        if candidate == needle:
            # Exact match wins immediately.
            return [option]
        if needle in candidate:
            found_options.append(option)
    return found_options
b8083022c4b831ac80ad3b22a42655c5b5754897
31,431
def selected_filter_by_market_value(dfg, min_total_mv=None):
    """Filter a day's strong-stock selection result by total market value.

    :param dfg: selection result for a single trading day (DataFrame)
    :param min_total_mv: minimum total market value, in units of 1e4 CNY
        (1e6 of these units = 10 billion); falsy disables the filter
    :return: the filtered selection result
    """
    if dfg.empty or not min_total_mv:
        return dfg
    mask = dfg['total_mv'] >= min_total_mv
    return dfg[mask]
fe53550c146faab874d4efc0fcf7a508e54e7bc3
31,433
def bayes_factor_pass(bayes_factor, bf_filter):
    """
    Checks which of a list of Bayes factors pass the filter check.
    1 if pass, 0 if no pass. A single float is treated as a one-element
    list.
    """
    if isinstance(bayes_factor, float):
        bayes_factor = [bayes_factor]
    return [0 if abs(bf) < bf_filter else 1 for bf in bayes_factor]
067b929cdc52502baaf14b7532dbfa69e495551e
31,434
def get_wavelengths(snirf):
    """Returns a list of the channel wavelengths in the SNIRF file."""
    # snirf is indexed like an h5py File/dict following the SNIRF layout
    # /nirs/probe/wavelengths; [:] materializes the dataset into an array.
    wavelengths = snirf["nirs"]["probe"]["wavelengths"][:]
    # Flatten to 1-D and coerce to float for downstream numeric use.
    return wavelengths.flatten().astype(float)
eca371732c0faf0d8f6bbcad0049601b6b889f1a
31,435
def make_catalog_sources(catalog_roi_model, source_names):
    """Construct and return dictionary of sources that are a subset of
    sources in catalog_roi_model.

    Parameters
    ----------
    catalog_roi_model : dict or `fermipy.roi_model.ROIModel`
        Input set of sources

    source_names : list
        Names of sources to extract

    Returns dict mapping source_name to `fermipy.roi_model.Source` object
    """
    return {name: catalog_roi_model[name] for name in source_names}
1939e8d1819b3b6823edf1c4cd1635d461bfe189
31,436
def load_tag_files_options ( options ):
    """From the options, load treatment tags and control tags (if available).

    Returns a (treat, control) pair of finalized fwtrack objects; control is
    None when no control file was given. NOTE(review): mutates
    options.tsize when it was unset — confirm callers expect this.
    """
    options.info("#1 read treatment tags...")
    tp = options.parser(options.tfile[0], buffer_size=options.buffer_size)
    if not options.tsize:
        # override tsize if user specified --tsize
        ttsize = tp.tsize()
        options.tsize = ttsize
    treat = tp.build_fwtrack()
    #treat.sort()
    if len(options.tfile) > 1:
        # multiple input: append every remaining treatment file's tags.
        for tfile in options.tfile[1:]:
            tp = options.parser(tfile, buffer_size=options.buffer_size)
            treat = tp.append_fwtrack( treat )
            #treat.sort()
    treat.finalize()
    if options.cfile:
        options.info("#1.2 read input tags...")
        control = options.parser(options.cfile[0], buffer_size=options.buffer_size).build_fwtrack()
        #control.sort()
        if len(options.cfile) > 1:
            # multiple input: append every remaining control file's tags.
            for cfile in options.cfile[1:]:
                cp = options.parser(cfile, buffer_size=options.buffer_size)
                control = cp.append_fwtrack( control )
                #control.sort()
        control.finalize()
    else:
        control = None
    options.info("#1 tag size is determined as %d bps" % options.tsize)
    return (treat, control)
45b91f306d9ee71a941fc4324fd2b12e7abf5a76
31,437
from typing import Match
import re


def find_raw_string_literal_end(file_src: str, m: Match[str]) -> int:
    """Returns the pos just beyond the raw string literal that starts with m.

    *m* must be a match ending in 'R"'. Raises AssertionError when the
    opening or closing delimiter of the raw string cannot be located.
    """
    if not m.group(0).endswith('R"'):
        raise AssertionError(f'Expected start of raw string literal: {m.group()}')
    # We've matched the start of a Raw String mcucore::Literal. Determine the delimiter,
    # then search for the end of the string.
    # C++ raw-string delimiters are up to 16 chars, excluding parens,
    # backslash and whitespace, terminated by '('.
    regexp = re.compile(r'[^()\\ \f\n\r\t\v]{0,16}\(')
    m2 = regexp.match(file_src, pos=m.end())
    if not m2:
        raise AssertionError(
            'Unable to locate opening delimiter of the Raw String mcucore::Literal '
            f'starting at {m.start()}: {file_src[m.start():m.start()+32]!r}')
    # The closing token mirrors the opening one: ')' + delimiter + '"'.
    needle = ')' + m2.group()[0:-1] + '"'
    pos1 = file_src.find(needle, m2.end())
    if pos1 < 0:
        raise AssertionError(
            'Unable to locate closing delimiter of the Raw String mcucore::Literal '
            f'starting at {m.start()}: {file_src[m.start():m.start()+32]!r}')
    pos2 = pos1 + len(needle)
    return pos2
1b5523d7a4185bc857e4e7c44b1a5334f56efcb2
31,439
def notas(*notas, status=False):
    """Summarize a student's grades.

    :param notas: the grades (required)
    :param status: include the student's situation (optional, default False)
    :return: dict with each grade (nota1..notaN), the mean ('media'),
        the grade count ('total') and, when requested, 'status'
    """
    aluno = {f"nota{pos + 1}": nota for pos, nota in enumerate(notas)}
    media = sum(notas) / len(notas)
    aluno['media'] = media
    aluno['total'] = len(notas)
    if status:
        if media >= 7:
            aluno["status"] = 'Boa'
        elif media >= 5:
            aluno["status"] = 'Razoavel'
        else:
            aluno["status"] = 'Ruim'
    return aluno
75323654a2e68895b4b73004803a9a17fc497bc5
31,440
def can_contain(parent_type, child_type):
    """ Returns true if parent block can contain child block. """
    if parent_type in ('Document', 'BlockQuote', 'ListItem'):
        return True
    # A List may only hold ListItems.
    return parent_type == 'List' and child_type == 'ListItem'
c8cef3515b3306f779525c59486b526654649433
31,441
def sum_2d_array(two_d_array):
    """
    Sum each internal array of a 2d array.

    - Input: a 2d array
    - Output: a 1d array of the per-sub-array sums
    - An empty sub-array sums to 0
    """
    return [sum(inner) for inner in two_d_array]
f44351fe68112ca9b9b6efe980a8c19a5437d340
31,444
def test_callback(container, text=''): """ A callback used for basic testing. """ return { 'actions': [ { 'action': 'chat.postMessage', 'kwargs': { 'text': '{}'.format(text) } } ] }
7e83d55ce00b176a1aa06da7ad5faea5ec58e1b7
31,448
def interpret_as_slice(column):
    """Interprets the 'column' argument of loadFitnessHistory into a slice().

    :param column: None (everything), an int (a single column), or a pair
        of ints (a range of columns).
    :raises ValueError: for any other format.
    """
    if column is None:
        # No specific column is requested, return everything
        return slice(None)
    if isinstance(column, int):
        # One specific column is requested
        return slice(column, column + 1)
    if len(column) == 2 and all(isinstance(val, int) for val in column):
        # Multiple columns are requested
        return slice(*column)
    # ValueError instead of bare Exception: it is the conventional type for
    # bad argument formats, and as a subclass of Exception any existing
    # `except Exception` handlers still catch it.
    raise ValueError("Invalid format for 'column': {col}".format(col=column))
081a9634169a0752ecd311c82b22e3bd498048d8
31,451
import argparse


def get_args_from_command_line():
    """Parse the command line arguments.

    Returns the parsed Namespace; defaults point at a specific local setup,
    so callers normally override them on the command line.
    """
    parser = argparse.ArgumentParser()
    # necessary
    parser.add_argument("--checkpoint_dir", type=str,
                        help="Path to the models",
                        default="/home/manuto/Documents/world_bank/bert_twitter_labor/code/glove-text-cnn/runs/default_run_name/checkpoints")
    parser.add_argument("--eval_data_path", type=str,
                        help="Path to the evaluation data. Must be in csv format.",
                        default="/home/manuto/Documents/world_bank/bert_twitter_labor/code/twitter/data/may20_9Klabels/data_binary_pos_neg_balanced")
    parser.add_argument("--vocab_path", type=str,
                        help="Path pickle file.",
                        default="/home/manuto/Documents/world_bank/bert_twitter_labor/data/glove_embeddings/vocab.pckl")
    # NOTE(review): type=bool on argparse is a known trap — any non-empty
    # string (including "False") parses as True; consider
    # action="store_true" instead.
    parser.add_argument("--preprocessing", default=False, type=bool)
    args = parser.parse_args()
    return args
177bd80d7700f42db0b27d4af47276424de54584
31,452
import torch


def cal_S(points):
    """Compute the (signed) shoelace area of N quadrilaterals.

    points: torch.Tensor with shape (N, 4, 2)
    Returns a tensor of shape (N,).
    """
    xs = points[..., 0]
    ys = points[..., 1]
    # Roll by one vertex to pair each point with its predecessor.
    xs_prev = xs.roll(1, dims=1)
    ys_prev = ys.roll(1, dims=1)
    cross_terms = (ys * xs_prev - xs * ys_prev) / 2
    return torch.sum(cross_terms, 1)
8c4284c10b1a4bb41b99fa506fb399fa13caea48
31,453
import os
import glob
import pathlib


def find_zephyr_sdk():
    """Find the Zephyr SDK, if it's installed.

    Returns:
        The path to the Zephyr SDK, using the search rules defined by
        https://docs.zephyrproject.org/latest/getting_started/installation_linux.html

    Raises:
        OSError: when no candidate directory contains an sdk_version file.
    """
    def _gen_sdk_paths():
        # Environment variable takes precedence; may yield None when unset.
        yield os.getenv('ZEPHYR_SDK_INSTALL_DIR')
        for searchpath in ('~/zephyr-sdk', '~/.local/zephyr-sdk',
                           '~/.local/opt/zephyr-sdk', '~/bin/zephyr-sdk',
                           '/opt/zephyr-sdk', '/usr/zephyr-sdk',
                           '/usr/local/zephyr-sdk'):
            # The '-*' suffix also matches versioned installs
            # (e.g. zephyr-sdk-0.13.2).
            for suffix in ('', '-*'):
                yield from glob.glob(os.path.expanduser(searchpath + suffix))

    for path in _gen_sdk_paths():
        if not path:
            continue
        path = pathlib.Path(path)
        # A real SDK root is identified by its sdk_version marker file.
        if (path / 'sdk_version').is_file():
            return path
    raise OSError('Unable to find the Zephyr SDK')
7313799a9c1e0e3b4cf1687fdb7383f77b8fa4b7
31,454
import os


def pdb_codes_in_file(file_name):
    """
    Read PDB codes from a file.

    A PDB code must be exactly 4 characters long. Lines starting with '#'
    are comments; one line may carry several space-separated codes.

    :param file_name: path to the code list
    :return: set of codes (empty when the file does not exist)
    """
    data = set()
    if os.path.isfile(file_name):
        # Context manager closes the handle deterministically; the original
        # open(...).read() left that to the garbage collector.
        with open(file_name, 'r') as handle:
            lines = handle.read().splitlines()
        # Skip comment lines; also skip empty lines, which previously
        # crashed on the x[0] indexing.
        lines = [x for x in lines if x and x[0] != '#']
        # Split lines, if there is more than one code per line.
        tokens = [tok for line in lines for tok in line.split(' ')]
        data = {tok for tok in tokens if len(tok) == 4}
    return data
33334cc06ddf8a5912a8667ac6849e9dd826b6ca
31,456
import pickle


def read_pickle(path):
    """Read serialized pickle file.

    Parameters
    ----------
    path : str
        Path of the file to read.

    Returns
    -------
    data : pandas.DataFrame or np.ndarray
        Data stored in the pickle file (an image or coordinates with
        labels and metadata).
    """
    with open(path, mode='rb') as handle:
        return pickle.load(handle)
e66fc4bc3d3047a420ce5e5751fe36b178f602d5
31,458
import pkgutil


def iter_namespace(pkg):
    """Yield ModuleInfo entries for the modules inside package *pkg*.

    Passing a prefix of "<pkg>." makes pkgutil report full dotted module
    names, the convention for namespace-package plugin discovery.
    """
    return pkgutil.iter_modules(pkg.__path__, pkg.__name__ + '.')
bd940105c0956f45afeece6c3274d627a3090061
31,461
def calculate_sequence_distance(seq1, seq2, case_insensitive=True): """Calulate the number of nucleotide differences between two sequences. The sequences must be the same length. Args: seq1 : DNA string 1 seq2 : DNA string 2 case_insensitive : optional flag for case insensitive compare, defaults to True Returns: int number of differences """ if case_insensitive: allowed_bases = frozenset(['A', 'C', 'G', 'T']) seq1 = seq1.upper() seq2 = seq2.upper() else: allowed_bases = frozenset(['A', 'C', 'G', 'T', 'a', 'c', 'g', 't']) mismatches = 0 for pos in range(len(seq1)): base1 = seq1[pos] base2 = seq2[pos] if base1 not in allowed_bases: continue if base2 not in allowed_bases: continue if base1 != base2: mismatches += 1 return mismatches
cbbd5b3528dcd1b4cbd89f0d4e020aa7d829f203
31,462
import json


def json_loads(resp):
    """Handle parsing json from an HTTP response for both Python 2 and Python 3."""
    try:
        # Python 2: httplib-style headers expose getparam().
        charset = resp.headers.getparam('charset')
        charset = 'utf8' if not charset else charset
    except AttributeError:
        # Python 3: email.message.Message exposes get_content_charset().
        # NOTE(review): this may return None when no charset header is set,
        # which would make decode(None) fail below — confirm callers always
        # receive a charset.
        charset = resp.headers.get_content_charset()
    return json.loads(resp.read().decode(charset))
41781a2b55287b5c31439346d8d37f72b481823c
31,463
def reaction_path(speed):
    """Reaction distance rule of thumb: 0.3 metres per km/h of speed.

    speed is km/h
    """
    factor = 0.3
    return speed * factor
f7a16830c665be74922ff9bdfaeafc614c2e77fa
31,464
import torch


def max_neg_value(tensor):
    """Return the most negative finite value representable by the data type
    of *tensor*."""
    info = torch.finfo(tensor.dtype)
    return -info.max
a9ad008ba712d2ac8e7f94e81c20a36f5eebbf8c
31,465
def remove_consecutive_dups(lst):
    """
    Return a copy of lst with consecutive duplicates of elements eliminated.

    For example, for lst = [a, a, a, a, b, c, c, a, a, d, e, e, e, e],
    the returned list is [a, b, c, a, d, e].
    """
    result = []
    for item in lst:
        # Keep the item unless it repeats the last kept one.
        if not result or item != result[-1]:
            result.append(item)
    return result
7d791c87c5c51c37c7ca5ffa01d04e48b3de0286
31,466
import numpy


def kl(p, q):
    r"""Kullback-Leibler divergence D_{KL}(P||Q) = \sum_i P(i) log(P(i)/Q(i)).

    Terms where p is zero contribute nothing.

    :param p: array of probabilities P
    :param q: array of probabilities Q
    :return: scalar divergence
    """
    # numpy.where masks the p == 0 terms (the log is still evaluated
    # element-wise, so a RuntimeWarning may be emitted for p == 0).
    terms = numpy.where(p != 0, p * numpy.log(p / q), 0)
    return numpy.sum(terms)
b9c3b423e63d4759ca9152855c54c66b2587ccc1
31,467
def make_arguments(**params):
    """ Create a script argument string from keyword parameters. """
    parts = ["--{} '{}'".format(name, value) for name, value in params.items()]
    return ' '.join(parts)
297247bebb1705d3bba04421864bbfa8509b7e15
31,468
def read_file(file_name, verbose=False):
    """Return the lines of a file as a list, with optional verbosity.

    :param file_name: path to file to be read (must be writable: opened 'r+')
    :param verbose: run with extra logging
    :returns lines: list of strings, one per line (newlines included)
    """
    if verbose:
        print('Reading file: <' + file_name + '>')
    with open(file_name, 'r+') as handle:
        lines = handle.readlines()
    if verbose:
        print('Lines read: <' + str(len(lines)) + '>')
    return lines
3f689a78d61b7d1d4eb35f0c232fa945ee123074
31,469
def merge_results(x, y):
    """
    Given two dicts, x and y, merge them into a new dict as a shallow copy.

    The result only differs from `x.update(y)` in the way that it handles
    list values when both x and y have list values for the same key; then
    z[key] = x[key] + y[key].

    :param x: The first dictionary
    :param y: The second dictionary
    :returns: The merged dictionary
    """
    merged = x.copy()
    for key, value in y.items():
        both_lists = isinstance(value, list) and isinstance(merged.get(key), list)
        if both_lists:
            merged[key] = merged[key] + value
        else:
            merged[key] = value
    return merged
8e0f301bd1840381b2ff1a5ef64c142caa4e21a3
31,470
def getABMN(scheme, idx):
    """ Get coordinates of four-point cfg with id `idx` from DataContainerERT
    `scheme`."""
    coords = {}
    # a/b are current electrodes, m/n potential electrodes (ERT convention).
    for elec in "abmn":
        # scheme(elec) yields the per-measurement electrode index vector.
        elec_id = int(scheme(elec)[idx])
        elec_pos = scheme.sensorPosition(elec_id)
        # Only x/y are kept — the z coordinate is dropped (2D assumption;
        # NOTE(review): confirm for 3D surveys).
        coords[elec] = elec_pos.x(), elec_pos.y()
    return coords
219dfc01b94e277fbe37e76e1076f94b9ae354cf
31,472
def _ec2Instance_tag_dict(ec2_object): """Given an tagable ec2_object, return dictionary of existing tags.""" tag_dict = {} if ec2_object.tags is None: return tag_dict for tag in ec2_object.tags: tag_dict[tag['Key']] = tag['Value'] return tag_dict
b63a38faf15d839a3081b35b377d5329c3a9e796
31,474
def to_mysql(dbtype, vin):
    """Simple type conversion to push numpy types to native python types
    which are compatible with the DB."""
    type_name = dbtype.__str__()
    if "FLOAT" in type_name:
        return float(vin)
    if "INTEGER" in type_name:
        return int(vin)
    return vin
544c526ccefb69a295de21499a30bd3f8334e320
31,477
from typing import Optional
import tempfile
from pathlib import Path
import textwrap
import subprocess
import sys
def run_editor(filename: str, text: bytes, comments: Optional[str] = None, allow_empty: bool = False) -> bytes:
    """Run the editor configured for git to edit the given text.

    The text is written to *filename* inside a temporary directory, the
    user's ``GIT_EDITOR`` is launched on it, and the edited content is
    read back with ``#``-prefixed lines stripped (only when *comments*
    was supplied).

    :param filename: basename for the temporary file (shown in the editor)
    :param text: initial contents presented to the user
    :param comments: optional help text appended as ``# ``-prefixed lines
    :param allow_empty: when False, exit the whole process if the result is empty
    :returns: the edited bytes, with trailing whitespace collapsed to one newline
    """
    with tempfile.TemporaryDirectory() as tmpdir:
        path = Path(tmpdir) / filename
        with open(path, 'wb') as f:
            # Re-terminate every line with '\n', normalizing line endings.
            for line in text.splitlines():
                f.write(line + b'\n')
            if comments:
                # If comments were provided, write them after the text.
                f.write(b'\n')
                for comment in textwrap.dedent(comments).splitlines():
                    f.write(b'# ' + comment.encode('utf-8') + b'\n')

        # Invoke the editor.
        # NOTE(review): the path is single-quoted inside a bash command
        # string; a filename containing a single quote would break this --
        # presumably callers pass fixed names. TODO confirm.
        proc = subprocess.run([
            "bash", "-c", f"exec $(git var GIT_EDITOR) '{path}'"])
        if proc.returncode != 0:
            # A failing editor aborts the whole process, not just this call.
            print("editor exited with a non-zero exit code", file=sys.stderr)
            sys.exit(1)

        # Read in all lines from the edited file.
        lines = []
        with open(path, 'rb') as of:
            for line in of.readlines():
                # Comment lines are only stripped when comments were requested.
                if comments and line.startswith(b'#'):
                    continue
                lines.append(line)

        # Concatenate parsed lines, stripping trailing newlines.
        data = b''.join(lines).rstrip() + b'\n'
        if data == b'\n' and not allow_empty:
            print("empty file - aborting", file=sys.stderr)
            sys.exit(1)
        return data
7bff852854fb9dce43952b0e0d3840f3b1b63c00
31,478
import math
def build_lr(total_steps, lr_init=0.0, lr_end=0.0, lr_max=0.1, warmup_steps=0, decay_type='cosine'):
    """
    Generate a per-step learning-rate schedule with optional linear warmup.

    During warmup the rate grows linearly from ``lr_init`` to ``lr_max``;
    afterwards it decays from ``lr_max`` towards ``lr_end`` according to
    ``decay_type`` ('cosine', 'square', or constant at ``lr_max`` otherwise).

    Args:
        total_steps(int): all steps in training.
        lr_init(float): init learning rate.
        lr_end(float): end learning rate.
        lr_max(float): max learning rate.
        warmup_steps(int): all steps in warmup epochs.
        decay_type(str): 'cosine', 'square', or anything else for constant.

    Returns:
        list, learning rate array.
    """
    lr_init = float(lr_init)
    lr_end = float(lr_end)
    lr_max = float(lr_max)
    decay_steps = total_steps - warmup_steps
    step_increment = (lr_max - lr_init) / warmup_steps if warmup_steps else 0

    def rate_at(i):
        # Linear ramp during warmup.
        if i < warmup_steps:
            return lr_init + step_increment * (i + 1)
        if decay_type == 'cosine':
            cosine_decay = 0.5 * (1 + math.cos(math.pi * (i - warmup_steps) / decay_steps))
            return (lr_max - lr_end) * cosine_decay + lr_end
        if decay_type == 'square':
            frac = 1.0 - float(i - warmup_steps) / (total_steps - warmup_steps)
            return (lr_max - lr_end) * (frac * frac) + lr_end
        # Any other decay_type keeps the peak rate constant.
        return lr_max

    return [rate_at(i) for i in range(total_steps)]
488e75e661cf4397a67bb2ae21c8882ab795c739
31,482
def remove_duplicates(array):
    """Return a new sorted list containing each distinct element of *array* once.

    Elements only need to support ordering and equality (they do not have
    to be hashable), so e.g. lists of lists work.

    :param array: input list; it is not modified
    :returns: sorted list of unique elements (empty list for empty input)
    """
    # Guard the empty case, which the original algorithm crashed on
    # (it unconditionally read copy_array[0]).
    if not array:
        return []
    unique = []
    # After sorting, duplicates are adjacent, so comparing each element
    # with the last accepted one is enough to drop all repeats.
    for item in sorted(array):
        if not unique or unique[-1] != item:
            unique.append(item)
    return unique
406264ee588866ff839a3eac849103dae605f126
31,484
import math
import statistics
def extract_sasa_data(siteresidue_list, pop):
    """ Extracts accessible surface area data from .out file generated by
    POPSlegacy, then matches the data in the .out file to the binding site
    in the mol2 file. Uses POPSlegacy
    https://github.com/Fraternalilab/POPSlegacy.

    Rows printed as '-nan' (missing values) are replaced by the median of
    the valid qSASA values.

    :param siteresidue_list: residue identifiers (e.g. 'ALA12') in the site
    :param pop: path to the POPSlegacy .out file
    :returns: list of qSASA floats for matching residues, in file order
    """
    residue_list = []
    raw_values = []
    # opening .out file
    with open(pop) as popsa:
        for line in popsa:
            fields = line.split()
            # Data rows in the .out file have exactly 12 columns.
            if len(fields) == 12:
                residue_type = fields[2] + fields[4]
                if residue_type in siteresidue_list:
                    residue_list.append(residue_type)
                    raw_values.append(fields[7])

    # BUG FIX: the original converted to float first and then compared the
    # floats against the string '-nan', which never matched, so NaN values
    # leaked into the median and the output. Convert, then treat any NaN
    # as missing and substitute the median of the valid values.
    values = [float(x) for x in raw_values]
    valid = [v for v in values if not math.isnan(v)]
    median = statistics.median(valid) if valid else 0.0
    qsasa_data = [median if math.isnan(v) else v for v in values]
    return qsasa_data
93d7ae3ab22bfc059f99a19afeca0845a408ca9e
31,485
def getTaskPosition(task):
    """Return the 1-based position of the task within its container
    (phase or group task), or None when the task is not found."""
    for position, candidate in enumerate(task.container.tasks, start=1):
        if candidate.id == task.id:
            return position
    return None
6a62b448e3fdcc36fcb41911de1aacecc4bcbab2
31,486
def format_anime_status(media_status: str) -> str:
    """Map an API anime status code to its human-readable form.

    Raises KeyError for an unknown status code.
    """
    return {
        "FINISHED": "Finished",
        "RELEASING": "Currently Airing",
        "NOT_YET_RELEASED": "Not Yet Aired",
        "CANCELLED": "Cancelled",
    }[media_status]
66f64596c02f095a9010295fab9506e647282599
31,487
def checksum(buffer, checkA, checkB):
    """
    8-bit Fletcher algorithm for packet integrity checksum.

    Refer to [1] (section 32.4 UBX Checksum, pages 135 - 136).

    Inputs:
    buffer - The byte buffer to compute the checksum over.
    checkA - First reference checksum byte to compare against.
    checkB - Second reference checksum byte to compare against.

    Outputs:
    valid - Boolean flag, True when the computed checksum matches the
            reference checksum.
    buffer_checkA - First checksum byte computed from the input buffer.
    buffer_checkB - Second checksum byte computed from the input buffer.
    """
    computed_a = 0
    computed_b = 0
    for byte in buffer:
        # Running sums, each kept within 8 bits.
        computed_a = (computed_a + byte) & 0xFF
        computed_b = (computed_b + computed_a) & 0xFF
    valid = (computed_a == checkA) and (computed_b == checkB)
    return valid, computed_a, computed_b
b6fb80603c03e96cbe7c4f34760c71b0b75113f8
31,488
def leiaOperacao(text):
    """Read and validate user input: only an integer from 1 to 5 is accepted.

    Keeps prompting until the user types a valid value.

    :param text: prompt text shown to the user.
    :returns: the validated integer typed by the user.
    """
    # BUG FIX: the original used an inner `while` whose `continue` re-tested
    # the same value forever (infinite print loop), and a `finally: break`
    # that overrode the retry `continue` on bad input (returning 0). It
    # also accepted negative numbers, since it only rejected 0 and > 5.
    while True:
        try:
            operation = int(input(text))
        except (ValueError, TypeError):
            # Not an integer at all -- ask again.
            print("\33[31mVocê não digitou um valor válido. Tente novamente!\33[m")
            continue
        if 1 <= operation <= 5:
            return operation
        print("\33[31mVocê não digitou um valor válido. Tente novamente!\33[m")
667667c8e5d945a7d25f1400010fca8cc9e6da1e
31,490
import glob
def show_fbs_dirs():
    """Return the list of available FBS opsim database directories."""
    pattern = '/home/idies/workspace/lsst_cadence/FBS_*/'
    return glob.glob(pattern)
163ae29862f2c657d3414f8cc6c2b563a079a329
31,491
def convert_to_int(s):
    """Filter to convert a string to an int.

    Leading/trailing whitespace is stripped first; None passes through.
    """
    if s is None:
        return None
    return int(s.strip())
8cb0a12b107644f969c54603d59ebf3086fdba22
31,492
def _build_selpand(item, attributes): """ This method builds an expand or select term for an STA Query :param item: string either expand or select :param attributes: a list of strings that has to be expanded / selected :return: the resulting select or expand-term """ selector = item + "=" if not isinstance(attributes, list): attributes = [attributes] for i, attribute in enumerate(attributes): if i != 0: selector += "," selector += str(attribute) return selector
a9eb52ba6107411f48f140033fc4227906228a0e
31,494
def sumMatchesAndMismatches(segment):
    """
    Get total matches/mismatches from CIGAR string (M field).

    CIGAR operation codes:
    M BAM_CMATCH      0
    I BAM_CINS        1
    D BAM_CDEL        2
    N BAM_CREF_SKIP   3
    S BAM_CSOFT_CLIP  4
    H BAM_CHARD_CLIP  5
    P BAM_CPAD        6
    = BAM_CEQUAL      7
    X BAM_CDIFF       8
    B BAM_CBACK       9
    """
    total = 0
    for code, length in segment.cigartuples:
        if code == 0:  # M: match or mismatch
            total += length
    return total
101b50d859c949e18563e981b2c419a224e3de68
31,495
import hashlib
import uuid
def createUniqeNodeName(channel=None):
    """Return a 40-character SHA-1 hex digest derived from *channel*.

    When *channel* is None, a random UUID string is hashed instead,
    making the generated name unique per call.
    """
    seed = channel if channel is not None else str(uuid.uuid4())
    return hashlib.sha1(seed.encode('utf-8')).hexdigest()
7c9c7b6cb9e80ba932d4ab2cc7ad3f53f0e0037c
31,496
def check_revealed_tile(board, tile):
    """
    Function checks if a tile location contains a ship piece.

    board -> the tiled board, each cell holding either a ship piece or None
    tile -> location of tile (first element is a (row, col) pair)

    returns True if a ship piece exists at the tile location
    """
    row, col = tile[0]
    # Idiom fix: test for the None sentinel with 'is not None' rather
    # than '!= None'.
    return board[row][col] is not None
c605c851d238f1fb2c453f682466588c8673cb27
31,498
def linear_combinaison(alpha=1.0, m1=None, beta=None, m2=None):
    """
    Return the linear combinaison m = alpha * m1 + beta * m2.

    When m2 is omitted (or empty), the result is simply alpha * m1.
    Keys present only in m2 are ignored; keys missing from m2 contribute 0.0.

    :param alpha: scalar multiplier for m1
    :param m1: first dict of name -> value
    :param beta: scalar multiplier for m2
    :param m2: second dict of name -> value
    :returns: dict of name -> alpha * m1[name] + beta * m2.get(name, 0.0)
    """
    # Use None sentinels instead of mutable {} defaults, which are shared
    # across calls (classic Python pitfall).
    if m1 is None:
        m1 = {}
    # Preserve the original semantics: an empty/missing m2 means "no second
    # term", implemented by aliasing m1 with a zero coefficient.
    if not m2:
        m2 = m1
        beta = 0.0

    m = {}
    for (name, value) in m1.items():
        m[name] = alpha * value + beta * m2.get(name, 0.0)
    return m
4bb76e76e0e905e29135f9d5d00c9e16edb7920d
31,499
import torch
from typing import OrderedDict
def meta_layer(input_feature_shape: torch.Size, target_module: torch.nn.Parameter):
    """
    A 'parallel'/'meta' layer applied to previous layer/block's features to infer
    global statistics of next layer's weight matrix.

    Unimplemented stub: calling this function always raises
    NotImplementedError.

    Args:
    - input_feature_shape: shape of the incoming feature tensor
    - target_module: parameter whose statistics would be inferred
    """
    # The function raises immediately; everything below is unreachable
    # scaffolding kept as a sketch of the intended implementation.
    raise NotImplementedError
    # NOTE(review): dead code -- it references an undefined `layer` helper
    # and Ellipsis placeholders, and would fail if the raise were removed.
    conv = torch.nn.Conv2d(16, 3, (3, 3))
    normalization = dict(norm_type=..., norm_kwargs=..., input_shape=...)
    underlying_layer_ops = layer(layer_op=conv, act_fn=torch.nn.ReLU, dropout_prob=None, preactivation=False, **normalization)
    ops = [('underlying_layer_ops', underlying_layer_ops), ]
    return torch.nn.Sequential(OrderedDict(ops))
7eea9d5a1a97e1017fba6e582b8494fbabc1ad55
31,500
def monitor_cb(ud, msg):
    """Monitor-state callback taking the userdata and the message.

    Returning False tells the monitor state to terminate, in which case
    it returns 'invalid'. This callback always requests termination.
    """
    return False
1b1b210e94de0bcf2fdae9ecb884be945d9ead01
31,501
def _hide_num_nodes(shape): """Set the first dimension as unknown """ shape = list(shape) shape[0] = None return shape
ea0a8bb452752c9efdce4ae75d9a37cf301d4217
31,502
from typing import Any, Dict
def summed_frequencies(freq: Dict[Any, int]):
    """Return the grand total of all counts in the given frequency dict."""
    counts = freq.values()
    return sum(counts)
d2dc9b873aab42c8c4739b350545b58bd60620a3
31,503
def get_cheie(item):
    """Sort key: return the second element of *item* (used for descending sorts)."""
    return item[1]
0307a60fdab66c8aaf6e4ee6ea140c74b575dda1
31,505
import json
def dictify_json_loads(text: str):
    """
    Like json.loads, but returns an empty dict for an empty or
    whitespace-only string.

    :param text: JSON source text (may be blank)
    :return: the parsed object, or {} when *text* is blank
    """
    stripped = text.strip()
    return json.loads(stripped) if stripped else {}
30479d493c1215ac2595d2dc75aae7023ea17bf5
31,506
def sort_draft(single_draft): """Given a single draft string, process that string into a list of picks. :param single_draft: A list of unsorted draft tokens from database. :return: A list of picks of length 3*ps. Each pick is a list of cardnames in the pack shown to the user. The card picked by the user is always the top card. """ #Extract picks from draft. picks = single_draft[2:] pick_list = [] #Get the pack size. ps = int(len(picks) / 24) #Track all picks in pack 1. for pick in range(ps): cur_pick = [] for x in range(pick, (3*ps+1)*(ps-pick), 3*ps+1): x = x % (24*ps) cur_pick.append(picks[x]) pick_list.append(cur_pick) #Track all picks in pack 2. for pick in range(ps): cur_pick = [] for x in range(ps+pick, (-3*ps+1)*(ps-1-pick), -3*ps+1): x = x % (24*ps) cur_pick.append(picks[x]) pick_list.append(cur_pick) #Track all picks in pack 3. for pick in range(ps): cur_pick = [] for x in range(2*ps+pick, (3*ps+1)*(ps-pick), 3*ps+1): x = x % (24*ps) cur_pick.append(picks[x]) pick_list.append(cur_pick) return pick_list
1b03382babdf1eaba2851701363c4fde2f1fa90b
31,507
def tostr(value):
    """Cast *value* to str, except when it is None.

    value[in] Value to be cast to str

    Returns value as a str instance, or None when the input was None.
    """
    if value is None:
        return None
    return str(value)
252dca23a22a13ad6b58d310cc1dae5196792fc8
31,508
def WIFCONTINUED(status):
    """Return ``True`` if the process has been continued from a job control
    stop, otherwise return ``False``.

    Presumably a compatibility stub for platforms without job control:
    it unconditionally reports ``False``.
    """
    return False
cd7361b9f0edde72327e14306d710120b0460644
31,509
def expected_en():
    """Expected serialization when English is the chosen language."""
    return dict(
        id='text',
        title_l10n='Text',
        description_l10n='Publications',
        icon='file-o',
        props={},
    )
aa83fb9b2dcd915a91ebeacb14f0c8d1d20d7b3f
31,511
def _contains_op(meta_graphdef, op_name): """Returns true if the graph def contains the given op.""" # Check the main graph if any(node.op == op_name for node in meta_graphdef.graph_def.node): return True # Check the graph genederated from user defined functions for func in meta_graphdef.graph_def.library.function: for node in func.node_def: if node.op == op_name: return True return False
bdcc4150877796594a261d93320382a415cf230a
31,512
def sequence_of_items_containing_forbidden_key():
    """Fixture data: a sequence of items containing a forbidden key."""
    pair = ('strict', 'indeed')
    return [pair]
fd254fc1763bb97cfac87a9400ccd2b33e3d7d4c
31,514
def password() -> str:
    """Fixture: the password used for accessing the Shopify API."""
    return 'mysupersecretpassword'
5f7ca26f7ec89a5ed23c4169502ec0a9c30a75cf
31,515
def audio_dir(tmp_path_factory):
    """Create a directory intended to hold different types of audio files.

    For now only the empty directory is created; writing actual .wav/.flac
    fixtures into it is a TODO.
    """
    return tmp_path_factory.mktemp('audio_dir')
f20fab53753118c6120612d86e932c52bb80468f
31,516
from typing import Dict from typing import Any def _get_experiment_config(name) -> Dict[str, Any]: """Fetches the gin config.""" return { 'name': name, }
5f9bbd5795cb38cf7fb40934c1e1c6d8fbcb935c
31,517
import json
def prettify_json(dictionary, sort_keys=True, indent=4):
    """
    Serialize a dictionary into a neatly formatted JSON string.

    :param dictionary: dictionary to be prettified (dict)
    :param sort_keys: indicates if the keys should be sorted (bool)
    :param indent: number of spaces to use as indentation (int)
    :return: prettified json (str)
    """
    return json.dumps(
        dictionary,
        sort_keys=sort_keys,
        indent=indent,
        separators=(',', ': '),
    )
885db2075ef5041fe431863b9b8b607faece4e5c
31,518
def get_search(names, verb = 'show'):
    """
    Build an NLP search query that retrieves the given neurons.

    # Arguments
        names (list): List of neuron reference ids to retrieve.
        verb (str): Verb to use. Defaults to 'show'. Can be 'show', 'add',
            'keep' or 'remove'.

    # Returns
        str: NLP query string (also printed to stdout).
    """
    id_list = ', '.join(str(name) for name in names)
    query = verb + ' /:referenceId:[' + id_list + ']'
    print(query)
    return query
d4878c9365d60f50cd60f032a763a538d04cc514
31,519
def icon_to_pixmap(icon, width, height=None):
    """Converts a given icon to a pixmap. Automatically adjusts to high-DPI scaling.

    :param icon: Icon to convert.
    :param int width: Target point width.
    :param int height: Target point height; any falsy value defaults to *width*.
    :return: ``QPixmap`` instance.
    """
    # Note: a falsy height (None or 0) falls back to a square pixmap,
    # matching the original `height or width` semantics.
    return icon.pixmap(width, height or width)
d7e38ec7fc0efda5751cf301a935a71f9ca9589c
31,520