content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def is_question_answer_yes(question: str) -> bool:
    """Ask the user a yes/no question on stdout and read the reply.

    Parameters
    ----------
    question : str
        The question to print on stdout.

    Returns
    -------
    bool
        True if the user answers "y" or "yes" (case-insensitive),
        False otherwise (default is No).
    """
    reply = input(question + " [y/N] ").lower()
    return reply in ("y", "yes")
3b8564d0d984a315fcf46acc935db1759f148af5
50,328
import numpy


def multivariate_Gaussion(X, Mu, Sigma2):
    """Compute the probability density of a multivariate Gaussian with a
    diagonal covariance matrix.

    Args:
        X (1D numpy.array (float)): n by 1 feature vector.
        Mu (1D numpy.array (float)): n by 1 mean vector.
        Sigma2 (1D numpy.array (float)): n by 1 variance vector
            (diagonal of the covariance matrix).

    Returns:
        p (float): probability density at X.
    """
    assert X.shape == Mu.shape, "Input X and Mu must be the same shape"
    assert Mu.shape == Sigma2.shape, "Input Mu and Sigma2 must be the same shape"
    cov = numpy.diagflat(Sigma2)
    cov_inv = numpy.linalg.inv(cov)
    diff = X - Mu
    n_dims = X.shape[0]
    # Normalisation constant 1 / sqrt((2*pi)^k * |Sigma|).
    norm_const = 1 / numpy.sqrt((2 * numpy.pi) ** n_dims * numpy.linalg.det(cov))
    exponent = -0.5 * numpy.dot(numpy.dot(diff.T, cov_inv), diff)
    return norm_const * numpy.exp(exponent)
fdbc95219f2c0fd8a504f1b0e948f345c79ce115
50,329
def is_real_contact(p):
    """Return True when this <p> element is an actual contact rather than
    a blank <p> (i.e. it contains a <strong> child).

    Arguments:
        p {bs4.element.Tag} -- <p> element to inspect

    Returns:
        [bool] -- Whether this element represents a contact.
    """
    has_strong_child = p.strong is not None
    return has_strong_child
3d00bc8a9409d1a9d1e0bbb1750f70d021282840
50,330
def _G(x, y):
    """Helper function: True when the timestamps of x and y are within
    5 seconds of each other."""
    gap = (x.timestamp - y.timestamp).total_seconds()
    return abs(gap) <= 5
8fc6bc08b8fd70438031878d6906e6e487166af9
50,331
def toStringDuration(duration):
    """Return a description of the given duration (in seconds) in the
    most appropriate units (s, ms, us, or ns).

    :param duration: duration in seconds (float or int)
    :return: formatted string, e.g. '2.500000s', '500ms', '2\u03bcs'
    """
    table = (
        ('%dms', 1e-3, 1e3),
        (u'%d\u03BCs', 1e-6, 1e6),
        ('%dns', 1e-9, 1e9),
    )
    # Use seconds for durations of one second or more; the previous
    # "> 1" comparison rendered exactly one second as "1000ms".
    if duration >= 1:
        return '%fs' % duration
    for fmt, threshold, factor in table:
        if duration > threshold:
            return fmt % int(duration * factor)
    return '%fs' % duration  # sub-nanosecond fallback
2b000767563df5addaa4c2d7f98f44841d81130a
50,332
def get_groups(data, scope, args):
    """Fetch groups for the given scope.

    Note: mutates *args* by inserting the 'scope' key when the scope is
    recognised ('fed' or 'local'); any other scope yields an empty dict.
    """
    if scope not in ('fed', 'local'):
        return {}
    args["scope"] = scope
    return data.client.list("group", "group", **args)
754cfc75c62f9ce89c82db001b9e9c2597988193
50,334
def vararg_callback(option, opt_str, value, parser):
    """Callback for an option with variable arguments.

    Manually collect arguments right of a callback-action option
    (ie. with action="callback"), and add the resulting list to the
    destination var.

    Usage:
        parser.add_option("-c", "--callback", dest="vararg_attr",
                          action="callback", callback=vararg_callback)

    Details:
    http://docs.python.org/2/library/optparse.html#callback-example-6-variable
    -arguments

    NOTE(review): with no nargs set, optparse passes value=None, so the
    collected list starts as [None] -- confirm whether that leading None
    is intended by consumers of the destination attribute.
    """
    value = [value]

    def floatable(str):
        # True when the token parses as a float (so '-3.0' is a value,
        # not an option).
        try:
            float(str)
            return True
        except ValueError:
            return False

    for arg in parser.rargs:
        # stop on --foo like options
        if arg[:2] == "--" and len(arg) > 2:
            break
        # stop on -a, but not on -3 or -3.0
        if arg[:1] == "-" and len(arg) > 1 and not floatable(arg):
            break
        value.append(arg)

    # Consume the arguments we swallowed so optparse does not re-parse them.
    del parser.rargs[:len(value)-1]
    setattr(parser.values, option.dest, value)
0d25d97e4702a83b46a20842e9dea1100de575da
50,336
def assemble_haplotypes(snps):
    """Assemble haplotypes for two chromosomes from phased SNP data.

    Input SNPs must expose .gt ('1|0' or '0|1'), .pos, .ref and .alt;
    unphased or homozygous genotypes are skipped.

    Returns a dict {"A": {pos: allele}, "B": {pos: allele}}.
    """
    haplotypes = {"A": {}, "B": {}}
    for snp in snps:
        if snp.gt == "1|0":
            allele_a, allele_b = snp.alt, snp.ref
        elif snp.gt == "0|1":
            allele_a, allele_b = snp.ref, snp.alt
        else:
            continue
        haplotypes["A"][snp.pos] = allele_a
        haplotypes["B"][snp.pos] = allele_b
    return haplotypes
093dbe9739da74cce6d9bc434009a205b91ce5d8
50,337
def mrmmult(temp, covmat):
    """Matrix multiplication (MRM' or m'Rm)."""
    projected = temp @ covmat
    return projected @ temp.T
65d7da0f4303a8414c884ed172d1123ca9033f34
50,339
def transitive_closure(graph, reflexive=False):
    """
    Calculate the transitive closure of a directed graph, optionally the
    reflexive transitive closure.

    A slight modification of the "Marking Algorithm" of Ioannidis &
    Ramakrishnan (1998) "Efficient Transitive Closure Algorithms".

    :param graph: the initial graph, represented as a dictionary of sets
    :type graph: dict(set)
    :param reflexive: if set, also make the closure reflexive
    :type reflexive: bool
    :rtype: dict(set)
    """
    if reflexive:
        seed = lambda node: {node}
    else:
        seed = lambda node: set()
    # U_i in the article: edges still awaiting processing.
    agenda_graph = {node: graph[node].copy() for node in graph}
    # M_i in the article: the closure accumulated so far.
    closure_graph = {node: seed(node) for node in graph}
    for node in graph:
        agenda = agenda_graph[node]
        closure = closure_graph[node]
        while agenda:
            successor = agenda.pop()
            closure.add(successor)
            closure |= closure_graph.setdefault(successor, seed(successor))
            agenda |= agenda_graph.get(successor, seed(successor))
            agenda -= closure
    return closure_graph
40126120eede6b0944794311ee828382f3bfd1aa
50,340
def detect_cycle(ary: list) -> bool:
    """Roughly detect a loop using Floyd's cycle-finding algorithm
    (tortoise and hare).

    Args:
        ary: list in which to detect a loop.

    Returns:
        bool: True when a loop is detected in ary, False otherwise.
    """
    fast = iter(ary)
    for slow in ary:
        try:
            next(fast)
            hare = next(fast)
        except StopIteration:
            return False
        if slow == hare:
            return True
    return False
bf7599aade5060ba4d309a042e721974924a9b64
50,343
import math


def calibrated_fps(calibrate):
    """Calibration of the dynamic frames-per-second engine.

    Builds fps(rate) = log10(rate * adjust + 1) * factor + min_fps,
    with factor chosen so that fps(calibrate) == max_fps (derived from
    y = log10(x + m) * k + n; see readme for details).

    Args:
        calibrate (float): user provided calibration point

    Returns:
        a callable to calculate the fps
    """
    min_fps, max_fps = 2., 60.
    calibrate = max(1e-6, calibrate)
    # Stretch the curve so small calibration values still span the range.
    adjust_log_curve = 100. / min(calibrate, 100.)
    factor = (max_fps - min_fps) / math.log10((calibrate * adjust_log_curve) + 1.)

    def fps(rate):
        if rate <= 0:
            return 10.  # bootstrap speed
        if rate >= calibrate:
            return max_fps
        return math.log10((rate * adjust_log_curve) + 1.) * factor + min_fps

    return fps
8f51d14bc3b58a20e3a2e6775233569f65c0f511
50,344
def mut_pair_num(table):
    """
    Count the directed pairs of codons one mutation away from each other.

    With a = alphabet length (generally 4) and L = codon length
    (generally 3), the number of mutational pairs is

        n = (a^L) * L * (a - 1)

    i.e. the number of codons times the number of distinct single-site
    mutations per codon.

    Parameters
    ----------
    dict table: the codon table to analyze

    Returns
    -------
    int: the number of distinct mutational pairs.
    """
    codons = list(table)
    # Infer the nucleotide alphabet from the codons themselves.
    letters = {nt for codon in codons for nt in codon}
    a = len(letters)
    L = len(codons[0])
    return (a ** L) * L * (a - 1)
ce44087d295ac2cf0860c364dbf18b4f2de500b1
50,346
def train_loop(model, optimizer, loss_fn, samples, labels, batch_size, seq_len, device='cpu', pre_trained=False):
    """
    Standard pytorch training loop, using our helper loss function above.

    NOTE(review): this is an unimplemented skeleton -- the training loop
    described by the comments below has not been written yet.

    :param model: model to optimize
    :param optimizer: optimizer
    :param loss_fn: loss function
    :param samples: data in
    :param labels: labels out
    :param batch_size: batch size for sequences
    :param seq_len: sequence length
    :param device: device to put tensors on
    :param pre_trained: are we using pre-made embeddings or passing in indices?
    :return: model, loss, and accuracy
    """
    loss_total = 0
    acc_total = 0
    total_samples = 0
    # iterate through all samples, stepping by batch_size * sequence length and using
    # your loss function above to calculate loss. Then, zero gradients, backprop, step optimizer, and repeat
    # Also, store up the loss total, total number correct, and total number processed by the model so far
    # NOTE(review): until the loop is implemented, total_samples stays 0 and
    # the division below raises ZeroDivisionError -- guard or implement first.
    # Return model, loss, and accuracy
    return model, loss_total, acc_total/total_samples
d97ec345f6e1bb1e1951a699bf171e5accca362e
50,347
import math


def closest_power2(x):
    """Get the closest power of 2, rounding up when the second-most
    significant binary digit of x is 1, down otherwise.

    :param x: positive integer
    :return: the power of two closest to x under the rule above

    The previous implementation indexed bin(x)[3] unconditionally, which
    raised IndexError for x == 1 (bin(1) == '0b1' has no second digit).
    """
    bits = bin(x)
    # '0bD...': index 3 is the second-most-significant bit, when present.
    op = math.ceil if len(bits) > 3 and bits[3] == "1" else math.floor
    return 2 ** op(math.log(x, 2))
fa2d3025f83283b79d5c7e4c3babaacd873f3cb9
50,348
async def read_data(stream, length, decryptor=None) -> bytes:
    """Read exactly *length* bytes from the packet stream.

    If a single read returns fewer bytes than requested, keep reading
    until the full length has been received.

    Args:
        stream: The packet stream to read from.
        length: The amount of bytes to read.
        decryptor: If the packet is encrypted, decrypt it for reading.

    Returns:
        bytes: The packet data that was read.
    """
    data = await stream.read(length)
    while len(data) != length:
        data += await stream.read(length - len(data))
    if decryptor is None:
        return data
    return decryptor.update(data)
a43d1bd9c0128724922eb23d6aca84b7df9011ca
50,351
import importlib
import json


def create_object(config):
    """
    Create an object from the given configuration dictionary. Its format is:

        class:  The fully qualified path name.
        args:   A list of positional arguments (optional).
        kwargs: A dictionary of keyword arguments (optional).

    Any failure is re-raised as a generic Exception carrying the
    pretty-printed config and the original error.
    """
    try:
        module_name, _, class_name = config['class'].rpartition('.')
        cls = getattr(importlib.import_module(module_name), class_name)
        args = config.get('args', [])
        kwargs = config.get('kwargs', {})
        return cls(*args, **kwargs)
    except Exception as e:
        raise Exception(
            'Could not create object\n{}'.format(json.dumps(config, indent=4)), e
        )
3f39a1f09a664602b4beeaf35590470dc96a1db2
50,353
import os
import subprocess


def bias_field_correction(t1_in):
    """Correct field bias using FSL's FAST.

    FAST writes multiple NIfTI files next to the input (see
    https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/FAST#Fast); only the path to
    the bias-corrected ("restored") image is returned.

    ----------
    t1_in: path to the nifti file to be bias corrected

    Returns
    -------
    out: path to the bias-corrected nifti file
    """
    base = os.path.join(os.path.dirname(t1_in), 'bias')
    cmd = [
        "fast",
        "-t", "1",     # input is T1
        "-o", base,    # basename for outputs
        "-B",          # output restored (bias-corrected) image
        t1_in,
    ]
    if not os.environ.get('DISPLAY'):
        # Headless session: run at lower priority.
        cmd = ["nice", "-n", "10"] + cmd
    subprocess.run(cmd)
    return base + '_restore.nii.gz'
b9947f2ef9d3e62c659cd1db4b9e2aaf1fb40984
50,354
from typing import Counter


def find_top_codes(df, col_name, n):
    """
    Find the top n codes in a column of space-separated code strings.

    Returns a list of strings so codes are treated as classes down the line.
    """
    all_codes = df[col_name].str.cat(sep=' ').split(' ')
    counts = Counter(all_codes)
    return [code for code, _ in counts.most_common(n)]
61a2a8c06f589ba2834bab58f34679aef96b2a2f
50,355
def bollinger_band(df, base, upper_target, lower_target, period):
    """
    Compute Bollinger Bands (BB) -- a lagging indicator.

    df - the data frame (note: gains the target columns in place)
    base - column on which the indicator is calculated, e.g. Close
    upper_target - column name to store the upper BB value
    lower_target - column name to store the lower BB value
    period - rolling window of the bands
    """
    ma_col = '{}MA'.format(period)
    std_col = '{}STD'.format(period)
    df[ma_col] = df[base].rolling(window=period).mean()
    df[std_col] = df[base].rolling(window=period).std()
    df[upper_target] = df[ma_col] + 2 * df[std_col]
    df[lower_target] = df[ma_col] - 2 * df[std_col]
    # Return a copy without the intermediate columns.
    return df.drop([ma_col, std_col], axis=1)
e9daecd68e6a41178a554acefbc460184855bca6
50,356
def boost_optimizer(bid_dict, boost_values, learning_rate):
    """
    Nudge per-bidder boost multipliers toward the boosted-auction outcome,
    then normalise so the smallest boost is 1.0.

    Requires at least two entries in bid_dict (the sorted()[-2:] unpack
    below fails otherwise). Mutates and returns boost_values.

    NOTE(review): several fragile spots are flagged inline -- confirm the
    intended semantics before relying on this.
    """
    boost_bid_dict = {}
    for i in bid_dict:
        if i in boost_values:
            boost_bid_dict[i] = bid_dict[i] * boost_values[i]
        else:
            boost_bid_dict[i] = bid_dict[i]
    # Keys of the two highest boosted bids.
    second_high_bidder, high_bidder = sorted(boost_bid_dict, key=boost_bid_dict.__getitem__)[-2:]
    # NOTE(review): "boost_values and bid_dict" evaluates to bid_dict whenever
    # boost_values is truthy, so this iterates bid_dict only -- and the
    # boost_values[i] reads below raise KeyError for bidders without a boost.
    for i in boost_values and bid_dict:
        # NOTE(review): identity comparison ("is not") on keys happens to work
        # because the same key objects flow from bid_dict; "!=" would be safer.
        if i is not high_bidder:
            max_boost = boost_bid_dict[high_bidder] / bid_dict[i]
            boost_diff = boost_values[i] - max_boost
            new_boost = boost_values[i] - learning_rate * boost_diff
            boost_values[i] = new_boost
        else:
            min_boost = boost_bid_dict[second_high_bidder] / bid_dict[i]
            boost_diff = boost_values[i] - min_boost
            new_boost = boost_values[i] - learning_rate * boost_diff
            boost_values[i] = new_boost
    # Normalise so the smallest boost becomes 1.0.
    min_boost = sorted(boost_values.values())[0]
    for i in boost_values:
        boost_values[i] = boost_values[i] / min_boost
    return boost_values
e61bdb1a91b3dd46c704c041f9e42f1ce1d6e655
50,357
def UnicodeToCLiteral(s):
    """Converts a unicode string to a C-style escaped string (e.g. "\xe1\x84").

    :param s: the unicode string to convert
    :return: a double-quoted C string literal with non-ASCII UTF-8 bytes
        hex-escaped.

    The previous implementation was Python 2 only: iterating the encoded
    bytes in Python 3 yields ints, so ord(c) raised TypeError.
    """
    data = s.encode('utf8')
    out = ['"']
    for byte in data:  # iterating bytes yields ints in Python 3
        if byte > 127:
            out.append(r'\x%.2x' % byte)
            # To prevent the C++ compiler from interpreting subsequent
            # characters as part of the hex code, we break the string
            # literal, e.g. [?feet] --> ["\x3f" "feet"] not ["\x3ffeet"].
            out.append('" "')
        else:
            out.append(chr(byte))
    out.append('"')
    return ''.join(out)
40b991e02df77b8e043a353002a286080bfba61a
50,358
def decode_topic_name(encoded: str) -> str:
    """
    Reverses ``encode_topic_name``.

    :param encoded: the encoded SNS topic name
    :return: the decoded channel name
    """
    replacements = (
        ("_WCD_", "*"),
        ("_FWS_", "/"),
        ("_DOT_", "."),
        ("_COL_", ":"),
        ("_LT_", "<"),
        ("_GT_", ">"),
    )
    decoded = encoded
    for token, char in replacements:
        decoded = decoded.replace(token, char)
    return decoded
d8af5240645b1286bc119fdf162cb7645d439e0c
50,359
import torch


def _sort_edges(edges):
    """sort last dimension of edges of shape (E, 2)"""
    with torch.no_grad():
        # order[e] == 1 when the pair is stored descending (e0 > e1), else 0.
        order = (edges[:, 0] > edges[:, 1]).long()
        order = order.unsqueeze(dim=1)
        # a gathers the smaller endpoint of each edge, b the larger.
        a = torch.gather(input=edges, index=order, dim=1)
        b = torch.gather(input=edges, index=1 - order, dim=1)
        # NOTE(review): a and b have shape (E, 1), so stacking on the last
        # dim yields shape (E, 1, 2), not (E, 2); torch.cat([a, b], dim=1)
        # would give (E, 2) -- confirm which shape callers expect.
        return torch.stack([a, b], -1)
7024567c02a37626263b97ab4fcf3bea13e4ec83
50,361
import collections


def count_domLetters(word):
    """Count the dominant letters in the input word.

    Also prints the word and its dominant letter/frequency (tab-separated,
    no trailing newline) as a side effect.

    Args:
        word: The word whose dominant letters are being counted.

    Returns:
        [int]: the frequency of the dominant (most common) letter.
    """
    # Counter.most_common(1) yields the single most frequent character.
    letter, count = collections.Counter(word.lower()).most_common(1)[0]
    print(word.ljust(15), end="\t")
    print(f"{letter: <2} appeared {count} times", end="\t")
    return count
8b617b9256bbfb6b8e0fb0a2204e5688a71f5f0d
50,362
import json


def download_options(dataset, node, entityids, api_key=None):
    """
    Build the JSON payload for a download-options request, used to
    discover the different download options for each scene.

    Some download options may exist but still be unavailable due to disk
    usage and many other factors; if a download is unavailable it may need
    to be ordered.

    :param dataset:
    :param node:
    :param entityids:
    :param api_key: API key is not required.
    """
    payload = {
        "apiKey": api_key,
        "datasetName": dataset,
        "node": node,
        "entityIds": entityids,
    }
    return json.dumps(payload)
2c82faab4f1a74dfa95bc3acc9049919c47be2c2
50,363
import json


def jsfy(df, songID):
    """Take a data frame and song ID and return a Plotly bar-chart spec
    serialized as a JSON string.

    :param df: DataFrame with a 'track_id' column and the audio-feature columns
    :param songID: value matched against df['track_id']
    :return: JSON string of the form {"data": [{"x": ..., "y": ..., "type": "bar"}]}

    The previous implementation concatenated str(list) fragments, which
    emitted Python repr (single quotes) and therefore invalid JSON.
    """
    features = [
        'acousticness', 'danceability', 'energy',
        'instrumentalness', 'liveness', 'speechiness']
    find_id = df['track_id'] == songID
    song_stats = df[find_id][features]
    print(song_stats)
    payload = {
        "data": [{
            "x": list(song_stats.columns),
            # Cast to plain floats so json.dumps accepts numpy scalars.
            "y": [float(v) for v in song_stats.values[0]],
            "type": "bar",
        }]
    }
    return json.dumps(payload)
ca3199e936e7e3b1620a9352731bad91f6972e0c
50,365
def scan_and_target_id_to_context_info(scan_id, target_id, all_scans_in_dict):
    """
    Get context information (e.g., same-instance-class objects) of the
    object specified by target_id in the scene specified by scan_id.

    :param scan_id: (string) scene0010_00
    :param target_id: (int) 36
    :param all_scans_in_dict: dict from strings scene0010_00 to ScannetScan objects
    :return: (chair, [35, 37, 38, 39], scene0010_00-chair-5-36-35-37-38-39)
    """
    objects = all_scans_in_dict[scan_id].three_d_objects
    target = objects[target_id]
    label = target.instance_label
    # Distractors: other objects in the scene with the same instance label.
    distractors = [o.object_id for o in objects
                   if o.instance_label == label and o != target]
    parts = [scan_id, label, str(len(distractors) + 1), str(target_id)]
    parts += [str(d) for d in distractors]
    context_string = '-'.join(parts).replace(' ', '_')
    return label, distractors, context_string
bfcd36c0988a165405cf3f84a808f9fe43e3bc6c
50,366
import shlex


def shlex_split(s, comments=False, posix=True):
    """
    Split a string using the shell lexer, but return any incomplete token
    as the last component instead of erroring on unmatched quotations.
    """
    lexer = shlex.shlex(s, posix=posix)
    lexer.whitespace_split = True
    if not comments:
        lexer.commenters = ''
    tokens = []
    while True:
        try:
            token = lexer.get_token()
        except ValueError as e:
            print(repr(e))
            # Keep whatever partial token the lexer had accumulated.
            tokens.append(lexer.token)
            break
        if token == lexer.eof:
            break
        tokens.append(token)
    return tokens
8708c423af6ffa9b69aacec0e05676879f7104c1
50,367
def get_summary_entry_template():
    """
    Get a template describing each line in an assembly summary.

    Keys have the following meanings:

    - has_assembly: Does this line of the summary have assembly code.
    - assembly/info: The assembly line information dictionary (as returned
      by :func:`~.get_assembly_line_template`) filled in by the assembler.
    - has_mc_byte: Does this line of the summary have a machine code byte.
    - mc_byte/info: Machine code byte information dictionary (as returned
      by :func:`~.get_machine_code_byte_template` and filled by the
      assembly process).
    - mc_byte/has_label: Whether or not this machine code byte has an
      associated label.
    - mc_byte/label: The label of this machine code byte.

    Returns:
        dict: Summary entry template.
    """
    assembly = {"info": {}}
    mc_byte = {"info": {}, "has_label": False, "label": ""}
    return {
        "has_assembly": False,
        "assembly": assembly,
        "has_mc_byte": False,
        "mc_byte": mc_byte,
    }
63ab6a0df80150a74b60ec1e8094d938d9d15879
50,368
def bib_sublist(bibfile_data, val_type):
    """
    Sublist of bibfile_data whose elements' .bib attribute is val_type.

    :param list bibfile_data: List containing `RefFile`s.
    :param type val_type: type to filter on.
    :rtype: list
    """
    return [entry for entry in bibfile_data if isinstance(entry.bib, val_type)]
1865e5af22b873b5b43a1b1cde7982e92aa77226
50,370
def render(dot, desc_filename, suffix):
    """Output the graph by rendering it to desc_filename + suffix."""
    target = desc_filename + suffix
    return dot.render(target)
1e32d709624950be213fbadb21d67b55f1486392
50,372
import base64


def convert_image(filename: str) -> str:
    """Convert an image file to a base64 string.

    Args:
        filename: The name of the image to convert.

    Returns:
        The image converted to a serializable string representation.
    """
    with open(filename, 'rb') as fh:
        raw = fh.read()
    return base64.b64encode(raw).decode()
28d0341a76ee2323683225606a8fc4b80205cb28
50,373
def column_to_list(data: list, prop):
    """Collect the values of one column across a list of records.

    The original docstring promised support for both dict keys and
    sequence indices, but ``item.get`` only works for dicts; this version
    honours the documented contract for both.

    Args:
        data (list): list of dicts (prop is a key) or sequences (prop is
            an index).
        prop: key or index to extract from each item.

    Returns:
        list: the extracted values, with None where the key/index is absent.
    """
    values = []
    for item in data:
        try:
            values.append(item[prop])
        except (KeyError, IndexError, TypeError):
            values.append(None)
    return values
18ddae43a15cee920d8f3789dc23fe019ef2b63f
50,374
def get_colours(query, clusters):
    """Colour array for Plotly.js: red for the query cluster, blue otherwise."""
    query_key = str(query)
    return ['rgb(255,128,128)' if str(cluster) == query_key else 'blue'
            for cluster in clusters]
c06d07a7da8bf758c290c2b5ce80fbe0015bea4a
50,375
def transform_box_coord(H, W, box_vertices, dataset_name, high_rez=False, scaling_factor=1):
    """
    Transform box_vertices to match the coordinate system of the attributions.

    Scales each vertex by H / y_range (y_range is dataset specific) and
    flips the first coordinate (vertex[0] -> H - vertex[0]).
    Note: mutates box_vertices in place.

    :param H: Desired height of image
    :param W: Desired width of image (currently unused)
    :param box_vertices: list of mutable [v0, v1] vertex pairs
    :param dataset_name: 'CadcDataset', 'KittiDataset' or 'WaymoDataset'
    :param high_rez: if True, H is multiplied by scaling_factor
    :param scaling_factor: high-resolution scale factor
    :return: transformed box_vertices

    The previous version had two byte-identical transform branches for
    Kitti vs everything else; they are collapsed here.
    """
    if high_rez:
        H = H * scaling_factor
    y_range = None
    if dataset_name == 'CadcDataset':
        y_range = 100.0
    elif dataset_name == 'KittiDataset':
        '''Note: the range for Kitti is different now'''
        y_range = 79.36
    elif dataset_name == 'WaymoDataset':
        y_range = 168.96
    new_scale = H / y_range
    for vertex in box_vertices:
        vertex[0] = vertex[0] * new_scale
        vertex[0] = H - vertex[0]
        vertex[1] = vertex[1] * new_scale
    return box_vertices
5b4e915d4f42122a87c49cbc9cb5b93abfb1ce38
50,376
def transaction_to_itemset(T):
    """
    Convert one record of a database into itemset format: the set of
    1-based positions holding a nonzero value.
    """
    return {index + 1 for index, value in enumerate(T) if value != 0}
42a6b812e9b8e2c14180cbc6fd8e74e2d32d10c7
50,377
def whoosh_url():
    """Return the Whoosh index URL."""
    index_path = '/home/search/whoosh_index'
    return 'whoosh://' + index_path
918567f21feee372c2b69a5867aca151554a6998
50,378
import subprocess


def run_cmd(cmd):
    """Run cmd on the frontend machine and return its captured stdout (bytes)."""
    output = subprocess.check_output(cmd, shell=False)
    return output
d4a4f2d923734bc91b47cb05031faa5d4666e3ff
50,379
def short_assets(S):
    """
    Create synthetic short assets from a price series S.
    """
    gross_returns = S / S.shift(1)
    inverse = 2 - gross_returns          # shorting
    inverse.iloc[0] = S.iloc[0]          # reconstruct original
    return inverse.cumprod()
0d575e26fcb3ada3a373666cc40de4af904aa101
50,380
import requests


def connect(url):
    """HTTP GET against the corona API.

    Returns the parsed JSON body on HTTP 200, False for any other status,
    and exits the program on a request-level failure.
    """
    try:
        with requests.get(url, timeout=3) as response:
            if response.status_code == 200:
                return response.json()
            return False
    except requests.exceptions.RequestException as err:
        raise SystemExit(err)
ec7418766c66b50c8b2db6e1088146cabba3edce
50,381
def tri(s):
    """Return the word's letters sorted, for anagram detection."""
    return "".join(sorted(s))
4cfa0e226aef52e10927f086d0d840e387c61268
50,382
import os
import yaml
import click


def read_chart_data(app, verbose=None):
    """
    Read local chart data from the YAML file configured under
    app.config['ftl']['chart_data_path'].
    """
    path = os.path.join(app.config['ftl']['chart_data_path'])
    with open(path, 'r') as fh:
        chart_data = yaml.safe_load(fh.read())
    if verbose:
        click.echo(f'<--\nchart_data:\n{chart_data}\n-->')
    return chart_data
4b674b1347b0839f23689f1bf07b02a75b7f4ea9
50,387
def GetChunks(data, size=None):
    """Split data into consecutive chunks of at most *size* elements.

    :param data: any sliceable sequence (list, str, bytes, ...).
    :param size: chunk length; defaults to len(data), i.e. one chunk.
    :return: list of chunks; empty list for empty input.
    :raises ValueError: if size == 0 (the old implementation looped forever).
    """
    if size is None:
        size = len(data)
    # Slicing clamps past-the-end automatically, so the final short chunk
    # needs no special casing.
    return [data[start:start + size] for start in range(0, len(data), size)]
51bdfe5334292a700c660def7e3306b6fa528398
50,389
def serchNotBlackPixel(image):
    """
    Find the non-black pixel with the lowest y coordinate (where the
    mask begins), scanning rows top to bottom.

    :param image: pixel matrix (H x W x channels)
    :return: (i, j) coordinates of the first non-black pixel, or None
        if every pixel is black
    """
    shape = image.shape
    print("Size img : ", shape)
    for row in range(shape[0]):
        for col in range(shape[1]):
            # Elementwise comparison: non-black when every channel differs
            # from [0, 0, 0]... no: .all() requires all channels nonzero?
            # It requires every element of the != mask to be True.
            if (image[row, col] != [0, 0, 0]).all():
                return (row, col)
f75d434a79c5bb517f50f11707ab07ff9f9dee8f
50,391
def remove_duplicate_words(text: str) -> str:
    """Remove repeated words, keeping the first occurrence of each.

    Deduplication is global across the whole string with order preserved
    (not only for words adjacent to each other).

    Args:
        text (str): Accepts only one element (i.e., scalar).

    Returns:
        A text variable of <class 'str'> after removing duplicate words.

    Examples:
        >>> input_text = 'Potter Potter I have a message for you'
        >>> remove_duplicate_words(input_text)
        'Potter I have a message for you'
    """
    # dict preserves insertion order, so fromkeys dedupes while keeping
    # the first occurrence of each word.
    return " ".join(dict.fromkeys(text.split()))
2f78193325266b47fd55b340989822c62fb6b6df
50,395
def get_common_elements(element_list):
    """
    :param element_list: non-empty list of lists of values
    :return: a sorted list of the elements common to all the inner lists
    """
    common = set(element_list[0]).intersection(*element_list[1:])
    return sorted(common)
fa3233bb2945949837fd70db4d75f5803100e3ee
50,396
def user_file(filename, username="master"):
    """Return the JSON file path for the given user and filename."""
    assert username, "user_file: empty username."
    if username == 'master':
        return f'./library/{filename}.json'
    return f'./users/{username}/{filename}.json'
89ec038990eae7b285428ff9e8c7e70609cb9de3
50,397
def ddpg(loss):
    """
    DDPG loss hook (paper: https://arxiv.org/abs/1509.02971).

    + effective to use with replay buffer
    - uses grads from critic, not from the actual policy (harder to converge)

    NOTE(review): this is a placeholder -- `grads` is assigned but never
    used, and the loss is returned unchanged.
    """
    grads = loss
    return loss
1ed110c0827dfe489bb32290cd1bb1fe799b5089
50,398
import torch


def compute_active_units(mu, delta):
    """Estimate the number of active units in the latent space.

    Args:
        mu(torch.FloatTensor): [n_samples, z_dim]. Batch of posterior means.
        delta(float): variance threshold. Latent dimensions with a variance
            above this threshold are active.

    Returns:
        int: the number of active dimensions.
    """
    # Var[mu_d] = E[mu_d^2] - (E[mu_d])^2, computed over the sample axis.
    variances = torch.mean(mu ** 2, 0) - torch.mean(mu, 0) ** 2
    return torch.sum(variances > delta).item()
cdbd24ba9735f48f5c92b3c028106d7824a2e3cf
50,399
def decode_qwikcord(packet, channel=1):
    """Extract the qwikcord current measurements from val (CTavg, CTsum).

    Returns None unless packet['val'] is exactly 16 characters; channel 1
    decodes hex chars 6-11 (CTavg), any other channel chars 12-15.
    """
    val = str(packet.get('val', ''))
    if len(val) != 16:
        return None
    return int(val[6:12], 16) if channel == 1 else int(val[12:], 16)
d0edf4244b5d62d892e5ce71c966145e82b5dc37
50,400
def is_odd(number: int) -> bool:
    """Check whether number is odd; returns a boolean."""
    return number % 2 != 0
003658703e9263bdfeed9c97e5270a30c4a5ade8
50,401
def has_afg_license(instr):
    """Return True if the first license includes an AFG license."""
    reply = instr.query("LIC:ITEM? 0").strip()
    # The feature list lives inside the second quoted field, comma-separated.
    feature_list = reply.split('"')[3].split(",")
    return "AFG" in feature_list
0b9b2d65b7f910d3a4e412f67c76c5333d4f7d7b
50,403
def line_perpendicular(k, b, x):
    """Return (slope, intercept) of the perpendicular to y = kx + b that
    intersects it at the given x.

    @param k: y=kx+b
    @param b: y=kx+b
    @param x: where the perpendicular has to intersect the line
    """
    slope = -1. / k
    intercept = (k - slope) * x + b
    return slope, intercept
e72c8c65a5e11f1927f01548407687ad84837b89
50,404
import os


def key_name_from_path(path):
    """Convert a relative path into a key name.

    Splits the path into components, drops '.' segments, and joins the
    remainder with '/'.

    :param path: a relative filesystem path, e.g. './a/b'
    :return: the key name, e.g. 'a/b'
    """
    key_parts = []
    while True:
        head, tail = os.path.split(path)
        if tail != '.':
            key_parts.append(tail)
        # Stop when fully consumed. The head == path check guards against
        # the infinite loop the old code hit on absolute paths, where
        # os.path.split('/') keeps returning ('/', '').
        if head == '' or head == path:
            break
        path = head
    return '/'.join(reversed(key_parts))
ffdfb84067144968530db540cd084c0e8f6d74d0
50,405
def resolve_set_to_value(value_set, default_value, error_message):
    """Resolve a set of values to a single value, falling back to a
    default value if needed.

    Returns default_value for an empty set and the sole element for a
    singleton; raises ValueError(error_message) otherwise.
    """
    if not value_set:
        return default_value
    if len(value_set) == 1:
        (only_value,) = value_set
        return only_value
    raise ValueError(error_message)
f8d8cdf9dbbf73d7382fbf0fb37e217c975892f9
50,406
def ostr(string):
    """Format a number in scientific notation with two decimal places."""
    return format(string, '1.2e')
f4dc4fdf919f630d155b4be41ecb79abdc430c66
50,407
import torch


def batch_shuffle(batch: torch.Tensor):
    """Return the shuffled batch and the permutation indices to undo it.

    Examples:
        >>> # forward pass through the momentum model with batch shuffling
        >>> x1_shuffled, shuffle = batch_shuffle(x1)
        >>> f1 = moco_momentum(x1)
        >>> out0 = projection_head_momentum(f0)
        >>> out1 = batch_unshuffle(out1, shuffle)
    """
    n = batch.shape[0]
    permutation = torch.randperm(n, device=batch.device)
    return batch[permutation], permutation
383aa4d51c1654fee1953242c14a193bda54e057
50,408
def pooled_prob(N_A, N_B, X_A, X_B):
    """Return the pooled success probability for two samples."""
    successes = X_A + X_B
    trials = N_A + N_B
    return successes / trials
55beb8fc549fb0d71db16764738d7cdc9c570825
50,409
def compute_time(sign, FS):
    """Create the time array corresponding to a signal sampled at FS."""
    return [index / FS for index in range(len(sign))]
7d6bcc3a8f54d199a6bec9d46b0fe5bbdfeeb052
50,410
def __max_value_index(list):
    """
    Return the index of the maximum value in list.

    list -- numeric list
    """
    return list.index(max(list))
f94cc5629711000c6dcffb059ffe0c9bbdab62cf
50,411
def get_sqr(sudoku, row, col):
    """Return the 3x3 square containing cell (row, col), in row-major order."""
    top = row // 3 * 3
    left = col // 3 * 3
    return [sudoku[r][c]
            for r in range(top, top + 3)
            for c in range(left, left + 3)]
7cf2822ebedb6dcc502eea99d54d869f25741cc3
50,412
def isNA(string):
    """Filter #N/A from incoming tables."""
    return string in ("#N/A", "NA")
51ae99a110c8555f31e9b62c3b6edc2852c107c0
50,413
def validDate(date: str) -> bool:
    """Return whether a string follows the format of ####-##-##."""
    if len(date) != 10:
        return False
    year, month, day = date[0:4], date[5:7], date[8:10]
    return (date[4] == "-" and date[7] == "-"
            and year.isnumeric() and month.isnumeric() and day.isnumeric())
311eafdc794a97ff9b65c21b4ee79edd039c3027
50,414
def main(vault, keyname):
    """Return keyname entry from vault in plain text."""
    entry = vault.require(keyname)
    return entry
0e4e0cd2bdf178073e226025b1febf3b2fc26f8e
50,415
def topsoil():
    """
    Properties of typical topsoils, following main site type (1-4)
    classification.
    """
    def properties(site_id, field_capacity):
        # Depth, porosity and residual water are shared across site types.
        return {
            'topsoil_id': site_id,
            'org_depth': 0.05,
            'org_poros': 0.9,
            'org_fc': field_capacity,
            'org_rw': 0.15,
        }

    return {
        'mineral': properties(1, 0.33),
        'fen': properties(2, 0.514),
        'peatland': properties(3, 0.514),
        'openmire': properties(4, 0.514),
    }
6b4b986a09728cc148260571eea0309c13aa8890
50,416
import struct


def enc_float(val):
    """Encode a single float as 4 network-order (big-endian) IEEE-754 bytes."""
    packed = struct.pack("!f", val)
    return packed
f4d6d3fff683c3b64dcebc97c48b4ab8e3815f91
50,417
def fizzbuzz(num):
    """Return a list for 1..num where multiples of both 3 and 5 become
    'FizzBuzz', multiples of 5 'Buzz', multiples of 3 'Fizz', and every
    other number stays itself."""
    result = []
    for n in range(1, num + 1):
        word = ''
        if n % 3 == 0:
            word += 'Fizz'
        if n % 5 == 0:
            word += 'Buzz'
        result.append(word or n)
    return result
4759e0d2ffc95dc61c6c0129bba2acb88a775d1c
50,418
def _strip_list(list):
    """
    Strip all falsy (empty) elements from a list.

    :param list: the list to be stripped
    :return: the stripped list
    """
    return [element for element in list if element]
e000a3335fbcab640981a825a698586e502f89b1
50,420
def join_fields(fields):
    """
    Join a bunch of key/value pairs together into 'key="value"' form,
    comma-separated.

    :param fields: mapping of field names to values
    :return: str, e.g. 'a="1", b="x"'
    """
    # dict.iteritems() was Python 2 only; items() is the py3 equivalent.
    return ', '.join('%s="%s"' % pair for pair in fields.items())
147a2add910046f48d403f404ec9333e4532ea56
50,421
import math


def is_prime(n):
    """
    Check if n is a prime number.

    A prime is an integer greater than 1 divisible only by 1 and itself;
    this corrects the previous behaviour, which reported 1 as prime, and
    limits trial division to sqrt(n).

    Sample usage:
    >>> is_prime(0)
    False
    >>> is_prime(1)
    False
    >>> is_prime(2)
    True
    >>> is_prime(3)
    True
    >>> is_prime(4)
    False
    >>> is_prime(5)
    True
    """
    if n <= 1:
        return False
    # A composite n always has a divisor no larger than sqrt(n).
    for x in range(2, math.isqrt(n) + 1):
        if n % x == 0:
            return False
    return True
031948206a9b38ac12d6a0262eb749b8c5e18ca8
50,422
def find_ep_ix(eps, t):
    """Find the index of the epoch whose time interval contains ``t``.

    :param eps: sequence of epoch objects, each with a ``t0`` (start)
        attribute; the last one must also expose ``t1`` (end).  The
        epochs are presumably ordered by start time -- TODO confirm with
        callers, since the search logic relies on it.
    :param t: time value comparable with the epochs' ``t0``/``t1``.
    :return: index ``i`` such that ``eps[i].t0 <= t < eps[i + 1].t0``,
        or, for the last epoch, ``eps[-1].t0 <= t <= eps[-1].t1``.
    :raises ValueError: if ``t`` falls outside every epoch's interval.
    """
    for i, ep in enumerate(eps):
        if t >= ep.t0:
            try:
                # Interior epoch: t belongs here if it precedes the next start.
                test = t < eps[i+1].t0
            except IndexError:
                # Last epoch: fall back to its own end time (inclusive).
                test = t <= eps[i].t1
            if test:
                return i
    else:
        # Loop completed without a match: t is outside all intervals.
        raise ValueError("t must be in the epochs' time intervals")
c4c39a82a925aed5b44442246449d295b2a398c6
50,423
import random def random_mac_address(local_admin=True): """ Generates and returns a random MAC address. """ # By default use a random address in VMWare's MAC address # range used by VMWare VMs, which has a very slim chance of colliding # with existing devices. mac = [ 0x00, 0x05, 0x69, random.randint(0x00, 0x7f), random.randint(0x00, 0xff), random.randint(0x00, 0xff) ] if local_admin: # Universally administered and locally administered addresses are # distinguished by setting the second least significant bit of the # most significant byte of the address. If the bit is 0, the address # is universally administered. If it is 1, the address is locally # administered. In the example address 02-00-00-00-00-01 the most # significant byte is 02h. The binary is 00000010 and the second # least significant bit is 1. Therefore, it is a locally administered # address.[3] The bit is 0 in all OUIs. mac[0] |= 2 return ':'.join('{0:02X}'.format(o) for o in mac)
d72a702887c3e51f7917991af596a8dbdd1c3ab3
50,425
import importlib.metadata


def package_details():
    """Return a mapping of installed package names to their versions.

    Uses the standard-library ``importlib.metadata`` instead of the
    deprecated ``pkg_resources`` API (which setuptools has slated for
    removal and which is slow to import).

    :return: dict of ``{project_name: version_string}`` for the current
        environment.
    """
    details = {}
    for dist in importlib.metadata.distributions():
        # metadata["Name"] is the declared project name; a broken
        # .dist-info directory may lack it, so skip those entries.
        name = dist.metadata["Name"]
        if name:
            details[name] = dist.version
    return details
219140f54cadd58ac8dfea9b78081ad62ed76057
50,426
def crf_preprocess_candidates(candidates):
    """Split annotated candidates into parallel feature and label lists.

    :param candidates: iterable of candidates, each a sequence of
        ``(token_features, label)`` pairs.
    :return: tuple ``(features, labels)`` where entry i of each list
        holds, respectively, the token features and the labels of
        candidate i (empty candidates yield empty sublists).
    """
    features = [
        [token_features for token_features, _ in candidate]
        for candidate in candidates
    ]
    labels = [
        [label for _, label in candidate]
        for candidate in candidates
    ]
    return features, labels
91c8f941a9334d26a8ac0623201c13ca560cfeb0
50,428
def _BasenameFromPath(path): """Extracts the basename of either a Unix- or Windows- style path, assuming it contains either \\ or / but not both. """ short_path = path.split('\\')[-1] short_path = short_path.split('/')[-1] return short_path
90dd65c95ef61e48132d7f0404a5c4d5bef685c1
50,429
import math


def distance_between(vec1, vec2):
    """Return per-element Euclidean distances between two point lists.

    :param vec1: list of (x1, y1, z1) points
    :param vec2: list of (x2, y2, z2) points
    :return: list where entry i is the Euclidean distance between
        vec1[i] and vec2[i], using *all* components.
    :raises IndexError: if the two lists differ in length.

    BUGFIX: the original computed ``hypot`` over only the first two
    components, silently ignoring z (and its docstring described yet
    another operation -- element-wise absolute differences).
    """
    if len(vec1) != len(vec2):
        raise IndexError("vec1 and vec2 don't have the same number of elements")
    # math.hypot accepts any number of coordinates (Python 3.8+), so this
    # also works for 2-D or N-D points as long as each pair matches.
    return [
        math.hypot(*(a - b for a, b in zip(p1, p2)))
        for p1, p2 in zip(vec1, vec2)
    ]
4ba5356d8cd70a9630283038c781304c248e0bda
50,430
def has_case_updates(case_block_kwargs):
    """
    Return True if case_block_kwargs contains case changes.

    >>> has_case_updates({"owner_id": "123456", "update": {}})
    True
    >>> has_case_updates({"update": {}})
    False
    """
    has_update = bool(case_block_kwargs.get("update"))
    has_index = bool(case_block_kwargs.get("index"))
    # Any *truthy* key other than "update"/"index" also counts as a change
    # (generator preserves the original's truthiness check on the key itself).
    extra_keys = (k for k in case_block_kwargs if k not in ("update", "index"))
    return has_update or has_index or any(extra_keys)
e80ec5d38b7d7b05983d6672df681c2efb4d3d1d
50,434
import torch


def mmd_neg_biased(X, Y, k):
    """Compute biased MMD^2 without the S_YY term.

    S_X, S_XY (and the omitted S_YY) are the pairwise-XX, pairwise-XY and
    pairwise-YY kernel summation terms respectively.

    :param X: array of shape (m, d)
    :param Y: array of shape (n, d)
    :param k: GPyTorch kernel (its lazy result is densified via
        ``.evaluate()`` before summing)
    :return: tuple ``(MMD^2, S_X, S_XY)`` as Python floats
    """
    num_x = X.shape[0]
    num_y = Y.shape[0]
    x_tensor = torch.tensor(X, dtype=torch.float32)
    y_tensor = torch.tensor(Y, dtype=torch.float32)
    # Mean of the m*m within-X kernel entries and twice the mean of the
    # m*n cross entries (same arithmetic as before, just renamed).
    S_X = (1 / (num_x ** 2)) * torch.sum(k(x_tensor).evaluate())
    S_XY = (2 / (num_x * num_y)) * torch.sum(k(x_tensor, y_tensor).evaluate())
    return (S_XY - S_X).item(), S_X.item(), S_XY.item()
fa45797415224f171c6ce900f5a83e8ce13e9ded
50,435
import re


def normalize_text(text):
    """Normalize a tweet-like string.

    Lower-cases the text and removes URLs, @mentions, #hashtags, digits
    and most punctuation/special symbols, then collapses runs of
    whitespace into single spaces and strips the ends.

    NOTE: the substitutions are order-dependent; reordering them would
    change the output.
    """
    text = text.lower()
    # URLs: www.*, http(s)://*, and pic.twitter.com short links.
    text = re.sub('((www\.[^\s]+)|(https?://[^\s]+)|(pic\.twitter\.com/[^\s]+))', '', text)
    # @mentions and #hashtags (removed entirely, including their text).
    text = re.sub('@[^\s]+', '', text)
    text = re.sub('#([^\s]+)', '', text)
    # Special symbols and digits -> space (so words stay separated).
    text = re.sub('[:;>?<=*+()&,\-#!$%\{˜|\}\[^_\\@\]1234567890’‘]', ' ', text)
    # Any remaining digits (e.g. ones adjacent to kept characters).
    text = re.sub('[\d]', '', text)
    text = text.replace(".", '')
    text = text.replace("'", '')
    text = text.replace("`", '')
    # NOTE(review): "'" was already removed two lines above, so this
    # "'s" replacement can never match -- likely dead code.
    text = text.replace("'s", '')
    text = text.replace("/", ' ')
    text = text.replace("\"", ' ')
    text = text.replace("\\", '')
    # text = re.sub(r"\b[a-z]\b", "", text)
    # Collapse whitespace runs and trim the ends.
    text = re.sub('\s+', ' ', text).strip()
    return text
4c0ecf208eb78734a0263ac0fad0f368c931a6a5
50,436
import logging


def init_logger(
    _logger: logging.Logger, log_level: int, log_file: str
) -> logging.Logger:
    """Initialise the logger.

    :param logging.Logger _logger: Logger instance to initialise (any
        ``Logger`` works; the original annotation said ``RootLogger``,
        but only the generic ``Logger`` API is used).
    :param int log_level: Desired logging level (e.g. ``logging.INFO``).
    :param str log_file: Path to destination file for logging output; the
        file is opened in ``'w'`` mode, truncating any previous contents.
        If ``None``, logs go to a plain ``StreamHandler`` (standard
        *error* by default, not stdout).
    :return: The initialised logger object.
    :rtype: logging.Logger

    NOTE(review): each call adds another handler -- calling this twice on
    the same logger duplicates every log line.
    """
    # Initialise the logger
    _logger.setLevel(log_level)
    if log_file is not None:
        handler = logging.FileHandler(filename=log_file, mode='w')
    else:
        handler = logging.StreamHandler()
    formatter = logging.Formatter(
        '%(asctime)s %(name)-12s %(levelname)-8s %(message)s'
    )
    handler.setFormatter(formatter)
    _logger.addHandler(handler)
    _logger.info("Logger successfully initialised")
    return _logger
6bceb87729e66c0e037b7667d572d001b0ab981e
50,438
from functools import reduce
import operator


def product(nums):
    """Return the product of all values in *nums*.

    Equivalent to ``reduce(operator.mul, nums, 1)``: an empty iterable
    yields the multiplicative identity 1.

    Attributes:
        nums (iterable) Iterator of integers
    """
    result = 1
    for value in nums:
        result = operator.mul(result, value)
    return result
f1f25302014e024300220e575818dfd47c80f14a
50,440
def Idetifiers():
    """Return 0th-extension FITS header keyword dictionaries.

    Used to select and pair FITS files.  (The function name -- including
    its historical typo -- is kept for backward compatibility.)

    :return: tuple ``(common, clear, f28x50lp)`` of keyword -> value
        dicts: keywords shared by all files, then the aperture/filter
        sets identifying the clear and the F28X50LP long-pass exposures.
    """
    common = dict(PROPOSID=11853, SCLAMP='TUNGSTEN', OBSMODE='ACCUM',
                  CCDAMP='D', CCDGAIN=4)
    clear = dict(APERTURE='50CCD', PROPAPER='50CCD', FILTER='Clear',
                 APER_FOV='50x50')
    f28x50lp = dict(APERTURE='F28X50LP', PROPAPER='F28X50LP',
                    FILTER='Long_Pass', APER_FOV='28x50')
    return common, clear, f28x50lp
82847dadc1297d4896b91a04a51385e02b4abb81
50,444
def sort_and_count_inversions(aList):
    """Return ``(inversion_count, sorted_list)`` for *aList*.

    An inversion is a pair of indices ``i < j`` with
    ``aList[i] > aList[j]``.  Counting rides along with a top-down merge
    sort, so the whole computation runs in O(n log n).
    """
    n = len(aList)
    if n <= 1:
        # Base case: nothing to sort and no possible inversions.
        return 0, aList

    mid = int(n / 2)
    left_count, left = sort_and_count_inversions(aList[:mid])
    right_count, right = sort_and_count_inversions(aList[mid:])

    merged = []
    inversions = left_count + right_count
    i = j = 0
    while i < len(left) and j < len(right):
        if left[i] < right[j]:
            merged.append(left[i])
            i += 1
        else:
            # right[j] jumps ahead of every element remaining in `left`;
            # each of those pairs is a split inversion.
            merged.append(right[j])
            j += 1
            inversions += len(left) - i
    # One half is exhausted; append the remainder of the other as-is.
    merged.extend(left[i:])
    merged.extend(right[j:])
    return inversions, merged
89e014f3a6c732675eaa3ea73745d335c0dc4a0b
50,448
def build_api_link(service_name, callback_url):
    """Build a UDID.io third-party API URL.

    :param service_name: value for the ``service`` query parameter.
    :param callback_url: value for the ``callback`` query parameter;
        inserted verbatim -- the caller is responsible for any
        URL-encoding.
    :return: the assembled URL string.
    """
    return (
        'https://get.udid.io/thirdparty/api/'
        f'?callback={callback_url}&service={service_name}&schemeurl=0'
    )
cd49d06201d70a7cb0d01bc8642cce06c47738af
50,449
def get_version(server_id, version, client):
    """Request a specific version record of the server with the given id.

    :param server_id: server identifier; coerced to ``str`` before the call.
    :param version: version selector, passed through to the client untouched.
    :param client: project API client exposing ``get_server_version`` --
        presumably returns the deserialized response; confirm against the
        client implementation.
    :return: whatever ``client.get_server_version`` returns.
    """
    return client.get_server_version(str(server_id), version)
06319e8563e7075d246be73571cac51867c2a9bb
50,452
import shlex


def linux_notify(title: str, message: str) -> str:
    """Build a ``notify-send`` command for desktop notifications on Linux.

    SECURITY FIX: the original interpolated *title* and *message* inside
    double quotes, allowing shell injection (e.g. a message containing
    ``"; rm -rf ~; "``).  ``shlex.quote`` escapes each argument safely
    for POSIX shells.

    :param title: notification title.
    :param message: notification body.
    :return: a shell command string; the caller is expected to execute it.
    """
    return f"notify-send {shlex.quote(title)} {shlex.quote(message)}"
ae702eed884e35fccaf974898de9cc0c12b686c2
50,453
def get_default_qm7_task_names():
    """Return the default QM7 task names (the measured ``u0_atom`` target).

    :return: a fresh list containing the single task name ``'u0_atom'``.
    """
    default_tasks = ('u0_atom',)
    return list(default_tasks)
fc64aac53bb34daac7fad436ea6f6ae256c71a79
50,454
def extractCoordinates(dataframe1, dataframe2, paired):
    """Extract coordinates of paired nodes in both scans.

    Parameters
    ----------
    dataframe1 : pandas.DataFrame
        Data from the lower-resolution scan.
    dataframe2 : pandas.DataFrame
        Data from the higher-resolution scan.
    paired : list of tuples
        Each tuple holds ``(id_in_dataframe1, id_in_dataframe2)`` for the
        most similar nodes across the two scans.

    Returns
    -------
    tuple of (list, list)
        ``(coordsA, coordsB)``: per-pair ``[z, y, x]`` coordinates taken
        from ``dataframe1`` and ``dataframe2`` respectively.
    """
    keys = ('node_ZYX_mm 0', 'node_ZYX_mm 1', 'node_ZYX_mm 2')
    coordsA = []
    coordsB = []
    for id_a, id_b in paired:
        # BUGFIX: the original indexed *both* frames with paired[i][0],
        # so coordsB was read from the wrong node ids in dataframe2.
        coordsA.append([dataframe1.loc[id_a, key] for key in keys])
        coordsB.append([dataframe2.loc[id_b, key] for key in keys])
    return coordsA, coordsB
131661e0b95d04f8e76f1d9a45e9c32f7344e051
50,455
import typing


def handle_authentication() -> typing.Any:
    """No-op authentication hook.

    There is no need for authentication, as no files are created; callers
    always receive ``None`` and should treat it as "no credentials
    required".
    """
    return None
0db5b4b2f97312658696554d9c669b30493b8cdb
50,458
import yaml
import logging


def read_configuration_from_file(file: str) -> dict:
    """Read configuration from a YAML file.

    :param file: path of the YAML file to parse.
    :return: the parsed configuration.  Returns ``{}`` both when the file
        cannot be parsed and when it is empty -- ``yaml.safe_load``
        yields ``None`` for an empty document, which previously leaked
        out despite the ``dict`` return annotation.
    """
    with open(file, "r") as stream:
        try:
            data = yaml.safe_load(stream)
        except yaml.YAMLError:
            # Best-effort: log the parse failure with traceback and fall
            # back to an empty configuration instead of propagating.
            logging.exception("Unable to parse configurations")
            return {}
    return data if data is not None else {}
07bb844650e7df5c84b5b415b146eafb739f4e86
50,459
from typing import List


def join_words_cat(words: List[str]) -> str:
    """Join *words* into one string via repeated ``+`` concatenation.

    Deliberately uses explicit concatenation rather than ``str.join`` --
    the ``_cat`` suffix suggests this variant exists to demonstrate or
    benchmark the (quadratic) concatenation approach.
    """
    joined = ""
    for token in words:
        joined = joined + token
    return joined
f47f2ea2f1e2fa9e53bb586283f6d8f2ba6af3cc
50,460
def any():
    """Return a matcher object that compares equal to any value.

    Note: this deliberately shadows the builtin ``any`` in this module;
    callers use the returned object as a wildcard in equality-based
    comparisons (e.g. in test assertions).
    """
    class _Wildcard:
        def __eq__(self, other):
            # Equal to everything, regardless of type.
            return True

    return _Wildcard()
3ea8946406cf59b22371d55427effffd35f78427
50,461
def array_chunk_slice(array, size):
    """Split *array* into consecutive slices of at most *size* elements.

    The final chunk may be shorter when ``len(array)`` is not a multiple
    of *size*.  Works on any sliceable sequence (lists, strings, ...).
    """
    return [array[start:start + size] for start in range(0, len(array), size)]
1388bfd67bcd1689fb474b7a5b8495680915ed5b
50,463
def gather_dictQPV(Dinit, Dmodified, index, D0, D1, D2):
    """Gather the threshold dictionaries for use in Electre Tri.

    For each key of ``Dinit`` (in order), builds a 3-tuple of the i-th
    values of the three source dictionaries, where the dictionary at
    position ``index`` is replaced by ``Dmodified``.

    :param Dinit: template dictionary supplying the output keys.
    :param Dmodified: modified dictionary substituted at slot ``index``.
    :param index: which slot (0, 1 or 2) ``Dmodified`` occupies.
    :param D0: original dictionary for slot 0.
    :param D1: original dictionary for slot 1.
    :param D2: original dictionary for slot 2.
    :return: dict mapping each ``Dinit`` key to its gathered value
        triple; empty when ``index`` is not 0, 1 or 2 (matching the
        original behaviour).
    """
    if index == 0:
        slots = (Dmodified, D1, D2)
    elif index == 1:
        slots = (D0, Dmodified, D2)
    elif index == 2:
        slots = (D0, D1, Dmodified)
    else:
        return {}
    keys = list(Dinit.keys())
    # Positional pairing (not key lookup) preserves the original's
    # assumption that all dictionaries share one ordering.
    columns = [list(d.values()) for d in slots]
    return {
        keys[i]: (columns[0][i], columns[1][i], columns[2][i])
        for i in range(len(Dinit))
    }
62c29b1b9f6430e0a1f43b2a64daf37d018a13e1
50,464
import os


def mydocuments():
    """Return the path to the current user's ``documents`` folder.

    Expands ``~`` via :func:`os.path.expanduser`; no check is made that
    the directory actually exists.
    """
    documents_path = os.path.expanduser('~/documents')
    return documents_path
db8b0f3fe24160f971f8b1f3d33e5c0263401418
50,467
def create_biomarker_schema(schema: dict) -> dict:
    """Build an EVE endpoint definition around a biomarker schema.

    Arguments:
        schema {dict} -- Cerberus schema dictionary; its fields are
            merged into (and may override) the mandatory
            trial/assay/record_id fields.

    Returns:
        dict -- EVE endpoint definition.
    """
    # The three mandatory fields get distinct dicts on purpose, so that a
    # caller mutating one does not affect the others.
    merged_schema = {
        "trial": {"type": "objectid", "required": True},
        "assay": {"type": "objectid", "required": True},
        "record_id": {"type": "objectid", "required": True},
    }
    merged_schema.update(schema)
    return {
        "public_methods": [],
        "resource_methods": ["GET", "POST"],
        "allowed_roles": ["user", "uploader", "admin", "system"],
        "allowed_item_roles": ["user", "uploader", "admin", "system"],
        "schema": merged_schema,
    }
6e534a7ecdb54c9c23d811430a89cf98bf3e9bdd
50,469
def zigpy_zll_device(zigpy_device_mock):
    """ZLL device fixture.

    Builds a mock zigpy device with a single endpoint (id 1) exposing
    input cluster 0x1000 (ZLL commissioning), no output clusters and
    device type 0x1234, plus a fixed IEEE address and test
    manufacturer/model strings.

    NOTE(review): relies on the project's ``zigpy_device_mock`` factory;
    presumably registered as a pytest fixture elsewhere -- confirm the
    missing decorator.
    """
    return zigpy_device_mock(
        {1: {"in_clusters": [0x1000], "out_clusters": [], "device_type": 0x1234}},
        "00:11:22:33:44:55:66:77",
        "test manufacturer",
        "test model",
    )
b3a5a6773857edfc3d26e87e1c6a20052b80ec7f
50,470
def unique_col(name, id_):
    """Pair a column title with an id to make a unique header/data key.

    Apply this to both the entry in ``headers`` and the dict keys in
    ``rows`` before passing them to one of the spreadsheet functions.

    :param name: the actual column name/title.
    :param id_: the id (or whatever else) that guarantees uniqueness.
    :return: the ``(name, id_)`` tuple.
    """
    return (name, id_)
a945e63e0e35f0407a2a235ec98cf45e92d5998b
50,471
import re


def compile_terms(terms):
    """Compile each term into a case-insensitive literal regex pattern.

    Terms are escaped with :func:`re.escape`, so they match literally
    (no metacharacter interpretation); the IGNORECASE and UNICODE flags
    make matching case- and locale-robust.
    """
    flags = re.IGNORECASE | re.UNICODE
    compiled = []
    for term in terms:
        compiled.append(re.compile(re.escape(term), flags))
    return compiled
de614da9e9f35b2dee61b07f649ed7559757cc4c
50,472