content: string (35 to 416k chars) | sha1: string (40 chars) | id: int64 (0 to 710k)
def _is_page_404(soup: str) -> bool:
    """Check if a 404 page is returned.

    Parameters
    ----------
    soup : str
        HTML content from a webpage; will attempt to coerce to str if not str.

    Returns
    -------
    bool
        True if the page is a 404 page, False otherwise.
    """
    if not isinstance(soup, str):
        soup = str(soup)
    return '<h1>404 - Not Found</h1>' in soup
715b515463b61386133983f757fff0502145beaa
20,057
from pathlib import Path


def get_file_path(path: Path, fn: str) -> Path:
    """Find an available path for a file, using an index prefix.

    Parameters
    ----------
    path : Path
        Directory to search.
    fn : str
        Filename.

    Returns
    -------
    Path
        File path prefixed with the next available index.
    """
    paths = path.glob(f"*_{fn}")
    max_index = max((int(p.name.split("_")[0]) for p in paths), default=-1) + 1
    return path / f"{max_index}_{fn}"
54182f627cded53f5f6dabb5b32965bd1693a4ac
20,058
def filter_judge_scores(scores, judge):
    """Filter all scores for a team down to those given by a judge."""
    judge_scores = scores.filter(judge=judge)
    if judge_scores:
        return judge_scores
    return None
57ca0e1d3902e31f1fb4b6f70819d370d2fd840f
20,059
def get_referrer(request):
    """Return the request's HTTP referrer."""
    referrer = request.META['HTTP_REFERER']
    return referrer
86ed6499bd90f92b8cc1a5f27da71dacba4f7653
20,060
def shift_left(number, shift):
    """Rotate the bits of a 32-bit binary string left by `shift` positions.

    The function receives a binary string such as a = bin(12) and performs a
    circular left shift on it, zero-padding the result to 32 bits.

    number: a binary string (e.g. '0b1100'), not an int
    shift: number of bit positions to rotate to the left
    """
    n = number
    n1 = '0b' + n[2 + shift:] + n[2:2 + shift]
    if len(n1) == 34:  # '0b' prefix plus 32 bits
        return n1
    else:
        diff = abs(len(n1) - 34)
        return '0b' + '0' * diff + n1[2:]
41ac3cee35b4a00d36c83f718c0439900d6b4285
20,062
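A hand-traced usage sketch for shift_left above; the values are illustrative, not taken from the dataset:

# '1100' (bin(12)) rotated left by two positions is '0011'; the result is
# zero-padded to 32 bits, so it parses back to the integer 3.
result = shift_left(bin(12), 2)
assert int(result, 2) == 3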
import logging


def get_logger(name=None):
    """Return a logger to use.

    Parameters
    ----------
    name : None or str, optional
        Name of the logger. Defaults to None.

    Returns
    -------
    logging.Logger
        Logger object.
    """
    return logging.getLogger("bids-schema" + (".%s" % name if name else ""))
e1bf5e385615a26391b5121752300e53d8e288d5
20,063
def _binary_search(array, elt):
    """Modified binary search: return index + 1 on a match, otherwise the
    insertion point for `elt` in the sorted `array`."""
    start = 0
    end = len(array) - 1
    while start <= end:
        mid = (start + end) // 2
        if elt == array[mid]:
            return mid + 1
        if elt < array[mid]:
            end = mid - 1
        else:
            start = mid + 1
    # The loop only exits once start > end, so the guard is unconditional.
    return start
857e7d0d32522b11f30027eff4294efd6b5f5ac0
20,064
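A quick illustration of _binary_search's return convention, traced by hand against the code above:

sorted_vals = [1, 3, 5, 7]
_binary_search(sorted_vals, 5)   # -> 3, the matching index (2) plus one
_binary_search(sorted_vals, 4)   # -> 2, the insertion point for a missing value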
import argparse


def parse_args():
    """Parses AROPE arguments."""
    parser = argparse.ArgumentParser(description="Run GAE.")
    parser.add_argument('--inputgraph', type=str,
                        help='Input graph path')
    parser.add_argument('--output', type=str,
                        help='Path where the embeddings will be stored.')
    parser.add_argument('--tr_e', nargs='?', default=None,
                        help='Path of the input train edges. Default None (in this case returns embeddings)')
    parser.add_argument('--tr_pred', nargs='?', default='tr_pred.csv',
                        help='Path where the train predictions will be stored.')
    parser.add_argument('--te_e', nargs='?', default=None,
                        help='Path of the input test edges. Default None.')
    parser.add_argument('--te_pred', nargs='?', default='te_pred.csv',
                        help='Path where the test predictions will be stored.')
    parser.add_argument('--dimension', type=int, default=2,
                        help='Embedding dimension. Default is 2.')
    parser.add_argument('--delimiter', default=',',
                        help='The delimiter used to separate node ids in input file. Default is ","')
    parser.add_argument('--verbose', default=False, action='store_true',
                        help="Print training loss and accuracy during optimization.")
    return parser.parse_known_args()
0c7b697370143f2a6ca8216eab109021937443a5
20,065
def compile_playable_podcast(playable_podcast):
    """
    :param playable_podcast: list containing dicts of key/value pairs for
        playable podcasts
    """
    items = []
    for podcast in playable_podcast:
        items.append({
            'label': podcast['title'],
            'thumbnail': podcast['thumbnail'],
            'path': podcast['url'],
            # 'info': podcast['desc'],
            'is_playable': True,
        })
    return items
76747634ea8137c2c16e2bd0118928963f7c9a3b
20,066
def clean_onnx_name(name: str) -> str:
    """Modify an ONNX name that is potentially invalid in dace to make it valid."""
    return "ONNX_" + (name.replace(".", "DOT")
                      .replace(":", "COLON")
                      .replace("/", "SLASH")
                      .replace("-", "DASH"))
c9c075d2766746e47bf08a6069cc59cf2dd8dea1
20,067
def dynamic_filter(func):
    """Function decorator that sets the wrapped function's 'dynamic_filter'
    attribute to True.
    """
    func.dynamic_filter = True
    return func
a59cae5d20c367a1966cb35ca1c5b80ccd9d895a
20,068
def get_strands(names, strand_dict):
    """Get list of sequences from list of names."""
    strands = []
    for name in names:
        assert name in strand_dict, "Strand %s referenced before definition" % name
        strands.append(strand_dict[name])
    return strands
94e1a5f02bb91c5d9aa440461dd435cd8e77a522
20,069
import argparse


def parseArgs():
    """
    A list of options and hyperparameters can be passed through bash commands
    while running this script.
    """
    parser = argparse.ArgumentParser(description='List of script options and network hyperparameters')
    parser.add_argument('-c', '--channels', metavar='', type=int, default=32,
                        help='approximate number of first level channels in the model - default value = 32')
    parser.add_argument('-m', '--model_name', metavar='', type=str, default="model",
                        help='save the trained model into or load a trained model from "model_name.pt" - default name = "model"')
    return parser.parse_args()
aa47c9ffd1688c4dcfb9001349fa959ecc32d40a
20,070
def transform_includes_noext(soup, extractedmanifest, transformedmanifest):
    r"""
    Softcover's input command automatically adds the .tex extension, so we
    must edit all \input statements to remove the `.tex` extension for this
    to work.
    """
    inputs = soup.find_all('input')
    for input_tag in inputs:
        includerelpath = str(input_tag.string)
        includerelpath_noext = includerelpath.replace('.tex', '')
        input_tag.string = includerelpath_noext
    return soup
80fb05ec24324021e3a2806145057c76c2cf7617
20,071
def actual_svg(pathname: str) -> str:
    """Read an SVG image from disk."""
    with open(pathname, "r") as file:
        svg = file.read()
    return svg
ba7ae52d3bdbae1d3112a183de2f484f4bcc066d
20,073
def squash_flags(flags):
    """Remove lowercase flags if the respective uppercase flag exists

    >>> squash_flags('abc')
    'abc'
    >>> squash_flags('abcC')
    'ab'
    >>> squash_flags('CabcAd')
    'bd'
    """
    exclude = ''.join(f.upper() + f.lower() for f in flags if f == f.upper())
    return ''.join(f for f in flags if f not in exclude)
1484a7f4b1764e1c48dc4eb07e15db98a7bb9881
20,075
def split_by_values(data, sort_col, value_range):
    """
    Takes a 2D list of data, the index of the column to sort by, and a range
    (an array of length 2, where value_range[0] is the min and value_range[1]
    is the max).

    Sorts rows by the value in the sorting column (relies on this value being
    of integer type):
    - for each value within the given range a separate list of lists is created
    - if a value in the range is not found in the sorting column, the result
      is an empty list
    """
    data_sorted = [[] for i in range(value_range[0], value_range[1])]
    error_count = 0
    for row in data:
        if value_range[0] <= row[sort_col] < value_range[1]:
            data_sorted[row[sort_col] - value_range[0]].append(row)
        elif row[sort_col]:
            error_count += 1
    if error_count:
        print("Warning:", error_count,
              "rows were rejected as their values did not fall in the range(",
              value_range[0], ":", value_range[1], ")")
    return data_sorted
d1b3838d2142e306d4e485e36cad52d043fe221f
20,076
def verifica_punct(punct, linii, coloane):
    """Check whether a given point lies inside the image."""
    return 0 <= punct[0] < linii and 0 <= punct[1] < coloane
073f50d3654deb87bb5c363bd79e2353d67c0c1a
20,077
import os


def mkdir_if_not_exists(path):
    """
    Create a directory if it does not already exist.

    :param path: Directory path
    :return: True if the folder exists or has been correctly created,
        False if there is a problem.
    """
    try:
        if not os.path.exists(path):
            os.makedirs(path)
        return True
    except OSError:  # a bare `except` would also swallow KeyboardInterrupt
        return False
4f3e6e46c706835482e603335c1a6c558ecd0b49
20,078
def empty_stack(stack, graph):
    """
    Pops the items in the stack. If they have no head, they are assigned a
    ROOT head.

    :param stack:
    :param graph:
    :return:
    """
    for word in stack:
        if word['id'] not in graph['heads']:
            graph['heads'][word['id']] = '0'
            graph['deprels'][word['id']] = 'ROOT'
    stack = []
    return stack, graph
323a841f359d3a9823bd6e43d3de7540b3f2a6df
20,084
from pathlib import Path
import argparse


def valid_chart_dir(argstring):
    """
    Validates that the specified string is a good directory path. For our
    purposes, that means that it exists and contains readable files named
    Chart.yaml and values.yaml. If this is all true, the function returns a
    pathlib.Path object for the directory. Otherwise an appropriate argparse
    exception is raised.
    """
    dir_path = Path(argstring)
    if not dir_path.exists():
        raise argparse.ArgumentTypeError("Path does not exist")
    elif not dir_path.is_dir():
        raise argparse.ArgumentTypeError("Path exists but is not a directory")
    for file_name in ["Chart.yaml", "values.yaml"]:
        file_path = dir_path / file_name
        if not file_path.exists():
            raise argparse.ArgumentTypeError("{} not found".format(file_path))
        elif not file_path.is_file():
            raise argparse.ArgumentTypeError(
                "{} found but it is not a regular file".format(file_path))
        # Finally, make sure we can open it
        try:
            file_path.open("rt")
        except Exception as exc:
            raise argparse.ArgumentTypeError(
                "Error opening {file_path}: {exc}".format(file_path=file_path,
                                                          exc=exc)
            ) from exc
    return dir_path
19aeb6d6f981c5fde603d6bbb7f0854574cddc57
20,086
def tc(text: str) -> str:
    """Filter for table cell content."""
    return text.replace("|", "{vbar}")
85ead824329476ddb5c5cb97c874a3779ccea31e
20,087
import logging


def render_disassembly(dis, match_offset, match_len, context_lines=4):
    """
    Accepts a DecodeGenerator from distorm and returns a string that will be
    directly rendered in the ICE yara results page.

    dis: DecodeGenerator from distorm.Decode()
    match_offset: offset into the file where the match occurred
    match_len: length of the yara match
    context_lines: how many lines of disassembly to return before and after
        the matching lines
    """
    lines = []
    first_line = None
    last_line = None
    for i in range(len(dis)):
        instr = dis[i]
        asm = "0x{:08X} {:<20}{}".format(instr[0], instr[3], instr[2])
        if instr[0] >= match_offset and instr[0] < match_offset + match_len:
            lines.append("<b>{}</b>".format(asm))
            if first_line is None:  # `if not first_line` mis-handled a match at index 0
                first_line = i
        else:
            lines.append(asm)
            if first_line is not None and last_line is None:
                last_line = i
    lines = (
        lines[:first_line][-context_lines - 1:]
        + lines[first_line:last_line]
        + lines[last_line:][:context_lines]
    )
    logging.error("Rendered disassembly: {}".format("\n".join(lines)))
    return "\n".join(lines)
ce719252bae1f5833e788922832cf11207f63deb
20,088
def configure_layer_activations(layer_shapes, interpreted_shapes,
                                nn_parameters, act_type, act_args):
    """
    Outputs the necessary tuples for layer activations of both conv and
    non-conv layers.

    layer_shapes includes the input layer, so i+1 is synced with
    nn_parameters; no nn_parameters are included for the input layer.
    """
    layer_act_types = []
    layer_act_args = []
    for i, layer_pars in enumerate(nn_parameters):
        l_act_args = act_args
        # If the user gave separate parameters for each layer, then
        # use those, else assign the same parameters to each layer
        if len(act_args) != 0:
            if isinstance(act_args[0], tuple):  # the original swapped isinstance's arguments
                l_act_args = act_args[i]
        # Only applies to conv layers;
        # conv_recurrent and conv_reservoir don't use activator parameter sharing
        if 'conv' in layer_pars['layer_type']:
            layer_act_types.append(act_type.value)
            layer_act_args.append((interpreted_shapes[i + 1], True, *l_act_args))
        elif 'reservoir' in layer_pars['layer_type']:
            layer_act_types.append(act_type.value)
            layer_act_args.append((layer_shapes[i + 1], False, *l_act_args,
                                   *layer_pars['neuron_parameters']))
        else:
            layer_act_types.append(act_type.value)
            layer_act_args.append((layer_shapes[i + 1], False, *l_act_args))
    return layer_act_types, layer_act_args
dc45a5cfa242826692119720a248bc0c6d18dfb2
20,090
from typing import Counter, List, Tuple


def order_counts(counts: Counter[Tuple[int, ...]]) -> List[int]:
    """
    Helper method for organising two-qubit correlations.

    :param counts: Counter object as returned by BackendResult, giving
        two-qubit counts for the desired correlation
    :type counts: Counter[Tuple[int, ...]]
    :return: A four-element list, giving counts for the (0,0), (0,1), (1,0)
        and (1,1) states in order.
    :rtype: List[int]
    """
    ordered_counts = [0, 0, 0, 0]
    if (0, 0) in counts:
        ordered_counts[0] = counts[(0, 0)]
    if (0, 1) in counts:
        ordered_counts[1] = counts[(0, 1)]
    if (1, 0) in counts:
        ordered_counts[2] = counts[(1, 0)]
    if (1, 1) in counts:
        ordered_counts[3] = counts[(1, 1)]
    return ordered_counts
7c538da5655d00da4399e0b33ededf4c86978848
20,091
def cfm2ft3_h(cfm):
    """cfm -> ft^3/h"""
    return 60 * cfm
e22073590b1d89ef44e99b8a2bf3e437f26e4d96
20,092
def boundary_substraction(
    *,
    boundary: tuple[tuple[int, ...], ...],
    subtracted: tuple[tuple[int, ...], ...],
) -> tuple[tuple[int, ...], ...]:
    """From a boundary composed of tuples of node number segments, subtracts
    the subset of those segments contained in the subtracted tuple.

    Arguments:
        boundary (tuple[tuple[int, ...], ...]): A tuple of tuples of ints,
            where each integer is a node number in sequence along a discrete
            boundary.
        subtracted (tuple[tuple[int, ...], ...]): A subset of the boundary.

    Returns:
        tuple[tuple[int, ...], ...]: The difference of boundary less
        subtracted.
    """
    output = tuple(
        boundary_i for boundary_i in boundary if boundary_i not in subtracted
    )
    return output
67047475ddff113c8b34637cb4686fb37b8377ee
20,093
from datetime import datetime
from typing import Optional


def get_daily_filename(date: Optional[datetime] = None) -> str:
    """Return the filename for the given date in the format
    'yyyy-mm-dd-doy-dow.md' (ISO date, day of year, day of week).

    :param date [datetime.datetime]: Date for the filename; defaults to today.
    :return (str): Filename
    """
    if date is None:
        # A `datetime.today()` default argument would be evaluated only once,
        # at definition time, so resolve "today" on each call instead.
        date = datetime.today()
    iso_date = date.strftime("%Y-%m-%d")  # ISO date
    doy = date.strftime("%j")             # Day of year
    dow = date.strftime("%a").lower()     # Day of week
    return f"{iso_date}-{doy}-{dow}.md"
0e954aec6de6914842417315b43a36ecbca9acdf
20,094
def extract_channels(df, gb_boxes):
    """Define a column reporting the marketing channel through which the
    customer was acquired."""
    channels = gb_boxes['channel'].first().str[7:]
    channels.name = 'channel'
    df = df.join(channels)
    return df
753d994700c022e8d42d5bac542364c3d7933c8c
20,095
def get_num_pages(page_souped):
    """
    This gets the number of pages to search through.

    :param page_souped: <class 'bs4.BeautifulSoup'>, a page that has been
        passed through BeautifulSoup().
    :return: int, the number of pages to search through
    """
    span_parsed = page_souped.find_all(
        lambda tag: tag.name == 'span' and tag.get('class') == ['pageRange'])
    try:
        span_parsed_contents_list = span_parsed[0].contents[0].split(' ')
    except IndexError:
        return 0
    return int(span_parsed_contents_list[-1])
6930feb28b8a264433cbce93ed573b0c6172250b
20,096
import argparse
import os


def parse_args():
    """Parse command arguments."""
    parser = argparse.ArgumentParser()
    parser.add_argument('input', type=str, help="memory statistic result file")
    args = parser.parse_args()
    assert os.path.exists(args.input), "input file does not exist"
    return args
b936d5f5af2494db1526a9a87f0b8b64e19c0ca7
20,098
def readHysteresisDelayfromFile(timeZero):
    """
    Read and create hysteresis delays from file. The values are given
    relative to the time zero (so -20 means 20 ps before the set t0). The
    returned values are the absolute delays in ps for setting the stage.

    :param timeZero:
    :return: delayVector
    """
    vector = []
    delayVector = []
    with open('MeasureParams\\HysteresisDelayParams.txt') as f:
        for line in f:
            vector.append(line.strip().split(','))
    for entry in vector[0]:
        delayVector.append(timeZero + float(entry))
    return delayVector
8e4a790669c4284f813ea21178332160d6eb983e
20,099
import sys
import os


def WriteSource(base_name, dir_from_src, output_dir, global_string_map):
    """Writes C++ header/cc source files for the given map of string variables.

    Args:
        base_name: The basename of the file, without the extension.
        dir_from_src: Path from src to the directory that will contain the
            file, using forward slashes.
        output_dir: Directory to output the sources to.
        global_string_map: Map of variable names to their string values. These
            variables will be available as globals.
    """
    copyright_header_template = (
        '// Copyright 2018 The Chromium Authors. All rights reserved.\n'
        '// Use of this source code is governed by a BSD-style license '
        'that can be\n'
        '// found in the LICENSE file.\n\n'
        '// This file was generated by running:\n'
        '//   %s')
    copyright_header = copyright_header_template % (' '.join(sys.argv))

    # Write header file.
    externs = []
    for name in global_string_map.keys():
        externs += ['extern const char %s[];' % name]

    temp = '_'.join(dir_from_src.split('/') + [base_name])
    define = temp.upper() + '_H_'
    header = '\n'.join([
        copyright_header,
        '',
        '#ifndef ' + define,
        '#define ' + define,
        '',
        '\n'.join(externs),
        '',
        '#endif  // ' + define])
    header += '\n'

    with open(os.path.join(output_dir, base_name + '.h'), 'w') as f:
        f.write(header)

    # Write cc file.
    def EscapeLine(line):
        return line.replace('\\', '\\\\').replace('"', '\\"')

    definitions = []
    for name, contents in global_string_map.items():
        lines = []
        if '\n' not in contents:
            lines = ['    "%s"' % EscapeLine(contents)]
        else:
            for line in contents.split('\n'):
                lines += ['    "%s\\n"' % EscapeLine(line)]
        definitions += ['const char %s[] =\n%s;' % (name, '\n'.join(lines))]

    cc = '\n'.join([
        copyright_header,
        '',
        '#include "%s"' % (dir_from_src + '/' + base_name + '.h'),
        '',
        '\n'.join(definitions)])
    cc += '\n'

    with open(os.path.join(output_dir, base_name + '.cc'), 'w') as f:
        f.write(cc)
2784c04c79eb24dabd6e8704ffe1e37a24f369fb
20,100
def binary_srch_exact(a, t, lo, hi):
    """Binary search for t in the sorted slice a[lo..hi] of a rotated sorted
    array a. Returns the index of t, or -1 if not found."""
    while hi >= lo:
        mid = (lo + hi) // 2
        if a[mid] == t:
            return mid
        elif a[mid] < t:
            lo = mid + 1
        else:
            hi = mid - 1
    return -1
689b185da6594c6b244b94cc5f07f850958e473e
20,101
def bucket_sort(m):
    """
    bucketSort(arr[], n)
    1) Create n empty buckets (or lists).
    2) Do the following for every array element arr[i]:
       a) Insert arr[i] into bucket[n * array[i]]
    3) Sort the individual buckets using insertion sort.
    4) Concatenate all sorted buckets.
    """
    n = 4  # number of buckets to use
    buckets = [[] for _ in range(n)]
    for x in m:
        pos = (x // n) - 1
        if pos >= n:  # the original `pos > n` let pos == n index past the last bucket
            pos = n - 1
        elif pos < 0:
            pos = 0
        buckets[pos].append(x)
    result = []
    for bucket in buckets:
        result += sorted(bucket)
    return result
8ca5c6df6ea672372910a5340f25a03d293dea97
20,102
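A short hand-traced example for bucket_sort above; with the original `pos > n` guard a value such as 22 (22 // 4 - 1 == 4) would have indexed past the four buckets, which the `pos >= n` clamp now prevents:

bucket_sort([5, 1, 22, 9, 14, 3])   # -> [1, 3, 5, 9, 14, 22]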
def vcr(vcr):
    """Set parameters for VCR."""
    vcr.ignore_localhost = True
    return vcr
c84b5228dec1df85c8b0128104b9c6cfe99997a3
20,104
def copy_worksheet(workbook, worksheet):
    """
    Creates a copy of the worksheet.

    :param workbook: The workbook the worksheet is from.
    :param worksheet: Worksheet to copy.
    :return: A copy of the worksheet.
    """
    return workbook.copy_worksheet(worksheet)
d650f9f056c1fa951dc1de28c362015726798f26
20,105
def get_answer(current_row):
    """Returns the answer text value of an HTML element."""
    return current_row.find_all("td", class_="risp")[0].text.strip()
14e91c250d6f28b98534fe7839e47b3650750152
20,106
def _split_scene(images, cameras, top_down, captions):
    """Splits a scene into query and target.

    Args:
        images: A tensor containing images.
        cameras: A tensor containing cameras.
        top_down: A tensor containing the scene seen from top.
        captions: A tensor containing captions.

    Returns:
        A tuple (query, target). The query is a tuple where the first element
        is the sequence of 9 (images, cameras, captions) which can be given to
        the model as context. The second element in the query is the camera
        angle of the viewpoint to reconstruct. The target contains the image
        corresponding to the queried viewpoint, the text description from that
        viewpoint and an image of the scene viewed from above.
    """
    context_image = images[:-1, :, :, :]
    context_camera = cameras[:-1, :]
    context_caption = captions[:-1]
    target_image = images[-1, :, :, :]
    target_camera = cameras[-1, :]
    target_caption = captions[-1]
    query = ((context_image, context_camera, context_caption), target_camera)
    target = (target_image, target_caption, top_down)
    return query, target
cb1aa58dcd3f3bd33f113fef7b38399effa484a7
20,108
def reconstruct_keyphrase(keyphrase_components, hyphenated):
    """Reconstruct a keyphrase from the original text, based on the
    previously saved list of hyphenated words.

    :param keyphrase_components: list of the keyphrase's component words
    :param hyphenated: list of hyphenated words from the original text
    """
    for i, _ in enumerate(keyphrase_components):
        if i == len(keyphrase_components) - 1:
            break
        if keyphrase_components[i] + '-' + keyphrase_components[i + 1] in hyphenated:
            original_word = keyphrase_components[i] + '-' + keyphrase_components[i + 1]
            # Remove the two components and replace them with the rejoined word.
            keyphrase_components.pop(i)
            keyphrase_components.pop(i)
            keyphrase_components.insert(i, original_word)
    keyphrase = ' '.join(keyphrase_components)
    return keyphrase
f88d1bd073ca66d0a786bd6524d185ac071e5cbb
20,109
import requests
from pathlib import Path


def download_url(url, download_path):
    """
    Download a URL to a file.

    url: URL to download
    download_path: path where to save the downloaded content
    """
    r = requests.get(url, stream=True)
    if not Path(download_path).exists():
        print("Downloading file {}".format(url))
        with open(download_path, "wb") as dest_file:
            for chunk in r.iter_content(chunk_size=1024):
                if chunk:
                    dest_file.write(chunk)
        return 1
    else:
        return 0
2d95aeb5dbad228fa8a09279b5084f750c5b4362
20,110
def truncate(message, from_start, from_end=None):
    """
    Truncate the string *message* to at most *from_start* characters and
    insert an ellipsis (`...`) in place of the additional content. If
    *from_end* is specified, the same will be applied to the end of the
    string.
    """
    if len(message) <= (from_start + (from_end or 0) + 3):
        return message
    part1, part2 = message[:from_start], ''
    if from_end and len(message) > from_end:
        if len(message) - from_start < from_end:
            from_end -= len(message) - from_start
        part2 = message[-from_end:]
    return part1 + '...' + part2
44e37164aff5912d37a3dc27217e498b1c14efff
20,112
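Two hand-checked calls against the truncate code above, for illustration:

truncate('hello wonderful world', 5, 5)   # -> 'hello...world'
truncate('short', 5, 5)                   # -> 'short' (already within bounds)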
def get_key_padding_mask(padded_input, pad_idx):
    """Creates a binary mask to prevent attention to padded locations.

    Arguments
    ---------
    padded_input : torch.Tensor
        Padded input.
    pad_idx : int
        Index used for the padding element.

    Example
    -------
    >>> a = torch.LongTensor([[1,1,0], [2,3,0], [4,5,0]])
    >>> get_key_padding_mask(a, pad_idx=0)
    tensor([[False, False,  True],
            [False, False,  True],
            [False, False,  True]])
    """
    if len(padded_input.shape) == 4:
        bz, time, ch1, ch2 = padded_input.shape
        padded_input = padded_input.reshape(bz, time, ch1 * ch2)
    key_padded_mask = padded_input.eq(pad_idx).to(padded_input.device)
    # If the input is more than 2d, mask the locations where they are silence
    # across all channels.
    if len(padded_input.shape) > 2:
        key_padded_mask = key_padded_mask.float().prod(dim=-1).bool()
        return key_padded_mask.detach()
    return key_padded_mask.detach()
234d5f947b7042c5edad68e9b8162e4bbf6963f3
20,113
def test_pdf():
    """Return the binary data of test_pdf.pdf for testing purposes."""
    with open("./test_data/test_pdf.pdf", "rb") as pdff:
        return pdff.read()
e8096078d30b04f99cdf1bae7c28a24f191bd004
20,114
from typing import Dict, Iterable, List

import torch


def cluster_strings(strings: Iterable[str]) -> torch.Tensor:
    """
    Given a list of strings, assign a clustering where each pair of identical
    ground-truth strings is in the same cluster.

    Return a torch.LongTensor containing the cluster id of each ground truth.
    """
    cluster_id_by_truth: Dict[str, int] = {}
    cluster_l: List[int] = []
    for truth in strings:
        cluster_id = cluster_id_by_truth.setdefault(truth, len(cluster_id_by_truth))
        cluster_l.append(cluster_id)
    return torch.tensor(cluster_l, dtype=torch.int64)
7821fa946e7a07be13411f54913ee71f1e67dc3a
20,116
import copy


def merge_configs(default, overwrite):
    """From `cookiecutter <https://github.com/audreyr/cookiecutter>`__"""
    new_config = copy.deepcopy(default)
    for key, value in overwrite.items():
        if isinstance(value, dict):
            new_config[key] = merge_configs(default[key], value)
        else:
            new_config[key] = value
    return new_config
073c21d334c0454efec436ea5b95eb76b66345df
20,117
def format_conditions(**kwargs):
    """Converts an arbitrary number of lists to a list of dictionaries.
    Useful for specifying the conditions in pmutt.io.chemkin.write_EA.

    Parameters
    ----------
    kwargs : keyword arguments
        Lists of the conditions where each index corresponds to a run.

    Returns
    -------
    conditions : list of dict
        A list where each element is a dictionary containing the conditions
        for a specific run.
    """
    conditions = []
    for cond_name, cond_values in kwargs.items():
        for i, cond_value in enumerate(cond_values):
            try:
                conditions[i][cond_name] = cond_value
            except (IndexError, KeyError):
                conditions.append({cond_name: cond_value})
    return conditions
a7ee7dcb6abff418c9795c9a1e604cab2bfbbe19
20,118
def Split(*_args):
    """Fake Split"""
    return []
93c8528c923677a84c86942718f7ad4a7770048e
20,119
def extension_to_type(ext):
    """Return the notebook type for a given file extension."""
    if ext == 'ipynb':
        return 'jupyter'
    raise RuntimeError(f"unknown file extension {ext}")
94068035284f837c990ab825df3b25fefced6ea4
20,120
import sys


def export(function):
    """
    Decorator to mark a function as "exported" by adding it to the module's
    :data:`__all__`. This is useful to avoid exposing module internals to
    importers.
    """
    module = sys.modules[function.__module__]
    if hasattr(module, '__all__'):
        module.__all__.append(function.__name__)  # type: ignore
    else:
        module.__all__ = [function.__name__]  # type: ignore
    return function
c138561c9d03af398a2b8312d1a67acf0119cae8
20,121
import math


def _cosine(a, b):
    """Return the cosine similarity of two sets:
    len(a & b) / (sqrt(len(a)) * sqrt(len(b))).
    """
    return 1. * len(a & b) / (math.sqrt(len(a)) * math.sqrt(len(b)))
7777b1fd79fa7964771852493751f5e01b226e99
20,122
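For example, _cosine treats its arguments as sets, so for two 3-element sets sharing 2 elements it yields 2 / (sqrt(3) * sqrt(3)):

_cosine({1, 2, 3}, {2, 3, 4})   # -> 2 / 3, roughly 0.667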
def years_between_dates(begin_date, end_date):
    """
    Calculate the number of years between two dates.
    """
    diff = end_date - begin_date
    seconds = diff.total_seconds()
    seconds_per_year = 60 * 60 * 24 * 365
    years_elapsed = seconds / seconds_per_year
    return years_elapsed
a98a6675a0e8d422109dc15765f9f6a144175d0e
20,124
def prefix(values, content):
    """
    Discover the start and separate it from the content.

    :param list[str] values: Will scan through up to the one ``content``
        starts with.
    :param str content: The value to scan, will separate from the start if
        found.
    :raises: :class:`ValueError` if no start matches.

    .. code-block:: py

        >>> prefix(('-', '.', '!'), './echo')
        ('.', '/echo')
    """
    for value in values:
        if content.startswith(value):
            break
    else:
        raise ValueError('invalid start')
    size = len(value)
    content = content[size:]
    return (value, content)
2bce07c6a33925434768e339e27ad44bbe543a26
20,125
import os


def get_files_with_extension(path, extension, sort=True):
    """
    Gets all files in the given directory with a given extension.

    :param str path: the directory from which to retrieve the files.
    :param str extension: the extension of the files to be retrieved.
    :param bool sort: whether to sort the list of files based on file name.
    :rtype: list[str]
    :return: the list of files in the given directory with the required
        extension.
    """
    file_list = [os.path.join(path, f) for f in os.listdir(path)
                 if f.endswith('.' + extension)]
    if sort:
        file_list.sort()
    return file_list
cc03d74d56b0d64276df964428f675b5448302c4
20,126
import torch


def point_norm(points_int, width, height, device):
    """Normalize pixel coordinates to the range -1 to 1."""
    points_int_clone = torch.tensor(points_int, device=device)
    x = ((points_int_clone * 2)[..., 0] / (width - 1) - 1)
    y = ((points_int_clone * 2)[..., 1] / (height - 1) - 1)
    return torch.stack([x, y], dim=-1).contiguous().view(-1, 2)
0e473fc4d3c458b45d1a572cc4c4a5dcc24c4c30
20,127
from typing import List


def get_row(row: List[str], cells_per_row: int, cell_tag: str):
    """
    :param row: list of cell contents
    :param cells_per_row: how many cells per row
    :param cell_tag: tag name for the cell, td and th being the known
        possibilities.
    :return: html describing the row
    """
    html_row = "\n<tr>\n"
    for i, cell in enumerate(row):
        if i == cells_per_row:
            # sub-divide natural row width:
            html_row += "\n</tr>\n<tr>"
        html_row += "<{}>".format(cell_tag) + cell + "</{}>".format(cell_tag)
    return html_row + "\n</tr>"
cb919c311af3314e2c1eb25bb0949c836312f96c
20,129
def sound_to_ts_K(sound, eq_type='gill_manual', e_a=None):
    """
    Convert speed of sound to sonic temperature in kelvin.
    """
    if eq_type == "gill_manual":
        return (sound ** 2) / 403
    if eq_type == "s_audio":
        return ((sound / 331.3) ** 2 - 1) * 273.15 + 273.15
    else:
        return None
137c79c871d199576f646b893579f93b45b0ddc3
20,130
def gen(symbol, s1, s2, s3):
    """
    Generate one quaternary formula.

    :param symbol: Mask object
    :param s1: Mask object
    :param s2: Mask object
    :param s3: Mask object
    """
    result = '(' + symbol.outer
    for s in [s1, s2, s3]:
        result += ','
        if len(s.outer) == 0:
            result += '_'
        else:
            result += s.inner
    result += ')'
    return result
a3817b32521d420caa90f0b3c6511395030e48a9
20,131
import os


def get_env_vars(config, env_name):
    """Return a dict of environment variables for the named environment."""
    if not config.has_section(env_name):
        raise ValueError(f"{env_name} environment doesn't exist")
    config_section = config[env_name]
    return {
        'WINEPREFIX': os.path.expanduser(config_section['prefix']),
        'WINEARCH': config_section['arch'],
        'LANG': config_section['lang'],
    }
e82731973a8cbd687b501108823298f8ded5e7a9
20,132
def is_night(transaction_date):
    """Compute whether the time is at night, defined here as between 00:00
    and 06:59 (the hour value is 6 or less).

    Args:
        transaction_date: datetime variable

    Returns:
        int: 1 if the transaction happened at night, 0 otherwise
    """
    # Get the hour of the transaction
    tx_hour = transaction_date.hour
    # Binary value: 1 if the hour is 6 or less, and 0 otherwise
    is_night = tx_hour <= 6
    return int(is_night)
e9cff06adec0360e4850d96ec1b2004c5db2a178
20,134
def comp(z1, z2, tol):
    """Return a bool indicating whether the error between z1 and z2 is <= tol.

    If z2 is non-zero and ``|z1| > 1`` the error is normalized by ``|z1|``,
    so if you want the absolute error, call this as ``comp(z1 - z2, 0, tol)``.
    """
    if not z1:
        z1, z2 = z2, z1
    if not z1:
        return True
    diff = abs(z1 - z2)
    az1 = abs(z1)
    if z2 and az1 > 1:
        return diff / az1 <= tol
    else:
        return diff <= tol
6c51adbae99f9d642b7e46010c96b3b1cf8ba44d
20,135
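A brief illustration of comp's relative-versus-absolute behaviour, traced by hand against the code above:

comp(100.0, 100.5, 0.01)   # True: |z1| > 1, so the error 0.5 is normalized to 0.005
comp(0.5, 0.52, 0.01)      # False: |z1| <= 1, so the absolute error 0.02 exceeds tol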
def _as_dict(module):
    """Return publicly named values in module's __dict__."""
    try:
        # The original used the Python 2-only iteritems(), which always
        # raised AttributeError on Python 3 and returned {}.
        return {k: v for k, v in module.__dict__.items()
                if not k.startswith('_')}
    except AttributeError:
        return {}
5904d111040219f7f4b4c2f82e4d9f86cbe1c401
20,137
def convert_torrent_status(qbtstatus, qbtforce=False):
    """Take in a qbt state and convert it to a utorrent status."""
    utstatus = ''
    # DL in progress (percent progress < 1000)
    if qbtstatus == 'error':
        utstatus = '152'
    elif qbtstatus == 'pausedUP':
        # I think this is the closest thing QBT has to
        # the UT status of 'finished'. If you set your
        # config to pause completed torrents after hitting a share
        # ratio, this is the status, which UT would call finished.
        # MCM reads this as 'stopped'
        utstatus = '136'
    elif qbtstatus == 'pausedDL' and qbtforce is True:
        utstatus = '169'
    elif qbtstatus == 'pausedDL' and qbtforce is False:
        utstatus = '233'
    elif qbtstatus == 'queuedUP':
        utstatus = '200'
    elif qbtstatus == 'queuedDL':
        utstatus = '200'
    elif qbtstatus == 'uploading':
        utstatus = '201'
    elif qbtstatus == 'stalledUP':
        utstatus = '201'
    elif qbtstatus == 'checkingUP':
        utstatus = '130'
    elif qbtstatus == 'checkingDL':
        utstatus = '130'
    elif qbtstatus == 'downloading' and qbtforce is True:
        utstatus = '137'
    elif qbtstatus == 'downloading' and qbtforce is False:
        utstatus = '201'
    elif qbtstatus == 'stalledDL':
        utstatus = '201'
    elif qbtstatus == 'metaDL':
        utstatus = '201'
    else:
        # Just set the default to 201
        utstatus = '201'
    return utstatus
367004b9e5caf64dc922ce5a78696754aabac624
20,139
def cubes():
    """Noodle cubes list."""
    return [
        'C1540484434_1_001',
        'C1540484434_1_002',
        'C1540484434_1_003',
    ]
682ab3d1c74e7563ad5ac48f2a487c87dcdc2596
20,140
def str_count(count, word):
    """
    str_count(10, 'word') -> '10 words'
    str_count(1, 'word') -> '1 word'
    """
    return '%d %s%s' % (count, word, '' if count == 1 else 's')
79d8f777537931d911f2e3b06ac1cf1bc1d75bba
20,142
from pathlib import Path


def default_gobbli_dir() -> Path:
    """
    Returns:
        The default directory to be used to store gobbli data if there's no
        user-specified default.
    """
    return Path.home() / ".gobbli"
5793c063ad0212934a793e8a0f1da0050d3bae8e
20,143
def avg_RT(tweets_candidat):
    """Compute the average number of retweets per candidate."""
    nombre_tweets = len(tweets_candidat)
    RTStotal = 0
    # Walk the list of tweets to count the total number of retweets (RTS).
    for a in tweets_candidat:
        RTStotal += a["RTS"]
    return RTStotal / nombre_tweets
8a3abb0bb03a211d5fd2fd93b22b2d223836b772
20,144
import argparse
import sys


def parse_args():
    """Parse command line arguments."""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description=__doc__)
    parser.add_argument("-a", "--array", action="store_true",
                        help="Format the output as a large array.")
    parser.add_argument("input_file", type=argparse.FileType('r'),
                        nargs='?', default=sys.stdin,
                        help="The input file (FSDB file) to read")
    parser.add_argument("output_file", type=argparse.FileType('w'),
                        nargs='?', default=sys.stdout,
                        help="The output file (json file) to write back out")
    args = parser.parse_args()
    return args
1ec059da12c1a07fd7f2d72b8204fb9a093880be
20,147
def get_first_same_date_indexes_in_sublists(dates1, dates2, index1, index2):
    """
    This method gets two lists of dates and two starting positions, for the
    first and the second date lists respectively. It then finds the closest
    date in time that is contained in both lists from those two positions on,
    and returns the indices of the found date for both of the arrays.

    :param dates1: list
        The first list of dates
    :param dates2: list
        The second list of dates
    :param index1: int
        The starting position for the first list
    :param index2: int
        The starting position for the second list
    :return: int, int
        Two indices representing the position of the first encountered
        matching date
    """
    if index1 >= len(dates1) or index2 >= len(dates2):
        return -1, -1
    if dates1[index1].seconds == dates2[index2].seconds:
        return index1, index2
    if dates1[index1].seconds < dates2[index2].seconds:
        return get_first_same_date_indexes_in_sublists(dates1, dates2, index1 + 1, index2)
    return get_first_same_date_indexes_in_sublists(dates1, dates2, index1, index2 + 1)
8bf21dfe9ca386bafeeb20032f3b0b5b3101ddd8
20,148
from typing import List


def keys_remove_dollar_suffixes(keys: List[str]) -> List[str]:
    """Removes dollar suffixes from keys."""
    result = []
    for key in keys:
        i = key.find("$")
        if i != -1:
            result.append(key[:i])
        else:
            result.append(key)
    return result
355f521b04fd00a27130bf714559208b062fe79a
20,150
import six
import sys


def wrap_exception(exception_class):
    """Helper decorator method to modify the raised exception class to
    `exception_class` but keep the message and trace intact.

    :param exception_class: class to wrap the raised exception with
    """
    def generic_exception(method):
        def wrapper(*args, **kwargs):
            try:
                return method(*args, **kwargs)
            except Exception as e:
                six.reraise(
                    exception_class,
                    exception_class(str(e)),
                    sys.exc_info()[2])
        return wrapper
    return generic_exception
64a4ff435a11fb4e2eb4bd68a15a5c50cd14cff4
20,151
def _get_or_create_personal_context(user):
    """Get or create a personal context for a user."""
    personal_context = user.get_or_create_object_context(
        context=1,
        name='Personal Context for {0}'.format(user.id),
        description='')
    return personal_context
6dc3cce9a0073db480608d1766e4892dfa66e310
20,152
def MyMap(map, r, distance=3, offset=0):
    """Map row r to a column within `distance` residues.

    Returns -1 if no mapping is possible within `distance` residues,
    and 0 if the mapping is out of bounds.
    """
    if r < map.getRowFrom() or r > map.getRowTo():
        return 0
    # Search downwards. (The original used min() here, which ignored the
    # distance limit; max() bounds the search as the docstring describes.)
    b = max(1, r - distance)
    a = r
    while a >= b:
        c = map.mapRowToCol(a)
        if c != 0:
            return c + offset
        a -= 1
    # Search upwards. (Likewise, the original max() was inverted; min()
    # respects both the end of the map and the distance limit.)
    b = min(map.getRowTo(), r + distance)
    a = r
    while a <= b:
        c = map.mapRowToCol(a)
        if c != 0:
            return c + offset
        a += 1
    return -1
9a459590e9b6ac64ac948d613ae8207eef70cac7
20,153
import time


def keep_authenticated(params):
    """Re-authenticate if needed, or wait for authentication details to
    become available."""
    request_start = time.time()
    self = params.get('self')
    auth_details = self.auth_details
    expiration_time = auth_details.get("expiration_time", 0) if auth_details else 0
    if self.leader and not auth_details:
        self.authenticate()
        auth_details = self.auth_details
    else:
        retries = 10
        while retries:
            if expiration_time - request_start <= 0:
                time.sleep(1)
                auth_details = self.auth_details
                if auth_details:
                    break
            retries -= 1
    return params
b8c44bc015059dd84a7b52d6ee7e7d1cd4c02006
20,154
import subprocess
import sys


def _run_command_get_output(args, success_output):
    """Runs a shell command and returns the command output."""
    p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    pout, _ = p.communicate()
    if p.returncode == 0:
        return success_output
    # For Python 3 only:
    if isinstance(pout, bytes) and sys.version_info >= (3, ):
        pout = pout.decode('utf-8')
    return pout
eb9afae84317a642ece5330fbfa8476870094e6b
20,155
def add_text_element(param_args):
    """Generates a string that represents an html text block. The input
    string should be wrapped in proper html tags.

    param_args - a dictionary with the following arguments:
        param_args['text'] - a string

    returns - a string
    """
    return param_args['text']
58418902631d2a082b340541a387ffba6f1db8ac
20,156
import numbers


def all_numeric(data):
    """
    Tests if all values in an iterable are numeric.

    Args:
        data: An iterable claiming to contain numbers

    Returns:
        A list containing a boolean indicating whether all of the values were
        numbers, followed by a list of the genuine numeric values
    """
    nums = [dt for dt in data if isinstance(dt, numbers.Number)]
    return [len(nums) == len(data), nums]
b7ed6f5c37cb5cf41a3c5a337e686a4144d531dc
20,157
def encode_decode_recognizer__simple_test(ctx, self, img, img_metas, **kwargs):
    """Rewrite `simple_test` of EncodeDecodeRecognizer for the default backend.

    Rewrite this function to return the results early, avoiding post
    processing. The process is not suitable for exporting to backends and is
    better implemented in the SDK.

    Args:
        ctx (ContextCaller): The context with additional information.
        self: The instance of the class EncodeDecodeRecognizer.
        img (Tensor): Input images of shape (N, C, H, W). Typically these
            should be mean centered and std scaled.
        img_metas (list[dict]): A list of image info dicts where each dict
            has: 'img_shape', 'scale_factor', 'flip', and may also contain
            'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. For
            details on the values of these keys, see
            :class:`mmdet.datasets.pipelines.Collect`.

    Returns:
        out_dec (Tensor): A feature map output from a decoder, of shape
        (N, H, W).
    """
    feat = self.extract_feat(img)
    out_enc = None
    if self.encoder is not None:
        out_enc = self.encoder(feat, img_metas)
    out_dec = self.decoder(feat, out_enc, None, img_metas, train_mode=False)
    return out_dec
87d9171159ab3da56e949338a7bbc0db8e632852
20,158
def create_names(instances, Ns, str_format):
    """
    Create a list of names for spectra loaded from task instances.

    :param instances:
        A list of task instances where spectra were loaded. This should be
        length `N` long.
    :param Ns:
        A list containing the number of spectra loaded from each task
        instance. This should have the same length as `instances`.
    :param str_format:
        A string format for the names. The available keywords include all
        parameters associated with the task, as well as the `star_index` and
        the `spectrum_index`.
    :returns:
        A list with length `sum(Ns)` that contains the given names for all
        the spectra loaded.
    """
    names = []
    for star_index, (instance, N) in enumerate(zip(instances, Ns)):
        kwds = instance.parameters.copy()
        kwds.update(star_index=star_index)
        for index in range(N):
            kwds["spectrum_index"] = index
            names.append(str_format.format(**kwds))
    return names
f7d734e712769d86c3ca31483790c6a1e46c3962
20,160
def bow_feature(img, extractor_bow, detector):
    """
    Compute the image descriptor according to BOW (bag of words).

    extractor_bow: bag-of-words extractor
    detector: feature detector
    """
    return extractor_bow.compute(img, detector.detect(img))
aa5056995a3d93135f6b9a8ac18e1b3a1c284bba
20,161
def find_cheapest_fuel(crabs):
    """
    Find the cheapest fuel-consumption alignment.

    :param crabs: list of crab positions
    :return: cheapest sum of fuel consumption
    """
    min_fuel_sum = sum(crabs)
    for align_position in range(min(crabs), max(crabs) + 1):
        current_fuel_sum = 0
        for crab_position in crabs:
            current_fuel_sum += abs(align_position - crab_position)
        if current_fuel_sum < min_fuel_sum:
            min_fuel_sum = current_fuel_sum
    return min_fuel_sum
9af48709b08aa62d2ac0811482b6e6f62b1c6c81
20,162
def fluid_saturation(vol_fluid=50, vol_pore=100):
    """Returns the fluid saturation given the fluid volume and pore volume."""
    return float(vol_fluid / vol_pore)
30d608e384076ae94626a7f4351e58fe3d0e05bc
20,163
def insert_shift_array_sorted(arr, val):
    """Insert a value into a sorted array at its numerically correct index."""
    new = []
    # Find the location to insert.
    c = 0
    found = False
    for x in arr:
        if x > val:  # the original compared arr[x-1], misusing the value as an index
            found = True
            break
        c += 1
    if found:
        new.extend(arr[:c])
        new.append(val)
        new.extend(arr[c:])
        # Chaining the calls doesn't work because extend()/append() return None:
        # new.extend(arr[:c]).append(val).extend(arr[c:])
        # raises AttributeError: 'NoneType' object has no attribute 'append'
    else:
        # val is >= every element, so it belongs at the end (the original
        # returned arr unchanged here, silently dropping val).
        new = arr[:] + [val]
    return new
c62ed6cdcfd6eba202c0c500eea1d42793438c33
20,164
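With the index fix above, a couple of hand-traced calls for illustration:

insert_shift_array_sorted([1, 3, 5], 4)   # -> [1, 3, 4, 5]
insert_shift_array_sorted([1, 3, 5], 9)   # -> [1, 3, 5, 9] (appended at the end)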
def get_friendfeed_name(friendfeed_profile, friendfeed_name):
    """Looks into the profile to get the user's real name."""
    try:
        name = friendfeed_profile['name']
    except KeyError:
        try:
            name = friendfeed_profile['nickname']
        except KeyError:
            name = friendfeed_name
    return name
abf560c4687e74a95fe5168c769cde059be11466
20,166
def connected_components(edges):
    """
    Computes the connected components.

    @param edges    edges
    @return         dictionary { vertex: id of connected component }
    """
    res = {}
    for k in edges:
        for _ in k[:2]:
            if _ not in res:
                res[_] = _
    modif = 1
    while modif > 0:
        modif = 0
        for k in edges:
            a, b = k[:2]
            r, s = res[a], res[b]
            if r != s:
                m = min(res[a], res[b])
                res[a] = res[b] = m
                modif += 1
    return res
5040377e0b6cdbf6365802f3378496e14741e59d
20,168
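A small worked example for connected_components above; each vertex maps to the smallest vertex label in its component:

connected_components([(1, 2), (2, 3), (4, 5)])
# -> {1: 1, 2: 1, 3: 1, 4: 4, 5: 4}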
import time


def timef():
    """Returns time as a human-readable formatted string."""
    return str(time.strftime('%d. %m. %Y %H:%M:%S', time.localtime()))
e4844d69ec27da038ffc530bd6b2eb6fffaf81bf
20,169
def total_duration(geo_data, skip_inactive):
    """Return the total duration in seconds.

    :param geo_data: dict with geographic data
    :param skip_inactive: consider only trackpoints with movement (active)
    """
    rel_time = geo_data["relative_timestamps"][-1]
    if skip_inactive:
        inactive_time = 0
        for diff_time, active in zip(geo_data["differential_timestamps"],
                                     geo_data["active"]):
            if not active:
                inactive_time += diff_time
        rel_time = rel_time - inactive_time
    return rel_time
a1638e15695ff8bcd694e46b4071b59f4ae20534
20,170
import ast
import inspect


def call_or_eval_one(item, namespace):
    """
    Given a mix of callables and string expressions, call or eval them.

    Parameters
    ----------
    item : String | Callable
        Each item must be a stream name, field name, a valid Python
        expression, or a callable. The signature of the callable may include
        any valid Python identifiers provided in the namespace.
    namespace : Dict
        The namespace that the item is evaluated against.

    Returns
    -------
    result : Any

    Raises
    ------
    ValueError
        If input is not String or Callable
    BadExpression
        If input is String and eval(...) raises an error
    """
    # If it is a callable, call it.
    if callable(item):
        # Inspect the callable's signature. For each parameter, find an
        # item in our namespace with a matching name. This is similar
        # to the "magic" of pytest fixtures.
        parameters = inspect.signature(item).parameters
        kwargs = {}
        for name, parameter in parameters.items():
            try:
                kwargs[name] = namespace[name]
            except KeyError:
                if parameter.default is parameter.empty:
                    raise ValueError(f"Cannot find match for parameter {name}")
                # Otherwise, it's an optional parameter, so skip it.
        return item(**kwargs)
    elif isinstance(item, str):
        # If it is a key in our namespace, look it up.
        try:
            # This handles field or stream names that are not valid
            # Python identifiers (e.g. ones with spaces in them).
            return namespace[item]
        except KeyError:
            pass
        # Check whether it is valid Python syntax.
        try:
            ast.parse(item)
        except SyntaxError as err:
            raise ValueError(
                f"Could not find {item!r} in namespace or parse it as "
                "a Python expression.") from err
        # Try to evaluate it as a Python expression in the namespace.
        try:
            return eval(item, namespace)
        except Exception as err:
            raise ValueError(
                f"Could not find {item!r} in namespace or evaluate it.") from err
    else:
        raise ValueError(
            f"expected callable or string, received {item!r} of type "
            f"{type(item).__name__}")
044465936abe0f379a5ce4291b8d2c6c6d996d65
20,171
def with_simple_result(func):
    """Simple result that uses the returned value as-is."""
    def wrapper(*args, **kwargs):
        returned = func(*args, **kwargs)
        print('--RESULT--')
        print(returned)
        print('--RESULT--')
        return returned  # the original wrapper silently dropped the value
    return wrapper  # the original decorator returned None, breaking decorated functions
a93c85b2827a13ae3c127162ef9e78a1e9008f5f
20,172
def get_signed_polygon_area(points):
    """
    Get the signed area of a 2D polygon.

    :param points: list[DB.UV]
    :type points: list[DB.UV]
    :return: Area
    :rtype: float
    """
    area = 0
    j = points[len(points) - 1]
    for i in points:
        area += (j.U + i.U) * (j.V - i.V)
        j = i
    return area / 2
960d5a3e5bff125fb560580f81b5103fa8147789
20,174
def sec_to_exposure_decimation(sec):
    """
    Convert seconds to exposure and decimation.

    The algorithm is limited since it multiplies decimation by 10 until the
    resulting exposure is less than 65_535. This is not perfect because it
    limits decimation to 10_000 (the next step would be 100_000, which is
    bigger than the max decimation of 65_535).

    The max theoretical value is ~497 days; this algorithm is limited to
    ~75 days. If that is not enough for you, feel free to improve it :-)
    (max theoretical = datetime.timedelta(seconds=2**16 * 2**16 * 10E-3))
    """
    decimation = 1
    deci_millis = sec * 100
    while (2 ** 16 * decimation) < deci_millis:
        decimation *= 10
    exposure = round(deci_millis / decimation)
    return exposure, decimation
c880a371bc3aa9420de094df4815a876bb504b33
20,176
def color_int_to_rgb(value: int) -> tuple[int, int, int]:
    """Return an RGB tuple from an integer."""
    return (value >> 16, (value >> 8) & 0xFF, value & 0xFF)
ff68ff63032470d09c9222a7883b7c8848770985
20,178
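For instance, the packed integer 0xFF8040 splits into its three channel bytes:

color_int_to_rgb(0xFF8040)   # -> (255, 128, 64)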
def aggregate_data(business_data, estimated_data, area_id, lookup, lad_id):
    """Aggregate all data by output area, ready for exporting."""
    area_km2 = lookup[area_id]['area_km2']
    households = 0
    hh_fixed_access = 0
    hh_wifi_access = 0
    for item in estimated_data:
        if area_id == item['Area']:
            households += 1
            if item['hh_fixed_access'] == 1:
                hh_fixed_access += 1
            if item['hh_wifi_access'] == 1:
                hh_wifi_access += 1
    if hh_fixed_access > 0 or households > 0:
        perc_hh_fixed_access = (hh_fixed_access / households) * 100
    else:
        perc_hh_fixed_access = 0
    if hh_fixed_access > 0 or households > 0:
        perc_hh_wifi_access = (hh_wifi_access / households) * 100
    else:
        perc_hh_wifi_access = 0
    return {
        'msoa': area_id,
        'area_km2': area_km2,
        'population': lookup[area_id]['population'],
        'population_km2': lookup[area_id]['population'] / area_km2,
        'urban_rural': lookup[area_id]['geotype'],
        'households': households,
        'households_km2': households / area_km2,
        'hh_fixed_access': hh_fixed_access,
        'hh_wifi_access': hh_wifi_access,
        'hh_fixed_access_km2': hh_fixed_access / area_km2,
        'hh_wifi_access_km2': hh_wifi_access / area_km2,
        'perc_hh_fixed_access': perc_hh_fixed_access,
        'perc_hh_wifi_access': perc_hh_wifi_access,
        'region': lookup[area_id]['region'],
        'lad_id': lad_id,
        'businesses': business_data['businesses'],
        'business_density_km2': business_data['businesses'] / area_km2,
        # business adoption - ba_
        'ba_micro': business_data['ba_micro'],
        'ba_small': business_data['ba_small'],
        'ba_medium': business_data['ba_medium'],
        'ba_large': business_data['ba_large'],
        'ba_very_large': business_data['ba_very_large'],
        'ba_total': business_data['ba_total'],
        # business adoption floor area - bafa_
        'bafa_micro': business_data['bafa_micro'],
        'bafa_small': business_data['bafa_small'],
        'bafa_medium': business_data['bafa_medium'],
        'bafa_large': business_data['bafa_large'],
        'bafa_very_large': business_data['bafa_very_large'],
        'bafa_total': business_data['bafa_total'],
        # business access points - baps_
        'baps_total_low': business_data['baps_total_low'],
        'baps_density_km2_low': business_data['baps_total_low'] / area_km2,
        'baps_total_baseline': business_data['baps_total_baseline'],
        'baps_density_km2_baseline': business_data['baps_total_baseline'] / area_km2,
        'baps_total_high': business_data['baps_total_high'],
        'baps_density_km2_high': business_data['baps_total_high'] / area_km2,
    }
aae549002635a1c2b3654edc31adbff7b936b319
20,179
import os


def get_host():
    """
    Returns the host name for the Aton driver.

    @return: str
    """
    aton_host = os.getenv("ATON_HOST")
    if aton_host is None:
        return "127.0.0.1"
    else:
        return str(aton_host)
1399c17c8d8852fa32cbd5f7e9a61ec485fe3fc3
20,181
def phase_flip(phase):
    """Flip phasing."""
    return [(y, x) for x, y in phase]
41e095b8d728a86599d04383252d2e60c394193c
20,182
import re


def swap_numbers(tweet):
    """
    Swap various numbers for placeholders, as our model doesn't learn much
    from them.
    """
    tweet = tweet.replace('_', ' ') + ' '
    tweet = re.sub(r'\$\d\s?.?\s?\d+?k?m?b?t?\s\d+\$', ' dollarvalueplaceholder ', tweet)
    tweet = re.sub(r'\$\d+?\s?.?\s?\d?k?m?b?t?\s', ' dollarvalueplaceholder ', tweet)
    tweet = re.sub(r'\d+?\$', ' dollarvalueplaceholder ', tweet)
    tweet = re.sub(r"\d\s?.?\s?\d+?k?m?b?t?\s", ' numbervalueplaceholder ', tweet)  # remove numbers
    tweet = re.sub(r"\d+?\s?.?\s?\d?k?m?b?t?\s", ' numbervalueplaceholder ', tweet)  # remove numbers
    tweet = re.sub(r"\d+?s?t?h?n?r?d?\s", ' ', tweet)
    tweet = re.sub(r"\d+?", ' ', tweet)  # remove leftover numbers
    tweet = re.sub("%", "percentageplaceholder", tweet)
    if 'percentageplaceholder' in tweet:
        tweet = tweet.replace("percentageplaceholder", '') + ' percentageplaceholder'
    if 'dollarvalueplaceholder' in tweet:
        tweet = tweet.replace('dollarvalueplaceholder', '') + ' dollarvalueplaceholder'
    if 'numbervalueplaceholder' in tweet:
        tweet = tweet.replace('numbervalueplaceholder', '') + ' numbervalueplaceholder'
    return tweet.replace('$', '')
f4a4f86ffa21d8f4a8bb7760196f99b17d021a02
20,183
def capitalize(s, ind):
    """
    Given a string and an array of integers representing indices, capitalize
    all letters at the given indices.

    :param s: a string value.
    :param ind: an array of integers representing indices.
    :return: the string with the letters at the given indices capitalized.
    """
    return "".join(j.upper() if i in ind else j for i, j in enumerate(s))
e79a4f534195d4622c0a2dc3a6adf4e4a5d30f0a
20,184
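A quick hand-checked example of capitalize above:

capitalize('abcdef', [1, 3])   # -> 'aBcDef'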
def tb_lineno(tb):
    """Calculate the correct line number of the traceback given in tb.

    Obsolete in 2.3.
    """
    return tb.tb_lineno
5e40a570cea4d4182ca48fed38fc6bb79c6a819c
20,185
import time


def time_stamp():
    """Returns a time stamp string.

    This is currently just a wrapper for time.asctime, but we are keeping it
    since (a) it saves loading the time module, and (b) it allows future
    flexibility in the preferred logging format.

    Returns:
        (str): text for use as a time stamp
    """
    return time.asctime()
ee55e575c1d85b47c7eacd0561273d37fe225bcc
20,187
def drop_cols(df, col_names, operation):
    """
    Perform an operation on a column if the first part of its name matches a
    given prefix. E.g. the first part of content_bicycle is content.

    :param df: dataframe
    :type df: pandas.core.frame.DataFrame
    :param col_names: string to match
    :type col_names: str
    :returns: dataframe
    :rtype: pandas.core.frame.DataFrame
    """
    for col in df.columns:
        if col.split('_')[0] == col_names:
            if operation == "drop":
                df = df.drop([col], axis=1)
            else:
                raise ValueError("Not an available action")
    return df
6aa64c0614101dd419f76fd8604e36f5bf33eae6
20,188