content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def add_docusaurus_metadata(content: str, id: str, title: str, hide_title) -> str:
    """
    Prepend a docusaurus front-matter block to content.
    """
    header = (
        "---\n"
        f"id: {id}\n"
        f"title: {title}\n"
        f"hide_title: {hide_title}\n"
        "---\n\n"
    )
    return header + content
00b3e9f583565d38e03e361957c942b16d1e270a
32,199
def same(left, right):
    """
    Case-insensitive comparison of thing link system names.

    Names may be supplied in different cases (upper or lower);
    returns True if they should be treated as the same.
    """
    def normalize(name):
        return str(name).upper()

    return normalize(left) == normalize(right)
564fc62228842110687d4697a7144e59f6310613
32,200
def bibentry_to_style(bibentry, style='default'):
    """Format a bibtex dictionary entry as a human-readable string."""
    if style != 'default':
        # Unknown styles produce an empty string.
        return ''
    parts = ['%s ' % bibentry['author'],
             '(%s). ' % bibentry['year'],
             '*%s*' % bibentry['title']]
    if 'journal' in bibentry:
        parts.append('. %s, ' % bibentry['journal'])
    if 'volume' in bibentry:
        parts.append('%s' % bibentry['volume'])
    if 'number' in bibentry:
        parts.append('(%s)' % bibentry['number'])
    if 'pages' in bibentry:
        # Bibtex page ranges use '--'; collapse to a single dash.
        parts.append(', %s' % bibentry['pages'].replace('--', '-'))
    return ''.join(parts) + '.'
4d3443522d9f54153eb6c4b5f4949a99209de532
32,201
def orb_rot_meta(name):
    """Parse metadata from an orbital-rotation variable name.

    Args:
        name (str): optimizable variable name
    Return:
        dict: metadata with keys 'prefix', 'i', 'j'
    Example:
        >>> orb_rot_meta("spo-up_orb_rot_0000_0002")
        {'prefix': 'spo-up', 'i': 0, 'j': 2}
    """
    stripped = name.replace('orb_rot_', '')
    *_, i_token, j_token = stripped.split('_')
    i, j = int(i_token), int(j_token)
    # Drop the zero-padded index suffix to recover the prefix.
    prefix = stripped.replace('_%04d_%04d' % (i, j), '')
    return {'prefix': prefix, 'i': i, 'j': j}
8bcb658e9daf51d31aae33789dc86db5c33d2158
32,202
import torch


def cross_entropy(output, target):
    """
    Compute the cross entropy loss.

    :param output: logits tensor
    :param target: class-index target tensor
    :return: scalar loss tensor
    """
    criterion = torch.nn.CrossEntropyLoss()
    return criterion(output, target)
c0f8accaaf4360c97cdbb392f43fc52c4077681f
32,204
def _to_gj_point(obj):
    """
    Dump an Esri JSON Point to a GeoJSON Point.

    :param dict obj: An EsriJSON-like `dict` representing a Point.
    :returns: GeoJSON representation of the Esri JSON Point; coordinates
        are empty when "x" or "y" is missing or None.
    """
    x = obj.get("x", None)
    y = obj.get("y", None)
    coordinates = () if x is None or y is None else (x, y)
    return {'type': 'Point', 'coordinates': coordinates}
8ee9299be34fe7402eb350589a58a5fdb471c20a
32,205
import os


def smells_like_maildir(working_dir):
    """
    Quick check that working_dir contains the cur/tmp/new maildir folders.
    """
    return all(
        os.path.exists(os.path.join(working_dir, sub))
        for sub in ('cur', 'new', 'tmp')
    )
4e866c01f52f626aac6dd4daf2f3a1b338228822
32,206
import os


def aaaaa():
    """
    Call os.path.exists on the empty path and return the result.
    """
    empty_path = ''
    return os.path.exists(empty_path)
327a9d78833c32a1f283afbaa167687cf02b29c9
32,207
def sign(amount: float | int) -> str: """Returns a string representing the sign of the given amount.""" if amount < 0: return "-" elif amount > 0: return "+" else: return ""
53014ab61ff46242a9a17b9b43ac7562a4fed8b2
32,209
def even_numbers(maximum):
    """Return a space-separated string of all positive numbers divisible
    by 2, up to and including the maximum passed into the function.
    For example, even_numbers(6) returns "2 4 6"."""
    return " ".join(str(value) for value in range(2, maximum + 1, 2))
241056401ae8b2dc8b906d63cdd5507b6957cdb4
32,210
def _remove_border_columns(axs, col_size):
    """Removes the border columns of an axes grid, returning the data columns.

    A border column follows every `col_size` data columns.
    """
    num_cols = axs.shape[1]
    border_indices = list(range(col_size, num_cols, col_size + 1))
    for border_ax in axs[:, border_indices].reshape(-1):
        border_ax.remove()
    keep = [col for col in range(num_cols) if col not in border_indices]
    return axs[:, keep]
fb1dd856010352b0442b32dca98481f323669bbd
32,211
def sdfHasProp(mol, sdfprop):
    """Return True if property sdfprop is present in molecule mol."""
    sdfkeyvals = mol["keyvals"]
    if not sdfkeyvals:
        # No key/value pairs at all -> property cannot be present.
        return False
    return any(pair[0] == sdfprop for pair in sdfkeyvals)
baf19e34452c7904501462fb79db0f52dd35cc00
32,212
def flatten_dict(d, tld="") -> dict:
    """Flatten the given dict recursively, prepending the upper-level key
    to all lower-level keys separated by a dot (.)."""
    flat = {}
    for key, value in d.items():
        if isinstance(value, dict):
            flat.update(flatten_dict(value, tld=f"{tld}{key}."))
        else:
            flat[tld + key] = value
    return flat
ed44559d4a3083c51f85a55a07b820d058272412
32,213
import xxhash


def get_random_partition(row, size):
    """Generate a random bucket id for cluster scenarios.

    Used for randomly sharded storage, e.g. image storage.

    Args:
        row: unique record value (string), usually the raw record from
            the real-time stream
        size: number of buckets

    Returns:
        partition_id: bucket number in [1, size]
    """
    digest = xxhash.xxh64(row).intdigest()
    partition_id = int(digest % size) + 1
    return partition_id
ea9b0e4f5f2aee1875d5856d6ab6c65614a6a3d4
32,214
def ndvi_filter_date(image_directory):
    """Extract the date of image collection from the imagery path name.

    Parameters
    ----------
    image_directory: directory to file including study site, e.g.
        "HARV/landsat-crop/LC080130302017031701T1-SC20181023151837"

    Returns
    -------
    Date string "YYYY-MM-DD" sliced from fixed positions in the path.
    """
    # Landsat scene IDs put the acquisition date at fixed offsets.
    year = image_directory[28:32]
    month = image_directory[32:34]
    day = image_directory[34:36]
    return "-".join([year, month, day])
57246532c67e7757dfe704f796ecd51986f2dd39
32,216
import os
import requests
import pandas


def download_data(path, url, na_values=None):
    """Download the file if it doesn't exist locally, then parse it as CSV.

    Args:
        path (str): path to the file
        url (str): download url
        na_values (str|list): what pandas will interpret as missing

    returns:
        DataFrame created from the file
    """
    if not os.path.isfile(path):
        payload = requests.get(url).text
        with open(path, 'w') as fh:
            fh.write(payload)
    return pandas.read_csv(path, na_values=na_values)
66f4e4a3514d860ab36f24f3ee139d30d70598bf
32,217
def get_top_reactions(reactions, threshold):
    """
    Sort posts by reaction total (descending) and keep those at or above
    the threshold.

    :param reactions: List of reactions for each post.
    :param threshold: The minimum number of reactions for the post to be
        considered for the booster pack.
    :return: The sorted and filtered list of reactions above the threshold.
    """
    reactions.sort(key=lambda item: item['total'], reverse=True)
    print('Sorted total high to low: ', end=" ")
    print(reactions)
    kept = [item for item in reactions if item['total'] >= threshold]
    print(f'Posts above threshold of {threshold}:', end=" ")
    print(kept)
    return kept
b90ecaee2739717bfa23a727c9c6d854345943e1
32,218
import argparse


def parse_args():
    """
    Create python script parameters.

    Returns
    -------
    ArgumentParser
        Resulted args.
    """
    parser = argparse.ArgumentParser(
        description="Extract DeepSpeech features from audio file",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("--input", type=str, required=True,
                        help="path to input audio file or directory")
    parser.add_argument("--output", type=str,
                        help="path to output file with DeepSpeech features")
    parser.add_argument("--deepspeech", type=str,
                        help="path to DeepSpeech 0.1.0 frozen model")
    parser.add_argument("--metainfo", type=str,
                        help="path to file with meta-information")
    return parser.parse_args()
40bcea6da729cc5e1518ef77a0e7bd70571dd0c0
32,221
def _is_top_t_vertices_connection(collapsed_term, top_t_vertices) -> bool:
    """
    Check whether a collapsed term contains any of the top T vertices.

    :type collapsed_term: list [of list [of string]]
    :param collapsed_term: list of tokenised terms collapsed from the
        original context that will form a single-word or multi-word term
    :param top_t_vertices: top T weighted vertices (first element is the token)
    :return: True if the input contains any top T vertex
    """
    for vertex in top_t_vertices:
        if vertex[0] in collapsed_term:
            return True
    return False
cbd069d7f0970ff8de359c09d68fa0f207f0c9a1
32,222
import os
import sys


def get_token():
    """Return the BSS token from the TOKEN environment variable.

    Valid data comes from BSS, so the token must be set in order to
    query it; exits with status 2 when missing.
    """
    token = os.environ.get("TOKEN")
    if token is None:
        print("You must set BSS TOKEN environment variable")
        sys.exit(2)
    return token
75748c1139f5eb8e633accd90841771e8148a660
32,225
import re


def remove_header_units(headers):
    """
    Remove units from headers. Units are enclosed in parentheses.

    :param headers: List of headers.
    :returns: List of headers with units removed.
    """
    units_pattern = re.compile(r'\(.*\)')
    return [units_pattern.sub('', header).strip() for header in headers]
f05d6c86610efe192badec32fbc5077e0ed267d7
32,226
def change_centre_variant_3(centre):
    """Variant of 'change_centre' used to generate '-3' designs:
    scales x by 0.99 and y by 0.96."""
    x, y = centre[0], centre[1]
    return (x * 0.99, y * 0.96)
b3c48bf018fe885a383724145b9826113cce55e6
32,227
def not_0(upper, lower):
    """Fill both values to whichever is not equal to 0, or leave in place.

    Values are compared to zero (rather than tested for truthiness)
    because inputs can be anything, including an empty list, which
    should be considered nonzero.
    """
    if upper == 0:
        chosen = lower
    elif lower == 0:
        chosen = upper
    else:
        return upper, lower
    return chosen, chosen
d436f764d79f1febe5dbfbf2407d5921ea06e2ab
32,228
def build_index_type_histo(ident_index):
    """Given an identifier index, return a histogram of types and counts.

    :param ident_index: a dict with identifiers as keys and data items as values
    :returns: a histogram of types, i.e. a dict with declared '@type' values
        as keys and counts as values, ordered by descending count
    :rtype: dict
    """
    histo = {}
    for value in ident_index.values():
        # Deliberately an exact-type check, mirroring the original.
        if type(value) is dict:
            itype = value.get('@type', None)
            if itype:
                histo[itype] = histo.get(itype, 0) + 1
    return dict(sorted(histo.items(), key=lambda item: -item[1]))
02898306dd7daa691edb733a63f25ecc6c799d4d
32,230
def toGoTexter(downInt, toGoTens, toGoOnes):
    """Format the 'to go' part of a down-and-distance display.

    If Down is blank (char code 32), returns "". Otherwise adds ' Down'
    when there is no data for ball placement (ones digit blank), or adds
    an ampersand with spacing parsed from whether the tens place holds
    data. Formatting assumes a display showing '1st & 10' normally, with
    fallback text like '2nd Down' when ball placement data is missing.
    """
    if downInt == 32:
        return ""
    if toGoOnes == 32:
        return " Down"
    distance = chr(toGoTens) + chr(toGoOnes)
    if toGoTens == 32:
        # Tens digit blank: its space is already part of `distance`.
        return " &" + distance
    return " & " + distance
eebcc66165d2f345750164115e2d3c0382fed6b9
32,231
def __get_root_and_type__(element):
    """Get root and type from a scale or chord XML tag.

    :param element: DOM element containing <root> and <type> children
    :return: (root, type) data strings, or None (implicitly) when either
        tag is missing or empty
    """
    try:
        root = element.getElementsByTagName("root")[0].firstChild.data
        type = element.getElementsByTagName("type")[0].firstChild.data
        return root, type
    # Narrowed from a bare `except`: a missing tag raises IndexError and
    # an empty tag (no firstChild) raises AttributeError; anything else
    # should propagate instead of being silently swallowed.
    except (IndexError, AttributeError):
        print("No root or type found")
f9b56eebcc86eeae0d076c8408068cad7e85143d
32,232
def repos_split(repos_relpath):
    """Split a repos path into its directory and basename parts."""
    head, sep, tail = repos_relpath.rpartition('/')
    if not sep:
        # No slash at all: everything is the basename.
        return '', repos_relpath
    return head, tail
3bd1d76f75664ac28d03277214b5cd9f2bdcaf05
32,234
def extract_dailyCovid_data(data):
    """Build an upload string of new-patient counts per prefecture.

    :param data: parsed COVID data mapping; only the 'data47' entry is used
    :return: contents (str), one "<name>: New Patient : <n><br/>" line per ken
    """
    upload_contents = ''
    for key in data:
        if key != 'data47':
            continue
        for ken in data[key]:
            ken_name = ken['name']
            new_patients = ken['new']
            # Accessed (but unused) in the original too; keeps the same
            # KeyError behavior when 'cumulative' is missing.
            _ = ken['cumulative']
            upload_contents += ken_name + ": New Patient : " + str(new_patients) + "<br/>\n"
    return upload_contents
5ce7711a7fbc92fcedad25f3be7a02327e29aec3
32,235
def file_size(path):
    """Return the size of a file in bytes.

    :param path: path to the file
    :return: byte size reported by tell() at end-of-file
    """
    # Open in binary mode so tell() is a real byte offset (text-mode tell()
    # is an opaque cookie), and use a context manager so the handle is
    # always closed — the original leaked the file object.
    with open(path, 'rb') as f:
        f.seek(0, 2)  # seek to end of file
        return f.tell()
915a8225c0d084efee7262383131d51ce5335aa3
32,236
from pathlib import Path
import shutil


def contentdir_factory(tmp_path_factory):
    """
    Get a factory of temporary directories for cacheable content.

    To add cacheable content to the directory, use hypothesis to generate
    some text for you. This fixture is needed because hypothesis is
    incompatible with function-scoped fixtures.
    """
    def _create():
        """Create the unique temporary directory."""
        tmpdir: Path = tmp_path_factory.mktemp(basename="fsdata", numbered=True)
        # Defensive: mktemp should return an empty dir; wipe it if not.
        if len(list(tmpdir.iterdir())) > 0:
            shutil.rmtree(tmpdir)
        return tmpdir

    return _create
6369cc6afeb3b6bef45ffdfd315b19993d909f90
32,238
def convert_dict_to_text_block(content_dict):
    """
    Convert the given dictionary to a multiline text block of the format:

        key1: _value1_
        key2: _value2_
    """
    return "".join(f"{key}: _{value}_\n" for key, value in content_dict.items())
980822572c30d5af392ded834bece095c79815ce
32,240
def extract_key_with_identifier_from_ObjectSummary(identifier, objSumList):
    """Extract the keys starting with `identifier` from an ObjectSummary list."""
    return [
        obj_summary.key
        for obj_summary in objSumList
        # find(...) == 0 means the key begins with the identifier.
        if obj_summary.key.find(identifier) == 0
    ]
4958a88062830440daa41832f20ceed7d040e2a7
32,241
def cur_stages(iter, args):
    """
    Return the current training stage.

    :param iter: current iteration.
    :param args: namespace with a `grow_steps` list of stage boundaries.
    :return: index of the current stage (0 before the first boundary,
        len(args.grow_steps) after the last).
    """
    stage = 0
    for position, boundary in enumerate(args.grow_steps):
        if iter >= boundary:
            stage = position + 1
    return stage
cdb9f99c40e2ba649bd150a9358647f5f5db83c7
32,243
def check_emotes(line, emotes=()):
    """Check whether a line contains at least one known emote.

    Each whitespace-separated word of the form "prefix:emote\\suffix" is
    reduced to the text between the first ':' and the following '\\';
    words without a ':' are compared as-is.

    :param line: line to scan (converted to str)
    :param emotes: collection of emote names to look for; the default was
        a mutable `[]` — replaced with an immutable tuple (it was never
        mutated, so behavior is unchanged)
    :return: True if any word matches an emote, else False
    """
    for word in str(line).split():
        token = word
        try:
            token = token.split(':')[1]
            token = token.split('\\')[0]
        except IndexError:
            # Word has no ':' part; compare the raw word.
            pass
        if token in emotes:
            return True
    return False
6bb870f19e08262b71ce39c82f9253b7abe4fc2a
32,244
def strip_absolute_path_and_add_trailing_dir(path):
    """
    Remove the initial leading '/' from the prefix path and append a
    trailing '/'.
    """
    relative = path[1:]
    return f"{relative}/"
2ca65e30b68e5d7778821d08ee231b544fcff31a
32,245
import requests
import json


def InboxItemToJSON(item):
    """
    Converts an InboxItem object into a JSON-compatible dictionary.

    Prefers to just use the json string. If `item` has something in its
    `link` field (and no `json_str`), requests the InboxItem's link and
    relies on remote APIs to return the right JSONs. Recommended to just
    use `json_str`. Returns a placeholder dictionary on failure.

    item - an InboxItem object
    """
    if not item:
        return None
    placeholder = {
        "type": "",
        "title": "Something went wrong.",
        "id": "",
        "source": "",
        "origin": "",
        "description": "There was a shared item here, but we couldn't retrieve it.",
        "contentType": "text/plain",
        "content": "",
        "author": {},
        "categories": "",
        "count": 0,
        "size": 0,
        "comments": "",
        "published": "",
        "visibility": "PUBLIC",
        "unlisted": True,
    }
    if item.link != "" and item.json_str == "":
        try:
            # Remote API returns JSON (a dict), not a string.
            return requests.get(item.link).json()
        except Exception as e:
            # Can't get the object from `link`, e.g. it no longer exists.
            print(e)
            placeholder["id"] = item.link
            placeholder["content"] = str(e)
            return placeholder
    try:
        return json.loads(item.json_str)
    except Exception as e:
        print(e)
        placeholder["content"] = str(e)
        return placeholder
ded5b9538a7fab92c0669d59b3b279a5a5b152e0
32,246
import requests


def get_bytes_from_url(url):  # pragma: no cover
    """
    Reads bytes from url.

    Args:
        url: the URL

    Returns:
        the response body as bytes
    """
    response = requests.get(url)
    return response.content
bf00ec24300167cea4f75df809d65f1991af4ea8
32,247
def echo_user(username):
    """
    Echo the user
    ---
    tags:
      - user
    responses:
      default:
        description: Nice.
    """
    # NOTE: the docstring above is a flasgger/OpenAPI spec consumed at
    # runtime, so it is preserved verbatim.
    status_code = 200
    return username, status_code
ecac3965020368113f0b022cdedc7c1df644c00d
32,248
def TrimBytes(byte_string):
    """Trim leading zero bytes, keeping a single NUL if all were zero."""
    trimmed = byte_string.lstrip(chr(0))
    # An all-zero input strips down to ""; return one NUL char then.
    return trimmed if trimmed != "" else chr(0)
fa6a57e7799819790410a4e7c2a96ba4253ca88b
32,250
import random
import os


def sample_videos(data_reader, root_folder, num_samples, num_frames):
    """Sample fixed-length frame sequences from a video list.

    Args:
        data_reader: iterator yielding lines "video_folder label max_frames"
        root_folder: root directory containing the video folders
        num_samples: number of sequences to collect
        num_frames: frames per sequence

    Returns:
        - image_paths: 2D list with num_samples rows, each row containing
          num_frames frame paths
        - labels: list of num_samples integer labels

    Videos with max_frames <= num_frames are skipped; the start frame is
    chosen uniformly at random within the valid range.
    """
    image_paths = []
    labels = []
    while len(labels) < num_samples:
        line = next(data_reader)
        video_folder, label, max_frames = line.strip().split(" ")
        max_frames = int(max_frames)
        if max_frames <= num_frames:
            continue
        start_index = random.randint(0, max_frames - num_frames)
        image_paths.append([
            os.path.join(root_folder, video_folder, "%04d.jpg" % index)
            for index in range(start_index, start_index + num_frames)
        ])
        labels.append(int(label))
    return image_paths, labels
92c4417b927d6473574ea0a92a702e29796972bb
32,254
def _test_example():
    """
    Example of how to incorporate doctests.

    >>> _test_example()
    'Pong!'

    :return: 'Pong!'
    """
    return 'Pong!'
6981e2471fd73cfde20377fcaef335424a7e892e
32,255
def open_text(fname):
    """
    Open and read a file line by line, returning the list of lines.

    NOTE: the file is opened in binary mode ('rb'), so the returned list
    contains bytes objects with surrounding whitespace (e.g. b'\\n')
    stripped from each line.

    parameters:
        fname: string, full path to the file
    """
    with open(fname, 'rb') as f:
        raw_lines = f.readlines()
    return [line.strip() for line in raw_lines]
c8d21244e9409fe3c8c719bd993654dc7f0a4a6b
32,259
def _make_dense_feature_vector(instance, transformers):
    """Make a dense feature vector with values as elements.

    A feature backed by a single source column returns that transformer's
    value directly; multi-column features concatenate all transformed
    values into one list.
    """
    if len(transformers) == 1:
        # The feature has only one source column.
        return transformers[0].get_value_and_transform(instance)
    feature_vector = []
    for transformer in transformers:
        feature_vector.extend(transformer.get_value_and_transform(instance))
    return feature_vector
90c9bad2f073e0d74f05e9ef96684a3bc05263bb
32,261
import re


def _get_ip(text):
    """Get the first dotted-quad (subnet IP) substring of text, or None."""
    matches = re.findall(r"[0-9]+(?:\.[0-9]+){3}", text)
    return matches[0] if matches else None
abe1b0346a0a2b4db399dbdec8e7142c1aef6da4
32,262
def attestation_attestation_provider_show(client, resource_group_name=None, provider_name=None):
    """
    Show the status of an Attestation Provider.
    """
    result = client.get(
        resource_group_name=resource_group_name,
        provider_name=provider_name,
    )
    return result
682c7945adc7f71156135d6115d078c3bf2e4a5f
32,263
from typing import Literal


def escape_triple_quotes(string: str, single_quote: Literal["'", '"'] = '"') -> str:
    """Escape triple quotes inside a string.

    :param string: string to escape
    :param single_quote: single-quote character
    :return: escaped string
    """
    assert len(single_quote) == 1
    escaped_quote = rf"\{single_quote}" * 3
    return string.replace(single_quote * 3, escaped_quote)
078e7d2ea832cb06626172e1133dcbeeaa6b7aa5
32,264
def calculate_test_values(total_words, ocr_recognized_words, tp, tn, fn):
    """
    Update the model test counters:
    TP: True Positive (there are words and every word was recognized)
    TN: True Negative (there is no word and no word was recognized)
    FP: False Positive (no word, but a word or more recognized — not counted here)
    FN: False Negative (there are words and NOT every word was recognized)
    """
    if total_words == 0:
        return (tp, tn + 1, fn)
    if ocr_recognized_words / total_words == 1:
        return (tp + 1, tn, fn)
    return (tp, tn, fn + 1)
e0de958ff308ac3c6a1425203ff3b92b1ecb5fca
32,265
import pkg_resources
import csv


def load_dict() -> dict:
    """
    Load the syllable reference data into a dictionary.

    :return: dictionary mapping each word in data.csv to its integer count
    """
    file_path = pkg_resources.resource_filename(__name__, "data.csv")
    words = {}
    with open(file_path, newline="") as file:
        for row in csv.reader(file):
            words[row[0]] = int(row[1])
    return words
8260366a4efaf0d2b1ab14d96c20728b1e50ffa3
32,266
def src_dst2plot_name(dm, src_ip, dst_ip):
    """Convert src/dst IPs into a readable name based on end-node names."""
    src_name = dm.ndm.getEndNodeByIp(src_ip).name
    dst_name = dm.ndm.getEndNodeByIp(dst_ip).name
    return "{}--{}".format(src_name, dst_name)
5214091d63e1b3daf3ff4bef44855ca3e52895d9
32,268
def validate_concat(col1, col2, max_len=1000):
    """
    Concatenate subject and body, skipping the subject when the body
    already starts with it (duplicate check). The maximum sequence (text)
    length is max_len; characters beyond it are cut off.
    """
    if isinstance(col1, str):
        col1, col2 = [col1], [col2]
    text_concat = []
    for subject, description in zip(col1, col2):
        try:
            if subject in description[:len(subject)]:
                # Body already begins with the subject -> avoid duplication.
                text_concat.append(description[:max_len])
            else:
                combined = subject + '. ' + description
                text_concat.append(combined[:max_len])
        except Exception:
            # Fall back to the body alone on any failure (e.g. bad types).
            text_concat.append(description[:max_len])
    return text_concat
2913fa4cff07f87c33b9340dc2aab565578d8f88
32,269
import math


def getDistance(x1, y1, z1, x2, y2=None, z2=None):
    """Return the distance between two points, either 3-dimensional ones
    or 2-dimensional ones. Please use the components of them in a row as
    parameters. For example, if you've 2d points: A(10|20), B(30|40)

        getDistance(10, 20, 30, 40)

    And if you've 3d points: C(50|60|70), D(80|90|100)

        getDistance(50, 60, 70, 80, 90, 100)

    Args:
        x1 (float): x-coord of first point
        y1 (float): y-coord of first point
        z1 (float): z-coord of first point (x-coord of the second point
            when called with 2-D arguments)
        x2 (float): x-coord of second point (y-coord of the second point
            when called with 2-D arguments)
        y2 (float, optional): y-coord of second point
        z2 (float, optional): z-coord of second point

    Returns:
        float: distance between given points

    Note: the original used '^' (bitwise XOR) where '**' was intended and
    subtracted mismatched coordinates; both defects are fixed here.
    """
    if y2 is None:
        # 2-D call: the points are (x1, y1) and (z1, x2).
        return math.hypot(z1 - x1, x2 - y1)
    # 3-D call: the points are (x1, y1, z1) and (x2, y2, z2).
    return math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2 + (z2 - z1) ** 2)
3065ed9840e841a12d0c18d0b003ac4227a49a2a
32,270
def coords_api_to_json_pos(x_cm_right, y_cm_forward):
    """Convert API coordinates (x right, y forward) to robot coordinates
    (x forward, y left). Returns (x, y)."""
    return y_cm_forward, -x_cm_right
ce23c06634cdffc02b574040ced449b701670571
32,271
def return_code_from_exception(exception):
    """Return the exit code that raising the exception would produce.

    `exception` is a sys.exc_info-style tuple (or None for no exception):
    0 for no exception, SystemExit's code, 1 for anything else.
    """
    if exception is None:
        return 0
    exc_value = exception[1]
    if isinstance(exc_value, SystemExit):
        return exc_value.code
    return 1
de92ca34d3959a59b6c485ad2cdaf2f1d4628a8e
32,272
def first_neg(x):
    """Return the index of the first negative number in `x`, or None.

    :param x: iterable of comparable numbers
    :return: index of the first value < 0, or None when there is none

    The original built a list of every negative index before taking the
    first; this short-circuits on the first match instead.
    """
    return next((i for i, value in enumerate(x) if value < 0), None)
ddde2f4b6d19ca5b80d956fdf9f4c8b3f8b40335
32,274
import random


def backoff_time(attempt, retry_backoff=2., max_delay=30.):
    """Compute randomized exponential backoff time.

    Args:
        attempt (int): attempt number, starting at zero.

    Keyword Args:
        retry_backoff (float): backoff time on the first attempt.
        max_delay (float): maximum returned value.
    """
    base = retry_backoff * (2 ** attempt)
    # Add +-25% of variation (jitter).
    jitter = base * ((random.random() - 0.5) / 2.)
    return min(base + jitter, max_delay)
907e636dc60a81fa9d7d0ebf5c42841b828a693c
32,275
def get_fresh(old_issue_list, new_issue_list):
    """Return the issues from new_issue_list whose URL does not appear in
    old_issue_list."""
    seen_urls = {issue['url'] for issue in old_issue_list}
    return [issue for issue in new_issue_list if issue['url'] not in seen_urls]
b79313c53f66694038871bd94969b8b297c211a7
32,278
from typing import List


def count_increases(measurements: List[int]) -> int:
    """Count the number of times the depth measurement increases.

    :param measurements: sequence of depth readings
    :return: number of adjacent pairs where the later reading is larger

    The first measurement has no predecessor and is never counted. The
    original seeded the comparison with an arbitrary 9999999 sentinel,
    which miscounted whenever the first real measurement exceeded it;
    comparing adjacent pairs directly fixes that.
    """
    return sum(
        1 for prev, cur in zip(measurements, measurements[1:]) if cur > prev
    )
97ad846b0547a3f989805a5deacc0619f45b1cb3
32,279
import math


def floatfmt(f, n=4, s=4, max_width=None, default='NaN', use_scientific=False):
    """
    Format a value for display.

    f: value to format
    n: number of sig figs
    Use scientific notation if f < 10**-n (e.g. n=#.## f=0.00001)
    or f > 10**(s+1) (e.g. s=### f=3001).
    Strings pass through unchanged; None yields `default`; near-zero
    values yield '0.0'; results longer than max_width are truncated.
    """
    if isinstance(f, str):
        return f
    if f is None:
        return default

    absf = abs(f)
    if absf < 1e-20:
        text = '0.0'
    else:
        out_of_range = absf < math.pow(10, -n) or absf > math.pow(10, s + 1)
        if out_of_range and use_scientific:
            fmt = '{{:0.{}E}}'.format(s)
        else:
            precision = n
            if out_of_range and absf < math.pow(10, s + 1):
                # Tiny out-of-range value: widen precision to its magnitude.
                precision = int(round(abs(math.log10(absf))))
            fmt = '{{:0.{}f}}'.format(precision)
        text = fmt.format(f)

    if max_width and len(text) > max_width:
        text = text[:max_width]
    return text
b8bd82d04cc502fbac481cdf4ecf9b0900b2736e
32,280
import math


def distance_calc(point1, point2):
    """
    Calculate the Euclidean distance between two 2-D points.

    :param point1: pt1 (indexable as [0], [1])
    :param point2: pt2
    :return: the distance
    """
    dx = point1[0] - point2[0]
    dy = point1[1] - point2[1]
    return math.sqrt(dx ** 2 + dy ** 2)
47be46da7e8251ff15fd2c5eb1b8be7d11d4a416
32,284
def clean_reg_name(reg, list):
    """
    Clean the name of a register by removing any characters contained in
    the inputted list.

    NOTE(review): the parameter name `list` shadows the builtin; it is
    kept unchanged for caller compatibility.
    """
    for unwanted in list:
        reg = reg.replace(unwanted, '')
    return reg
70bcd35e498c61c4ab1d53896d85fcd4fb74b5ff
32,286
import time


def get_now_time_ns():
    """Return the current time in nanoseconds since the epoch."""
    return int(time.time_ns())
d6452107b524ae4ae0bd6b747f9a12d9b8115818
32,288
def count_crickMAX(args):
    """Count the number of sequences (header lines starting with '>')
    in the Crick fasta file."""
    with open(args.crick, 'r') as crick_in:
        return sum(1 for line in crick_in if line.startswith('>'))
c4937613b917107f74aa6658719d3e6e243eebd2
32,289
import os


def create_dir(path):
    """
    mkdir the provided path.

    :param path: The path to the directory to be made
    :return: True if mkdir was successful, False if the directory already
        exists (or creation otherwise fails with OSError)
    :rtype: bool
    """
    try:
        os.mkdir(path)
    except OSError:
        # Directory already exists (or creation failed); report failure.
        return False
    return True
838c265e4b7ba825864b9826dd5df92f6253e3e1
32,290
from pathlib import Path


def mock_cli_config():
    """Build a mock CLIConfig object with fixed project settings."""

    class MockCLIConfig(object):
        def __init__(self):
            # Tracks whether services have been set up yet.
            self.services_setup = False

        def get_project_dir(self):
            return Path("project_dir")

        def get_instance_path(self):
            return Path("instance_dir")

        def get_services_setup(self):
            return self.services_setup

        def update_services_setup(self, is_setup):
            self.services_setup = bool(is_setup)

        def get_project_shortname(self):
            return "project-shortname"

        def get_db_type(self):
            return "postgresql"

        def get_file_storage(self):
            return "local"

    return MockCLIConfig()
7dc747210d5009d494468c60466949cf0aef9b54
32,291
import numpy


def calc_q_sq(q, flag_q: bool = False):
    """
    q is matrix q_11, q_22, q_33, q_12, q_13, q_23
    Output is quadratic form

    Computes the matrix square of a symmetric 3x3 matrix stored in packed
    form [q_11, q_22, q_33, q_12, q_13, q_23] (first axis of `q`); the
    result `qo` uses the same packed layout. When `flag_q` is set, `dder`
    also holds the derivatives of each packed output component with
    respect to each packed input component.

    Returns:
        (qo, dder): packed result array, and a dict of derivative arrays
        (empty when flag_q is False). For complex input the derivative is
        split into "q_real"/"q_imag"; otherwise it is stored under "q".
    """
    # Unpack the diagonal and off-diagonal components.
    q_11, q_22, q_33 = q[0], q[1], q[2]
    q_12, q_13, q_23 = q[3], q[4], q[5]
    # (Q @ Q) diagonal entries for a symmetric Q.
    qo_11 = numpy.square(q_11) + numpy.square(q_12) + numpy.square(q_13)
    qo_22 = numpy.square(q_12) + numpy.square(q_22) + numpy.square(q_23)
    qo_33 = numpy.square(q_13) + numpy.square(q_23) + numpy.square(q_33)
    # (Q @ Q) off-diagonal entries.
    qo_12 = q_11 * q_12 + q_12 * q_22 + q_13 * q_23
    qo_13 = q_11 * q_13 + q_12 * q_23 + q_13 * q_33
    qo_23 = q_12 * q_13 + q_22 * q_23 + q_23 * q_33
    qo = numpy.stack([qo_11, qo_22, qo_33, qo_12, qo_13, qo_23], axis=0)
    dder = {}
    if flag_q:
        zero = numpy.zeros(q_11.shape, dtype=float)
        # Each dder_ij row holds d(qo_ij)/d[q_11, q_22, q_33, q_12, q_13, q_23].
        dder_11 = numpy.stack([2*q_11, zero, zero, 2*q_12, 2*q_13, zero], axis=0)
        dder_22 = numpy.stack([zero, 2*q_22, zero, 2*q_12, zero, 2*q_23], axis=0)
        dder_33 = numpy.stack([zero, zero, 2*q_33, zero, 2*q_13, 2*q_23], axis=0)
        dder_12 = numpy.stack([q_12, q_12, zero, q_11+q_22, q_23, q_13], axis=0)
        dder_13 = numpy.stack([q_13, zero, q_13, q_23, q_11+q_33, q_12], axis=0)
        dder_23 = numpy.stack([zero, q_23, q_23, q_13, q_12, q_22+q_33], axis=0)
        if q.dtype == complex:
            # Split complex sensitivity into real/imaginary parts; the
            # imaginary derivative is i times the real one.
            dder["q_real"] = numpy.stack([dder_11, dder_22, dder_33, dder_12, dder_13, dder_23], axis=0)
            dder["q_imag"] = 1j*dder["q_real"]
        else:
            dder["q"] = numpy.stack([dder_11, dder_22, dder_33, dder_12, dder_13, dder_23], axis=0)
    return qo, dder
26d066c1adc71fc37be32d05c8f07e0a17bd1777
32,292
import re


def should_ignore(ignore_list, url):
    """
    Return True if the URL should be ignored.

    :param ignore_list: The list of regexes to ignore.
    :param url: The fully qualified URL to compare against.
    """
    return any(re.compile(pattern).search(url) for pattern in ignore_list)
03cc80a4611434ebc04d92885698f61c0871cb0a
32,294
async def evaluate_requested_value(laid_card, input_foo):
    """
    Evaluate the requested value of cards after a player plays a jack
    special card.

    :param laid_card: tuple with the last played card (suit, value)
    :param input_foo: async function used to ask the player for a value
    :return: string object of the requested value, or None when the card
        is not a jack or the answer is invalid
    """
    if laid_card[1] != 'J':
        return None
    answer = await input_foo('Enter VALUE of requested cards: ')
    valid_values = '5 6 7 8 9 10'.split()
    return answer if answer in valid_values else None
e3bb40ca5b3f16cef09d8847ddf4de7311e1a66e
32,295
def to_camel(snake_str: str) -> str:
    """
    Converts snake_case_string to camelCaseString.
    """
    head, *tail = snake_str.split("_")
    return head.lower() + "".join(word.title() for word in tail)
a6f5e917de58360e61ad74ed0b8b6ece13032268
32,296
def apply_multiplicities(data):
    """
    Apply each reflection's multiplicity to its intensity in-place.

    Takes in the Data class of scattering data returned by
    convert_to_numpy() and multiplies each intensity by its multiplicity,
    then sets the multiplicity to 1 to avoid accidental double-use.

    Parameters
    ----------
    data : Data

    Returns
    -------
    data : Data
        Updated with multiplicities applied.
    """
    for idx in range(len(data.tth)):
        data.I[idx] *= data.mult[idx]
        data.mult[idx] = 1
    return data
89904a9f2602ded84d96de92f8f1da6b53420179
32,297
def beta_pruning_terminal_state(state, depth_remaining, time_remaining):
    """
    Report whether search should stop at this state: either a terminal
    state was reached, or the allowed search depth or time ran out.

    :param state: The state to evaluate
    :param depth_remaining: The remaining depth
    :param time_remaining: The time remaining
    :return: True if we hit a terminal or cut-off state, False otherwise
    """
    if depth_remaining <= 0 or time_remaining <= 0:
        return True
    terminal_nodes = [chr(code) for code in range(104, 112)]  # 'h'..'o'
    return state in terminal_nodes
e78801a07cc571697a35997ef084c0356e68981b
32,298
def identity(value, other):
    """Identity check using object identity (the `is` operator)."""
    result = value is other
    return result
42d8e6939880494137eef9285e447e82cf44f16b
32,299
def _urljoin(*parts):
    """Concatenate url parts with single slashes between them."""
    stripped = (part.strip('/') for part in parts)
    return '/'.join(stripped)
4886102b52461944f3c63ab7c0695218698cb47a
32,300
def parse_qualifier_block(text):
    """Parse qualifiers from a qualifier block.

    Qualifiers are split by newline -> 21 spaces -> slash.
    For key=value qualifiers, leading/trailing '"' and any embedded gap
    sequences are removed from the value; bare qualifiers (e.g. /pseudo)
    are boolean and stored as True. Repeated qualifiers of the same key
    are collected into lists, otherwise kept as plain values.
    """
    qualifiers = {}
    gap = "\n" + " " * 21
    for qualifier in text.split(f"{gap}/"):
        key, *rest = qualifier.split("=")
        if rest:
            value = rest[0].lstrip('"').strip('"\n').replace(gap, "")
        else:
            # No '=': boolean flag such as /pseudo.
            value = True
        if key not in qualifiers:
            qualifiers[key] = value
        elif isinstance(qualifiers[key], list):
            qualifiers[key].append(value)
        else:
            # Second occurrence: promote the stored value to a list.
            qualifiers[key] = [qualifiers.pop(key), value]
    return qualifiers
843b9cd472d16716baf7969f9e62f539d8a0e986
32,301
def file_to_dataset(file):
    """Derive the dataset name from a file name (None if unrecognized)."""
    patterns = (("ZJet", "Z"), ("WJet", "W"), ("HToInvisible", "Hinv"))
    for needle, dataset in patterns:
        if needle in file:
            return dataset
    # Unrecognized file names fall through to None, as before.
    return None
4e759054df3889e3d6f1ca1b0d3ed080f035d233
32,303
import requests


def search(query):
    """
    Send a query to Reddit search.

    :param query: search string; falsy queries return an empty list
    :return: list of dicts with 'id', 'title' and 'permalink'
    :raises Exception: on Reddit API errors (including 429 rate limiting)
    """
    reddits = []
    if not query:
        return reddits
    headers = {'User-Agent': 'Mozilla/5.0'}
    params = {'q': query, 'limit': 10, 'sort': 'relevance', 't': 'month'}
    response = requests.get('http://www.reddit.com/search.json',
                            params=params, headers=headers)
    d = response.json()
    if 'error' in d:
        if d['error'] == 429:
            raise Exception('You are being rate limited by Reddit')
        raise Exception(d['error'])
    if 'data' in d and 'children' in d['data']:
        for child in d['data']['children']:
            post = child['data']
            reddits.append({
                'id': post['id'],
                'title': post['title'],
                'permalink': 'http://www.reddit.com' + post['permalink'],
            })
    return reddits
3075fbfd0e633dea6c8ca17d8ca26c3cdcaaaf9d
32,304
def identity(arg):
    """Identity function: return *arg* unchanged (useful as a no-op callback)."""
    return arg
e702e3af1a4894c124686667038d6d7ead37a4b6
32,305
def _get_node_count(graph_entry): """ Get number of nodes""" return graph_entry.vcount()
e5e9992aadfa0d2f84c698b1ceeae0c5f500c72e
32,306
import os


def util_func(x):
    """Square *x* and report which process executed the call.

    Parameters
    ----------
    x : int or float
        Value to square.

    Returns
    -------
    int or float
        ``x`` squared.
    int
        PID of the process that ran this function.
    """
    squared = x ** 2
    pid = os.getpid()
    return squared, pid
3988121535b623c13ca84e13b9b702b3c98c111d
32,307
def find_person(data: list, lineno: int) -> dict:
    """Find a person in a list of dictionaries representing individuals in the CPS.

    Parameters
    ----------
    data : list of dict
        Household members; each dict must have an ``a_lineno`` key.
    lineno : int
        Line number identifying the person to find.

    Returns
    -------
    dict
        The matching person record.

    Raises
    ------
    ValueError
        If no record with the given line number exists.
    """
    for person in data:
        if person["a_lineno"] == lineno:
            return person
    # Bug fix: the original built the message from two adjacent f-strings
    # ("...not found in" "household.") which concatenated without a space,
    # producing "not found inhousehold.".
    raise ValueError(f"Person with line number {lineno} not found in household.")
c568ba1ee56f016f6f082a9a9a65b20cf399968a
32,308
def fuzzy_list_match(line, ldata):
    """Return the first entry of *ldata* that is a substring of *line*.

    Returns a ``(found, match)`` tuple: ``(True, entry)`` for the first
    matching entry, or ``(False, None)`` when nothing matches.

    Examples
    --------
    >>> fuzzy_list_match("data tmp", ["other", "data", "else"])
    (True, 'data')
    >>> fuzzy_list_match("thing", ["other", "else"])
    (False, None)
    """
    hit = next((candidate for candidate in ldata if candidate in line), None)
    if hit is None:
        return False, None
    return True, hit
8cbc92634859b991ac77b6bfab7c8825b9b108bb
32,309
def tikznode(x, y, text, modifier=""):
    """Build a TikZ ``\\draw ... node{...};`` command at (x, y).

    *modifier* is an optional draw-style string inserted before ``node``.
    Returns the empty string when *text* is empty.
    """
    if text == "":
        return ""
    return f"\\draw ({x}, {y}) {modifier} node{{{text}}};"
173d13a038bcda3da75aa8d203f63fce5f22ba92
32,310
def data_is_complete(stream):
    """Return True when the first three traces hold 3003 samples in total.

    That corresponds to 1001 points per component (presumably a 3-component
    seismic window — confirm against the caller).
    """
    total = sum(len(stream[i].data) for i in range(3))
    return total == 3003
b9b510d7fd1042a3f96aaae284f050a864b8d541
32,311
from datetime import datetime


def ensure_date(s):
    """Validate that *s* is a ``YYYY-MM-DD`` date string.

    Returns *s* unchanged when it parses as a date, otherwise None.
    ``None`` input yields None.
    """
    if s is None:
        return None
    try:
        datetime.strptime(s, '%Y-%m-%d')
    except ValueError:
        return None
    return s
1386de8c71a944bca8a9501e8cf191d35d80e9e3
32,312
def get_literal_list(f):
    """Collect the literals of every clause of formula *f* into one list.

    @param f: iterable of clauses, each exposing a ``literals`` sequence
    @return: flat list of all literals (duplicates preserved, in clause order)
    """
    literals = []
    for clause in f:
        literals.extend(clause.literals)
    return literals
6f33e09e9e0fbb5c0c2991997c6de7bef0e284e6
32,313
def distance(x0, y0, x1, y1):
    """Euclidean distance between points (x0, y0) and (x1, y1)."""
    return ((x1 - x0) ** 2 + (y1 - y0) ** 2) ** 0.5
6e857156f16e5d1bfd8a26686f1df921dfa60b62
32,314
from pathlib import Path
from typing import Tuple


def parse_delta(filename: Path) -> Tuple[int, int]:
    """Return (alignment length, similarity errors) tuple from passed .delta.

    :param filename: Path, path to the input .delta file

    Extracts the aligned length and number of similarity errors for each
    aligned uniquely-matched region, and returns the cumulative total for
    each as a tuple.

    Similarity errors are defined in the .delta file spec (see below) as
    non-positive match scores. For NUCmer output, this is identical to the
    number of errors (non-identities and indels).

    Delta file format has seven numbers in the lines of interest:
    see http://mummer.sourceforge.net/manual/ for specification

    - start on query
    - end on query
    - start on target
    - end on target
    - error count (non-identical, plus indels)
    - similarity errors (non-positive match scores)
      [NOTE: with PROmer this is equal to error count]
    - stop codons (always zero for nucmer)

    To calculate alignment length, we take the length of the aligned region of
    the reference (no gaps), and process the delta information. This takes the
    form of one value per line, following the header sequence. Positive values
    indicate an insertion in the reference; negative values a deletion in the
    reference (i.e. an insertion in the query). The total length of the alignment
    is then:

    reference_length + insertions - deletions

    For example:

    A = ABCDACBDCAC$
    B = BCCDACDCAC$
    Delta = (1, -3, 4, 0)
    A = ABC.DACBDCAC$
    B = .BCCDAC.DCAC$

    A is the reference and has length 11. There are two insertions (positive
    delta), and one deletion (negative delta). Alignment length is then
    11 + 1 = 12.
    """
    # in_aln: currently inside an alignment entry's delta-number list;
    # aln_length / sim_errors: cumulative totals returned to the caller.
    in_aln, aln_length, sim_errors = False, 0, 0
    # NOTE(review): an empty line in the file would make line[0] raise
    # IndexError — presumably .delta files never contain blank lines; verify.
    for line in [_.strip().split() for _ in filename.open("r").readlines()]:
        if line[0] == "NUCMER" or line[0].startswith(">"):  # Skip headers
            continue
        # Lines with seven columns are alignment region headers:
        if len(line) == 7:
            # Aligned span length from the first two coordinates (inclusive).
            # NOTE(review): per the docstring's column list these are *query*
            # coordinates, though the comment below says reference — confirm.
            aln_length += abs(int(line[1]) - int(line[0])) + 1  # reference length
            sim_errors += int(line[4])  # count of non-identities and indels
            in_aln = True
        # Lines with a single column (following a header) report numbers of symbols
        # until next insertion (+ve) or deletion (-ve) in the reference; one line per
        # insertion/deletion; the alignment always ends with 0
        # NOTE(review): startswith("0") also matches any value whose text begins
        # with '0' (e.g. a hypothetical "01"), not only the terminating "0".
        if in_aln and line[0].startswith("0"):
            in_aln = False
        elif in_aln:
            # Add one to the alignment length for each reference insertion; subtract
            # one for each deletion
            # NOTE(review): the code below does not match that comment — it adds 1
            # for every val < 1 (deletions AND zero), never subtracts, and the
            # `elif val == 0` branch is unreachable because val < 1 already
            # captures 0 (the terminating "0" is normally consumed by the
            # startswith("0") branch above). Flagged for the author to confirm.
            val = int(line[0])
            if val < 1:  # deletion in reference
                aln_length += 1
            elif val == 0:  # ends the alignment entry
                in_aln = False
    return aln_length, sim_errors
724d1dcfcc16174fac45d13f049eb56d039cff2b
32,316
# NOTE(review): these imports pull T and A from the *regex* module, so
# T == re.TEMPLATE and A == re.ASCII (integer flag constants). That is almost
# certainly not what the physics below intends — T and A read like a period /
# line parameter for a transmission line. TODO: confirm the real source of
# these constants (probably a local constants module).
from re import T
from re import A
import math


def F_squared_open(omega):
    """For an open circuit at the end of the transmission line.

    (The impedance of a transmission line with an open circuit at the end
    is the same as that of a line with a short at the end and which is an
    additional 1/4 wavelength longer.)

    :param omega: angular frequency (presumably rad/s — verify with caller)
    :return: squared magnitude of the transfer function F
    """
    # Electrical angle of the line at this frequency.
    theta = omega * T
    # Ratio of cotangent-corrected terms; diverges where tan(theta) == 0.
    return ((A + 1) ** 2 + 1 / math.tan(theta) ** 2) / (
        ((A - 1) / math.tan(theta)) ** 2 + (A + 1) ** 2
    )
f6b5a38a6846f2eb577b184be7595c06abf3fdde
32,317
import numpy


def distance(p, q):
    """Compute Euclidean, D4 and D8 distances between two points.

    Args:
        p : First point — tuple, list or array.
        q : Second point — tuple, list or array.

    Returns:
        List ``[dE, d4, d8]`` of the Euclidean, city-block (D4) and
        chessboard (D8) distances between *p* and *q*.
    """
    p = numpy.asarray(p).reshape(-1)
    q = numpy.asarray(q).reshape(-1)
    assert p.size == q.size
    # Per-coordinate absolute differences drive all three metrics.
    diff = numpy.abs(p - q)
    euclidean = numpy.sqrt(numpy.sum(diff ** 2))
    city_block = numpy.sum(diff)       # D4
    chessboard = numpy.max(diff)       # D8
    return [euclidean, city_block, chessboard]
cd3bafc6ddc38fe0dd6a8d2374e7477324a5f1d4
32,319
def find_peak(list_of_integers):
    """Find a peak in an unsorted list of integers.

    A peak is an element not smaller than its neighbours.  Uses a binary
    divide-and-conquer recursion, so runtime is O(log n).

    Args:
        list_of_integers (list): values to search.

    Returns:
        A peak value, or None for an empty list.
    """
    values = list_of_integers
    n = len(values)
    # Guard clauses for trivial sizes and boundary peaks.
    if n == 0:
        return None
    if n == 1:
        return values[0]
    if values[1] <= values[0]:
        return values[0]
    if values[-1] >= values[-2]:
        return values[-1]
    mid = n // 2
    if values[mid - 1] <= values[mid] >= values[mid + 1]:
        return values[mid]
    # Recurse toward the strictly-rising side; a peak must exist there.
    if values[mid + 1] > values[mid]:
        return find_peak(values[mid + 1:])
    if values[mid - 1] > values[mid]:
        return find_peak(values[:mid])
36564fb46b597000593ac4c4e894325957793e04
32,320
import re


def reduceBlank(text, keepNewLines=False):
    """Strip *text* and collapse every run of whitespace to one space.

    With ``keepNewLines=True``, a whitespace run that contains a newline or
    carriage return collapses to a single '\\n' instead of a space.
    Returns None for None input.
    """
    if text is None:
        return None
    text = text.strip()
    if keepNewLines:
        # Normalise CR to LF, collapse newline runs, then squeeze spaces/tabs.
        text = re.sub(r'\r', '\n', text)
        text = re.sub(r'\s*\n+\s*', '\n', text)
        return re.sub(r'[ \t\f\v]+', ' ', text)
    return re.sub(r'\s+', ' ', text)
9203db3cb6bf3d1dccf5b175725a21e1e94ae812
32,321
import torch


def generate_uniform_mask(features, missing_rate):
    """Draw a random boolean missingness mask matching *features*.

    Each entry is independently marked missing with probability
    ``missing_rate`` (uniform draws compared against the rate).

    Parameters
    ----------
    features : torch.Tensor
    missing_rate : float

    Returns
    -------
    torch.Tensor
        Boolean mask; ``mask[i][j]`` is True if ``features[i][j]`` is missing.
    """
    draws = torch.rand(size=features.size())
    return draws <= missing_rate
d47b7997ce9014264f89e2a1afffec12b4f4a4bb
32,322
def evaluate_one_operation(exp: str, operator: str) -> str:
    """Evaluate the first *operator* occurrence in a flat infix expression.

    The matched operator and its two operands are replaced by their result;
    the rest of the expression is returned unchanged.
    """
    # A flat expression contains no brackets.
    assert '(' not in exp
    assert ')' not in exp

    terms = exp.split(' ')
    # First occurrence of the requested operator in the term list.
    op_index = terms.index(operator)

    # Infix: operands sit immediately either side of the operator.
    left = int(terms[op_index - 1])
    right = int(terms[op_index + 1])

    assert operator in ['+', '*']
    value = left + right if operator == '+' else left * right

    # Splice the result in place of the three consumed terms.
    rebuilt = terms[:op_index - 1] + [str(value)] + terms[op_index + 2:]
    return ' '.join(rebuilt)
87ca1495d200d69b034f693ac3932f1066c21970
32,323
def GrossRet(r, delta):
    """Compute the gross return on saving, 1 + r - delta.

    Args:
        r (float): rental rate of capital
        delta (float): capital depreciation rate

    Returns:
        (float): gross return on saving
    """
    gross = 1 + r - delta
    return gross
cb9ede129b9ce8d578b3d9343ec95b5e90da56c0
32,326
def get_dhcp_server(network=None, asset=""):
    """Return IP address of DHCP server based on network or asset.

    Args:
        network: string, 'white', 'green' or 'blue'
        asset: string, asset name; its first character ('b'/'g'/'w')
            selects the network when *network* does not match.

    Returns:
        dhcp_server: string IP address, or None when nothing matches.
    """
    # Bug fix: the default was a mutable list (asset=[]), an anti-pattern and
    # inconsistent with the documented string type. An empty string behaves
    # identically for the default case (""[:1] matches no prefix).
    prefix = asset[:1]
    if network == 'blue' or prefix == 'b':
        return '172.16.2.10'
    if network == 'green' or prefix == 'g':
        return '10.0.2.10'
    if network == 'white' or prefix == 'w':
        return '194.47.252.134'
    return None
e6c6a7f8410a4c2151ffb104eca2da380cbbe151
32,328
from pathlib import Path


def get_image_filenames(images_directory, image_band="i", check_band=False):
    """Recursively collect .fits image paths under a directory.

    When ``check_band`` is True, only files whose band letter (parsed from
    the HSC naming scheme, e.g. ``HSC-I_9813_4c3.fits``) matches
    ``image_band`` are kept.

    WARNING: band parsing assumes the HSC filename format.
    """
    filenames = []
    for path in Path(images_directory).rglob('*.fits'):
        name = str(path)
        if not check_band:
            filenames.append(name)
            continue
        # Band letter is between '-' and '_' in the basename, e.g. HSC-I_...
        basename = name.split("/")[-1]
        band = basename.split("_")[0].split("-")[1].lower()
        if band == image_band:
            filenames.append(name)
    return filenames
61e9d1a396570fed6fbaa22ba0b0958f08988037
32,330
def create_skeleton(segments, html):
    """Create a skeleton file.

    Replaces the first occurrence of each translation segment in *html*
    with a numbered placeholder of the form ``{{%N%}}`` (1-based).

    :param segments: translation segments
    :type segments: list
    :param html: source html document
    :type html: str
    :return: document skeleton
    :rtype: str
    """
    for index, segment in enumerate(segments, 1):
        placeholder = '{{{{%{}%}}}}'.format(index)
        html = html.replace(segment, placeholder, 1)
    return html
a7b17830b3e5ac9a45a198a4675e26de34ee7ce1
32,331
def update_replace_copy_dev(playbook):
    """Strip developer-clone suffixes from a playbook dict.

    When a developer clones a playbook/integration/script it is renamed
    with a ``_copy`` or ``_dev`` suffix; this removes those suffixes from
    the playbook's own name/id and from every task's script/playbook
    references.

    :param playbook: playbook dict loaded from yaml
    :return: updated playbook dict
    """
    def _clean(name):
        # Remove every occurrence of the clone suffixes.
        return name.replace("_copy", "").replace("_dev", "")

    playbook["name"] = _clean(playbook["name"])
    playbook["id"] = _clean(playbook["id"])
    for task_id, playbook_task in playbook.get("tasks", {}).items():
        inner_task = playbook_task.get("task", {})
        for field in ("scriptName", "playbookName", "script"):
            if field in inner_task:
                playbook["tasks"][task_id]["task"][field] = _clean(inner_task[field])
    return playbook
d0c1ac2b823d8698faec924972cd9a3c6ef00681
32,333
def med_min_2darray(a):
    """Return the smallest integer found anywhere in a list of lists."""
    return min(map(min, a))
c30a7588a11e2c23829fe73b6d84ffdaee8fb321
32,334
def define_title(overlap_kind):
    """Choose the plot-title suffix for the overlap kind.

    :param bool overlap_kind: True for per-pixel overlap, False for per-shoebox
    :returns: title string
    """
    return "per pixel" if overlap_kind else "per shoebox"
60b08193f083277eacf7314e88831deab23940a4
32,335
import sqlite3


def read_metadata_table(db_file_name):
    """Read every row from the ``metadata`` table of an SQLite database.

    :param db_file_name: path to the SQLite database file
    :return: list of row tuples from the ``metadata`` table
    """
    conn = sqlite3.connect(db_file_name)
    try:
        c = conn.cursor()
        # Read all rows from metadata TABLE
        c.execute('SELECT * FROM metadata')
        metadata_contents = c.fetchall()
    finally:
        # Bug fix: the original leaked the connection when the query raised
        # (e.g. missing table); always close it.
        conn.close()
    return metadata_contents
f743f49c7608816e02d110ddd8e224b110fbe4cf
32,336