content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def read_projects(path):
    """Load openstack project names from a text file.

    Blank lines and lines beginning with ``#`` are skipped.

    Example file::

        $ cat tools/oslo.txt
        openstack/oslo.i18n

    :param path: path of the project list file
    :return: list of project name strings
    """
    projects = []
    with open(path) as handle:
        for raw_line in handle.read().splitlines():
            stripped = raw_line.strip()
            if stripped and not stripped.startswith("#"):
                projects.append(stripped)
    return projects
41defcbf1b39dd8e0e29e15b0704278695d760b9
43,007
def annotation_filter(metadata):
    """Template helper: map the conversion source to its status-icon HTML."""
    # Lookup table keyed by the metadata's declared conversion source.
    icons = {
        "automatic": '<span class="hint--top hint--info" data-hint="Automatic conversion"><i class="fa fa-cogs"></i></span>',
        "semi-automatic": '<span class="hint--top hint--info" data-hint="Automatic conversion with manual corrections"><i class="fa fa-cogs"></i><i class="fa fa-check"></i></span>',
        "manual": '<span class="hint--top hint--info" data-hint="Full manual check of the data"><i class="fa fa-user"></i></span>',
    }
    unknown = '<span class="hint--top hint--info" data-hint="Unknown">?</span>'
    return icons.get(metadata["source"]["all"], unknown)
1f049685ba4e5ea886f4804ca341b5d68d86f14f
43,009
def _ok_to_all_filter(x): """This is the default filter function.""" return True
bf78924eb68b21f736366007f083bfb09c8dedcd
43,010
def get_urls(session, name, data, find_changelogs_fn, **kwargs):
    """
    Gets URLs to changelogs.

    :param session: requests Session instance
    :param name: str, package name
    :param data: dict, meta data
    :param find_changelogs_fn: function, find_changelogs
    :return: tuple, (set(changelog URLs), set(repo URLs))
    """
    # If this package has valid meta data, build up a list of URL candidates
    # we can possibly search for changelogs on.
    if "versions" in data:
        candidates = set()
        for version, item in data["versions"].items():
            homepage = item.get("homepage")
            if homepage is not None:
                if isinstance(homepage, list):
                    # Bug fix: set.add(*list) raises TypeError whenever the
                    # list doesn't have exactly one element; update() accepts
                    # any number of homepages.
                    candidates.update(homepage)
                else:
                    candidates.add(homepage)
            repository = item.get("repository")
            if repository is not None:
                if "url" in repository:
                    repo = repository["url"]
                elif "path" in repository:
                    repo = repository["path"]
                else:
                    continue
                # Normalize git URLs into browsable https URLs.
                repo = repo.replace("git://", "https://").replace(".git", "")
                candidates.add(repo)
        return find_changelogs_fn(session=session, name=name, candidates=candidates)
    return set(), set()
73f198080e88c88c2fa345711e43c44dc26d87d7
43,011
def parse_seed(seeds):
    """Parse the '|'-separated seed argument supplied by the user.

    :param seeds: raw seed string, e.g. ``"a|b|c"``
    :return: list of seed strings
    """
    cleaned = seeds.strip()
    return cleaned.split("|")
7aa62d5dd868256434359ed78ec5bd314c8d2984
43,012
import re


def reformat_id(s):
    """Normalise an ID string to lowercase, single-dash-delimited form.

    Valid IDs must be lowercase and single dash-delimited.
    """
    # Drop apostrophes first so they don't leave a dash behind.
    cleaned = s.replace("'", "")
    # Replace separator/punctuation characters with dashes...
    cleaned = re.sub(r"[ \"'\.?!/]", "-", cleaned)
    # ...then collapse runs of dashes into a single one.
    cleaned = re.sub(r"\-+", "-", cleaned)
    return cleaned.lower()
0043304ace403b864b0b7e2d9397fe2f6bc8e45e
43,013
def compare(a, b):
    """None-aware three-way comparison (None sorts before everything).

    >>> compare(None, None)
    0
    >>> compare(None, 12)
    -1
    >>> compare(12, None)
    1
    >>> compare(12, 12)
    0
    >>> compare(12, -12)
    1
    >>> compare(-12, 12)
    -1
    """
    if a is None and b is None:
        return 0
    if a is None:
        return -1
    if b is None:
        return 1
    # Standard cmp() idiom: True/False subtract to 1, 0 or -1.
    return (a > b) - (a < b)
c5fd25f613dc0727c0db0a10dbad4ef3b3d93235
43,014
def GetDetails(folder):
    """
    Recursively collect [date, name, url] entries from a Windows shell
    folder and all of its sub-folders.

    2021/5/21 koizumi
    """
    rows = []
    for item in folder.Items():
        if item.IsFolder:
            # Recurse into the sub-folder and merge its rows.
            sub_rows = GetDetails(item.GetFolder)
            if sub_rows is not None:
                rows.extend(sub_rows)
        else:
            # Shell detail column indices: 0 = url, 1 = name, 2 = date.
            rows.append([
                folder.GetDetailsOf(item, 2),
                folder.GetDetailsOf(item, 1),
                folder.GetDetailsOf(item, 0),
            ])
    return rows
930a1daf9789d670d01c4372a1ae0bce5e5a5189
43,016
def get_fields(data):
    """Get the metadata fields from a given h5 file by scalping.

    :param data: h5py.File-like mapping with a ``'train'`` group
    :return: the ``.attrs`` keys of the first dataset in the train group
    """
    # Bug fix: in Python 3, ``.keys()`` views are not subscriptable, so
    # ``keys()[0]`` raised TypeError.  Grab the first key via an iterator.
    first_key = next(iter(data['train'].keys()))
    return data['train'][first_key].attrs.keys()
3e4cc32cf1488a2deed4f38cb8ddc3cbc7036dbc
43,017
def recursive_search(
    arr: list,
    x: int,
    x_max: int,
    y: int,
    y_max: int,
    word: str,
    index: int,
    w_len: int,
    direction: str,
) -> bool:
    """Check whether ``word[index:]`` matches in ``arr`` starting at (x, y).

    dir:
        None - both directions are available
        h - horizontally
        v - vertically
    """
    # Matched every character of the word.
    if index == w_len:
        return True
    # Walked off the grid before finishing the word.
    if x > x_max or y > y_max:
        return False
    if arr[x][y] != word[index]:
        return False
    if direction == "h":
        return recursive_search(arr, x + 1, x_max, y, y_max, word, index + 1, w_len, "h")
    # Any non-"h" direction continues vertically.
    return recursive_search(arr, x, x_max, y + 1, y_max, word, index + 1, w_len, "v")
36b02446300b74f26ec6328e774eeb16c5a33814
43,018
def center_crop_numpy(img, cropx, cropy):
    """Center-crop an (H, W, C) numpy image array.

    Args:
        img : numpy image array
        cropx : width of crop
        cropy : height of crop

    Returns:
        cropped numpy image array
    """
    height, width = img.shape[:-1]
    # Anchor the crop window at the image centre.
    x0 = width // 2 - cropx // 2
    y0 = height // 2 - cropy // 2
    return img[y0:y0 + cropy, x0:x0 + cropx, :]
ab69271a947906a6ea515f6846d5c09614b15853
43,019
import binascii


def base642bin(base64_str: bytes) -> bytes:
    """Decode a base64-encoded byte string.

    Keyword arguments:
    base64_str -- Base64 representation as bytes

    Returns:
    Decoded raw bytes
    """
    decoded = binascii.a2b_base64(base64_str)
    return decoded
c0d531a6f4db3920c5d3bc1ac7da31151dfef483
43,020
def consecutive_repetitions(string):
    """
    Return the leading run of the first character of a non-empty string.

    @args:
        - string (str): the string to be evaluated.
    """
    first = string[0]
    run = first
    for ch in string[1:]:
        if ch != first:
            break
        run += ch
    return run
d3a6d0b0c42f28e548cee403f7f2131c5780a532
43,021
def formatFieldProperty(fieldName, formatFn, **kwargs):
    """Build a read-only property that applies ``formatFn`` (with any extra
    keyword arguments) to the attribute named ``fieldName``."""
    def _read(self):
        raw = getattr(self, fieldName)
        return formatFn(raw, **kwargs)
    return property(_read)
8571cddb37b2f4f85ebf3f761dbe0779c179edf1
43,023
import json


def extract_genres(genres_str):
    """Extract the genre names from a stringified genre list.

    Arguments:
        genres_str {string} -- string containing the genres

    Returns:
        list -- the extracted genre names
    """
    # The source uses single quotes; swap them so json can parse it.
    normalized = genres_str.replace("'", '\"')
    return [entry['name'] for entry in json.loads(normalized)]
34dcc0ad7927f61610ac393f71bc744fff18e215
43,025
import time


def cookie_to_har(cookie):
    """
    Convert a Cookie instance to a dict in HAR cookie format.
    """
    har = {
        'name': cookie.name,
        'value': cookie.value,
        'secure': cookie.secure,
    }
    if cookie.path_specified:
        har['path'] = cookie.path
    if cookie.domain_specified:
        har['domain'] = cookie.domain
    if cookie.expires:
        # HAR expects an ISO-8601 UTC timestamp.
        har['expires'] = time.strftime(
            "%Y-%m-%dT%H:%M:%SZ", time.gmtime(cookie.expires))
    http_only = cookie.get_nonstandard_attr('HttpOnly')
    if http_only is not None:
        har['httpOnly'] = bool(http_only)
    if cookie.comment:
        har['comment'] = cookie.comment
    return har
bcfe6258a95b4eea0632023399b3540db6574426
43,027
def _grouped_split(text, sep): """Hsplit helper for case where group=True and attach=True (see hsplit docs). Old re.find() method didn't work right when sep had special characters (e.g. "\n"). """ res = [] toks = text.split(sep) max_idx = len(toks) - 1 for i, tok in enumerate(toks): if tok: if i < max_idx: tok += sep res.append(tok) elif i < max_idx: if res: res[-1] += sep else: res.append(sep) return res
9ddf4735def367dde2e78f77958299adf0989b20
43,028
def questionize_label(word):
    """Convert a word to a true/false style question format.

    If a user follows the convention of using `is_something`, or
    `has_something`, for a boolean value, the *property* text will
    automatically be converted into a more human-readable format, e.g.
    'something?' with the prefix stripped.

    Args:
        word (str): The string to format.

    Returns:
        word (str): The formatted word.
    """
    if word is None:
        return ''
    for prefix in ('is_', 'has_'):
        if word.startswith(prefix):
            return '{0}?'.format(word[len(prefix):])
    return word
d69e100a3b37f2632a6e0400c395f99fcba00b4c
43,029
def is_not_empty_value(value):
    """
    Checks for empty response values.
    Demisto recommends returning the None type if a value is empty, rather
    than an empty string/list.
    """
    empties = ("", [], [""])
    return all(value != empty for empty in empties)
202edb774c04b00095be9c96b65614b1dbfdbb28
43,030
import math


def get_distance(a, b):
    """Return Euclidean distance between points a and b."""
    dx = a.x - b.x
    dy = a.y - b.y
    return math.sqrt(dx * dx + dy * dy)
89c66e586a37a88dbce25460ad3310a035044c73
43,031
def sortList(arr, sort_by, reverse):
    """
    Sort the products list by the requested key.

    Parameters:
        arr: list of product dicts with "title", "price" and "rating" keys
        sort_by: "price" or "rating"
        reverse: bool, sort descending when True

    Returns:
        Sorted list; items whose sort key is "" (missing) are appended at the
        end, and items with an empty title or non-positive numeric price are
        dropped.
    """
    def _keep(item):
        # Bug fix: the old filter evaluated ``x["price"] > 0`` directly,
        # which raises TypeError on Python 3 when price is "" — even though
        # the code below explicitly expects ""-priced items to survive.
        price = item["price"]
        return item["title"] != "" and (price == "" or price > 0)

    arr = [item for item in arr if _keep(item)]

    if sort_by in ("price", "rating"):
        # Items missing the sort key go to the back, unsorted.
        missing = [item for item in arr if item[sort_by] == ""]
        present = [item for item in arr if item[sort_by] != ""]
        present.sort(key=lambda item: item[sort_by], reverse=reverse)
        return present + missing

    return arr
3254ef14ec4aae155089a1ef3122b47cf4e059c6
43,032
def get_master_ip(k8s_conf):
    """
    Return the IP addresses of every node configured as a master.
    """
    nodes = k8s_conf['kubernetes']['node_configuration']
    return [
        node['host'].get('ip')
        for node in nodes
        if node['host'].get('node_type') == 'master'
    ]
a8556c4bad23d884b65b8c74344cca1c0d34b40a
43,033
def list_string(alist, key=lambda a: a):
    """Given items a, b, ..., x, y, z in an array, this will print
    "a, b, ..., x, y and z"
    """
    if not alist:
        return "[empty]"
    if len(alist) == 1:
        return str(key(alist[0]))
    if len(alist) == 2:
        return "{} and {}".format(str(key(alist[0])), str(key(alist[1])))
    # Three or more: Oxford comma before the final item.
    parts = [str(key(item)) for item in alist]
    return "{}, and {}".format(", ".join(parts[:-1]), parts[-1])
1b7b2bf14eb3b9d91abe7ef60937c71dd7135544
43,034
def alive_cell_neighbours(x, y, board):
    """
    Count the live neighbours of the cell at (x, y).

    E.g. to read the state of the cell above, use board[x - 1][y].

    (x - 1, y - 1) | (x - 1, y) | (x - 1, y + 1)
    --------------------------------------------
      (x, y - 1)   |   (x, y)   |   (x, y + 1)
    --------------------------------------------
    (x + 1, y - 1) | (x + 1, y) | (x + 1, y + 1)
    """
    total = sum(
        board[x + dx][y + dy]
        for dx in (-1, 0, 1)
        for dy in (-1, 0, 1)
    )
    # The 3x3 sum includes the cell itself; subtract it back out.
    return total - board[x][y]
ba4bc969ee0994170f994d926e605b6c72cadf35
43,036
def no_antislash(path):
    # type: (str) -> str
    r"""Convert backslash path separators to forward slashes.

    \dir1\dir2\file.txt -> /dir1/dir2/file.txt
    \dir1\dir2\ -> /dir1/dir2/
    """
    return '/'.join(path.split('\\'))
df7a421b56e6057b6782db5e1d116e0ad309b107
43,037
def square_crop(im, target_size=None):
    """
    Crop image to `target_size`. If that's None the image is squared to the
    smallest side.
    """
    width, height = im.size[0], im.size[1]
    side = target_size if target_size else min(width, height)
    # Centre the crop box in both dimensions.
    left = (width - side) / 2
    top = (height - side) / 2
    return im.crop((left, top, left + side, top + side))
28fb58b21ca4b15e6d48c9345fb31aa333cd7276
43,038
def output(s):
    """Render an 81-cell sudoku state (0 = blank) as an ASCII grid."""
    lines = [' 1 2 3 4 5 6 7 8 9', ' +-----+-----+-----+']
    for row in range(9):
        cells = str(row + 1) + '|'
        for col in range(9):
            v = s[row * 9 + col]
            # Blank cells print as a space.
            cells += ' ' if v == 0 else str(v)
            # Box boundary after every third column.
            cells += '|' if col % 3 == 2 else ' '
        lines.append(cells)
        # Box boundary after every third row.
        if row % 3 == 2:
            lines.append(' +-----+-----+-----+')
    return '\n'.join(lines) + '\n'
f0c80b7ec7350813dd80f2aab8c1f6f3e05847da
43,040
def clean_df(df):
    """
    Clean the used-car dataframe: impute missing values and normalise labels.

    Arg:
        df(dataframe): raw scraped data

    Returns:
        df(dataframe): the cleaned dataframe
    """
    cols = ['marca', 'modelo', 'preço', 'transmissão', 'tipo', 'ano',
            'kilometragem', 'potência', 'combustível', 'direção', 'cor',
            'portas', 'financiamento']

    # Impute kilometragem with the column mean FIRST.  Bug fix: previously
    # the generic 0 -> 'sem informacao' replacement ran before this step,
    # turning the zeros into strings, which broke .mean() on the now-mixed
    # column and left nothing for replace(0, ...) to find.
    df['kilometragem'] = df['kilometragem'].replace(
        0, int(df['kilometragem'].mean()))

    # Remaining zeros in any column mean "no information".
    df[cols] = df[cols].replace(0, 'sem informacao')

    # Normalise brand names.
    df[cols] = df[cols].replace('gmchevrolet', 'chevrolet')
    df[cols] = df[cols].replace('vwvolkswagen', 'volkswagen')

    # Normalise engine-power range labels.
    df[cols] = df[cols].replace('2.02.9', '2.0_2.9')
    df[cols] = df[cols].replace('4.0oumais', '4.0+')

    return df
d9c7f7af28c2452cd43625c94971d1c485627f78
43,041
import os


def random_bits(bitnum):
    """Return a cryptographically random integer with at most ``bitnum`` bits.

    :param bitnum: number of random bits desired (>= 0)
    :return: int in the range [0, 2**bitnum)
    """
    if bitnum <= 0:
        return 0
    # Bug fix: the old implementation drew a single byte, so for bitnum > 8
    # the high bits were always zero.  Draw enough bytes to cover bitnum.
    nbytes = (bitnum + 7) // 8
    return int.from_bytes(os.urandom(nbytes), 'big') % 2 ** bitnum
1eb2a6e6347d5e9dff263c3eb2c70f705f147658
43,042
import argparse
import pickle
import csv
import os
import gzip


def main(argv):
    """\
    """
    # NOTE: the (empty) docstring doubles as the argparse description below,
    # so it is deliberately left unchanged.
    p = argparse.ArgumentParser(description=main.__doc__)
    p.add_argument("--multi-idx")
    p.add_argument("--info-csv")
    p.add_argument("--output-cdbg-record")
    p.add_argument("--output-cdbg-annot")
    args = p.parse_args(argv)

    # All three of these options are required for the script to run.
    assert args.multi_idx
    assert args.output_cdbg_record
    assert args.output_cdbg_annot

    # Load the pickled mappings between sequence records and cDBG node IDs.
    with open(args.multi_idx, "rb") as fp:
        catlas_base, records_to_cdbg, cdbg_to_records = pickle.load(fp)

    print(
        f"loaded {len(cdbg_to_records)} cdbg to sequence record mappings for {catlas_base}"
    )

    # Index the per-contig info CSV by integer contig id.
    cdbg_info = {}
    with open(args.info_csv, "rt") as fp:
        r = csv.DictReader(fp)
        for row in r:
            cdbg_id = int(row["contig_id"])
            cdbg_info[cdbg_id] = row

    print(f"loaded {len(cdbg_info)} info records for {catlas_base}")

    # Emit one summary row per record plus a gzipped cdbg-id list per record,
    # both placed next to the output record CSV.
    with open(args.output_cdbg_record, "wt") as fp:
        w = csv.writer(fp)
        w.writerow(
            [
                "filename",
                "record_name",
                "catlas_base",
                "cdbg_file",
                "prospective_reads_file",
                "total_kmers",
                "mean_abund",
            ]
        )

        filenum = 0
        for (filename, record_name), cdbg_ids in records_to_cdbg.items():
            if not cdbg_ids:
                continue

            # write out cdbg id list
            cdbg_file = f"record{filenum}-nbhd.cdbg_ids.txt.gz"
            cdbg_file = os.path.join(os.path.dirname(args.output_cdbg_record), cdbg_file)
            with gzip.open(cdbg_file, "wt") as outfp:
                outfp.write("\n".join([str(i) for i in cdbg_ids]))

            # generate *name* of reads.fa.gz file to make
            reads_file = f"record{filenum}-nbhd.reads.fa.gz"
            reads_file = os.path.join(os.path.dirname(args.output_cdbg_record), reads_file)

            # generate info: total k-mers, mean_abund / weighted
            total_kmers = 0
            summed_abund = 0
            for cdbg_id in cdbg_ids:
                info = cdbg_info[cdbg_id]
                n_kmers = int(info["n_kmers"])
                mean_abund = float(info["mean_abund"])

                total_kmers += n_kmers
                summed_abund += n_kmers * mean_abund

            # k-mer-weighted average abundance across this record's contigs.
            average_abund = summed_abund / total_kmers

            w.writerow(
                [
                    filename,
                    record_name,
                    catlas_base,
                    cdbg_file,
                    reads_file,
                    total_kmers,
                    average_abund,
                ]
            )
            filenum += 1

    # NOTE(review): after the loop ``filenum`` already equals the number of
    # records written, so "filenum+1" looks like an off-by-one — confirm.
    print(f"wrote {filenum+1} files containing cdbg_ids.")

    # write out cdbg id to record mapping, one per row
    with open(args.output_cdbg_annot, "wt") as fp:
        w = csv.writer(fp)
        w.writerow(
            [
                "filename",
                "record_name",
                "record_number",
                "catlas_base",
                "cdbg_id",
            ]
        )

        filenum = 0
        for (filename, record_name), cdbg_ids in records_to_cdbg.items():
            if not cdbg_ids:
                continue

            # Record numbering mirrors the file names emitted above.
            record_number = f"record{filenum}"
            for cdbg_id in cdbg_ids:
                cdbg_id = str(cdbg_id)
                w.writerow(
                    [
                        filename,
                        record_name,
                        record_number,
                        catlas_base,
                        cdbg_id,
                    ]
                )

            filenum += 1

    return 0
e7c2524da375f1dadbd94484407fc187c463e0e5
43,043
def annotate(origin, reference, ops, annotation=None):
    """
    Uses a list of operations to create an annotation for how the operations
    should be applied, using an origin and a reference target.

    :param origin: The original iterable
    :type origin: str or list
    :param reference: The original target
    :type reference: str or list
    :param ops: The operations to apply
    :type ops: list of Operation
    :param annotation: An optional initialization for the annotation.  If
        none (or empty) is provided, the annotation starts from the origin;
        otherwise the provided list is modified by this method.
    :return: An annotation based on the operations
    :rtype: list of str
    """
    annotation = annotation or list(origin)
    for op in ops:
        op.annotate(annotation, origin, reference)
    # Fold '+'-prefixed insertion labels into the preceding label, walking
    # backwards so deletions never disturb indices still to be visited.
    i = len(annotation) - 1
    while i > 0:
        if annotation[i][0] == '+':
            annotation[i - 1] += annotation[i]
            del annotation[i]
        i -= 1
    return annotation
d6012776b26d90944af535ce04a9d25e6298ffe7
43,044
import queue


def pop_with_ack(q, max_batch_size: int):
    """
    Drain up to ``max_batch_size`` (msg, ack) pairs from ``q``, blocking
    only for the first one.

    Args:
        q
        max_batch_size (int)

    Returns:
        (msgs, acks) — the batch transposed into parallel tuples
    """
    assert max_batch_size > 0
    # Always block for at least one item, then take whatever else is ready.
    batch = [q.get(block=True)]
    while len(batch) < max_batch_size:
        try:
            batch.append(q.get_nowait())
        except queue.Empty:
            break
    return list(zip(*batch))
d5466a9ccd67df51753b756d560f4d91ac37831b
43,045
from typing import Dict
from typing import Callable
import torch


def evaluate_dict(
    fns_dict: Dict[str, Callable],
    source: torch.Tensor,
    target: torch.Tensor,
    reduction: str = "mean"
) -> Dict:
    """Evaluate a dictionary of functions on the same (source, target) pair.

    Examples
    --------
    > evaluate_dict({'l1_loss': F.l1_loss, 'l2_loss': F.l2_loss}, a, b)

    Will return

    > {'l1_loss': F.l1_loss(a, b, reduction='mean'),
    >  'l2_loss': F.l2_loss(a, b, reduction='mean')}

    Parameters
    ----------
    fns_dict: Dict[str, Callable]
    source: torch.Tensor
    target: torch.Tensor
    reduction: str

    Returns
    -------
    Dict[str, torch.Tensor]
        Evaluated dictionary.
    """
    return {
        name: fn(source, target, reduction=reduction)
        for name, fn in fns_dict.items()
    }
3ff55304f2b16440683ed80b7f5447ef6e9782da
43,047
def replace_apostrophes(input: str) -> str:
    """Wrap ``input`` for safe use in an XPath filter expression.

    Apostrophes cannot appear inside an XPath single-quoted literal, so
    values containing one are rebuilt with concat().

    Args:
        input (str | int): input

    Returns:
        str: XPath filter expression with apostrophes handled.
    """
    if not isinstance(input, str):
        return str(input)
    if "'" not in input:
        return "'" + input + "'"
    # Quote each fragment and stitch them with a double-quoted apostrophe.
    quoted = ["'" + part + "'" for part in input.split("'")]
    return "concat(" + ',"\'",'.join(quoted) + ")"
11df3265a8ed69999bf43d427dd8ec66ee88ccfb
43,048
def my_round(x, base=15):
    """Round ``x`` to the nearest multiple of ``base``.

    Args:
        x (TYPE): value to round
        base (int, optional): rounding granularity

    Returns:
        TYPE: nearest multiple of ``base`` as an int
    """
    # https://stackoverflow.com/questions/2272149/round-to-5-or-other-number-in-python
    multiples = round(float(x) / base)
    return int(base * multiples)
106580e2ffa717534973a538f95bc8a2a3ca3af0
43,049
def read_fasta_file(lines):
    """Read a reference FASTA file.

    This function is built for many entries in the FASTA file but we should
    be loading in one reference at a time for simplicity's sake.  If we need
    to extract from multiple references then we should just run the program
    multiple times instead.

    Parameters
    ----------
    lines: list of str
        - Output from file.readlines()

    Returns
    -------
    entries: dict
        key: Name of the FASTA entry
        value: DNA sequence of the FASTA entry
    """
    entries = dict()

    # Read sequences
    cur_entry = ""
    cur_seq = ""
    for i, line in enumerate(lines):
        # Strip whitespace
        line = line.strip()

        # If not the name of an entry, add this line to the current sequence
        # (some FASTA files will have multiple lines per sequence)
        if ">" not in line:
            # Skip if empty
            if not line:
                pass
            else:
                cur_seq = cur_seq + line
                # Force to uppercase
                _cur_seq = list(cur_seq.upper())

                # Throw an error for non canonical bases
                # https://www.bioinformatics.org/sms/iupac.html
                # for char in _cur_seq:
                #     if char not in 'ATCGURYSWKMBDHVN':
                #         error_msg = 'Non canonical base: \"{}\" in sequence {} on line {}.'.format(char, line, i)
                #         raise Exception(error_msg)

                # IUPAC also defines gaps as '-' or '.',
                # but the reference shouldn't have gaps.
                # Maybe I can add this in later...

                # Replace the sequence with the edited one
                cur_seq = "".join(_cur_seq)

        # Start of another entry = end of the previous entry
        # (also triggers on the final input line to flush the last entry)
        if ">" in line or i == (len(lines) - 1):
            # Avoid capturing the first one and pushing an empty sequence
            if cur_entry:
                entries[cur_entry] = cur_seq

            # Clear the entry and sequence
            cur_entry = line[1:]
            # Ignore anything past the first whitespace
            if cur_entry:
                cur_entry = cur_entry.split()[0]
            cur_seq = ""

    return entries
bba7b647e55a183b97beeeeb845ee71321ac5169
43,052
import json


def j(d):
    """
    Render a dict as a pretty-printed JSON string (dropping any Mongo
    ``_id`` key).  Inputs that can't be treated as a dict / serialized are
    returned unchanged.
    """
    try:
        d.pop('_id', None)
        return json.dumps(d, indent=4, default=str, ensure_ascii=False)
    except (AttributeError, TypeError, ValueError):
        # Bug fix: the bare ``except:`` also swallowed SystemExit and
        # KeyboardInterrupt; only the failures pop/dumps raise are expected.
        return d
ce4fe6c15df9c5e73b6ec00cb6795ada4629eb47
43,054
def pos_tag_trigram_frequency(text):
    """
    :type text: Text
    :param text: The text to be analysed
    :rtype List
    :returns Relative frequencies, within ``text``, of the 20 most common
        POS-tag trigrams of the containing document
    """
    doc_trigrams = text.document.pos_tag_trigram_freq.most_common(20)
    tag_fd = text.pos_tag_trigram_freq
    total = tag_fd.N()
    # Guard against an empty distribution to avoid dividing by zero.
    return [
        0.0 if total == 0 else tag_fd[n_gram] / total
        for n_gram, _freq in doc_trigrams
    ]
de49281fa79e73853fd7f6cc4385156a35263e34
43,057
def degree_sign():
    """Return a degree sign for LaTeX interpreter."""
    sign = r'$^\circ$'
    return sign
c8deccf7ee5d80d71b584d81fe11a80dcc90488c
43,059
from typing import Dict
from typing import Any


def rename_dict_keys(input_dict: Dict[str, Any], prefix: str) -> Dict[str, Any]:
    """
    Return a copy of ``input_dict`` whose keys all carry ``prefix``.
    """
    return {prefix + key: value for key, value in input_dict.items()}
32bdd0a6f8046bb6528d2cdaf45c237c972996d1
43,060
import argparse
import sys


def _parse_arguments() -> argparse.Namespace:
    """Parse arguments given by the user.

    Returns
    -------
    args : :class:`argparse.NameSpace()`
        Arguments provided by the user and handled by argparse.
    """
    parser = argparse.ArgumentParser(
        prog="OCR recognition",
        description="Take a picture and return the text in the picture",
    )
    parser.add_argument(
        "input",
        help="Picture to parse.",
    )
    # With no CLI arguments at all, show the usage text instead of the
    # default argparse "missing argument" error.
    if not sys.argv[1:]:
        parser.print_help()
        sys.exit()
    return parser.parse_args()
db5ece190046b0dc562b1ae65fd23ff4c53913b2
43,061
import sqlite3


def conectar():
    """
    Open the local sqlite database, creating the ``produtos`` table on
    first use, and return the connection.
    """
    conn = sqlite3.connect('psqlite3.geek')
    conn.execute(
        """CREATE TABLE IF NOT EXISTS produtos(
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        nome TEXT NOT NULL,
        preco REAL NOT NULL,
        estoque INT NOT NULL);"""
    )
    return conn
ac89aedd54ea569e921f5863d34872230d8fee85
43,063
def fill_launch_template(launch_temp, model_info):
    """
    Substitute the model name into the launch file template.

    :param launch_temp: template text containing a ``$FILENAME$`` placeholder
    :param model_info: object whose ``name`` attribute fills the placeholder
    :return: the filled-in launch file content
    """
    return launch_temp.replace("$FILENAME$", model_info.name)
a6604871aa73d137fbd658a8aa258ccc20914691
43,065
def _get_mapping_dict(ch_names: list[str]) -> dict: """Create dictionary for remapping channel types. Arguments --------- ch_names : list Channel names to be remapped. Returns ------- remapping_dict : dict Dictionary mapping each channel name to a channel type. """ remapping_dict = {} for ch_name in ch_names: if ch_name.startswith("ECOG"): remapping_dict[ch_name] = "ecog" elif ch_name.startswith(("LFP", "STN")): remapping_dict[ch_name] = "dbs" elif ch_name.startswith("EMG"): remapping_dict[ch_name] = "emg" elif ch_name.startswith("EEG"): remapping_dict[ch_name] = "eeg" elif ch_name.startswith( ("MOV", "ANALOG", "ROT", "ACC", "AUX", "X", "Y", "Z", "MISC") ): remapping_dict[ch_name] = "misc" else: remapping_dict[ch_name] = "misc" return remapping_dict
782cda9c43749f71241dbef65f5654eafd7e07f4
43,066
import os


def extract_roots(config_path, config_dict, bin_root):
    """Get the location of the various root directories used by weewx."""
    root_dict = {
        'WEEWX_ROOT': config_dict['WEEWX_ROOT'],
        'CONFIG_ROOT': os.path.dirname(config_path),
    }

    # Locate BIN_ROOT relative to this file unless the caller supplied one.
    root_dict['BIN_ROOT'] = bin_root or os.path.abspath(
        os.path.join(os.path.dirname(__file__), '..'))

    # The user subdirectory, and the extensions directory inside it:
    root_dict['USER_ROOT'] = os.path.join(root_dict['BIN_ROOT'], 'user')
    root_dict['EXT_ROOT'] = os.path.join(root_dict['USER_ROOT'], 'installer')

    # Add SKIN_ROOT only if the configuration declares it:
    try:
        skin_root = config_dict['StdReport']['SKIN_ROOT']
    except KeyError:
        pass
    else:
        root_dict['SKIN_ROOT'] = os.path.abspath(
            os.path.join(root_dict['WEEWX_ROOT'], skin_root))

    return root_dict
2941e4f6b250c85857afd270208e0478fee5ea30
43,067
def hash(h, key):
    """Return ``h[key]``, or an empty dict when the key is absent.

    Function leaves considerable overhead in the grid_detail views.  Each
    element of the list results in two calls to this hash function.  Code
    there, and possibly here, should be refactored.
    """
    default = {}
    return h.get(key, default)
05928d6214f8ba185e95e9b09ba417e1656cb1bb
43,069
from datetime import datetime


def day_and_time_as_string():
    """
    Return the current local day and time formatted like
    ``"Monday, 01. January 2024 09:30AM"``.

    :return: formatted date-time string
    """
    now = datetime.now()
    return now.strftime("%A, %d. %B %Y %I:%M%p")
f768920fa63bdae51d4e73a20af4a5fcc2fbec62
43,070
import argparse


def pos_int_validator(arg):
    """Argparse type callback: accept only strictly positive integers."""
    num = int(arg)
    if num <= 0:
        raise argparse.ArgumentTypeError(
            "{} - must be a positive number".format(arg))
    return num
4405001da4e8c044b3809e73182815292fd9f3ca
43,072
import shutil


def find_program(*programs):
    """Returns the path to the first program in PATH with a name in
    `programs`.  Returns None on failure."""
    return next(
        (path for path in map(shutil.which, programs) if path),
        None,
    )
92d3c49f9b7738c203f4dd1f55252052001ed5b3
43,073
def calculate_overlap_area(cloth: list) -> int:
    """
    Calculate the total area of overlapping claims.

    :param cloth: grid where each cell holds the claims on that square inch
    :return: number of square inches claimed at least twice
    """
    return sum(
        1
        for row in cloth
        for claims in row
        if len(claims) >= 2
    )
fa357c69e095571670ef8650c53d577b42ce09b1
43,074
import io import base64 import numpy def _ndarray_decoder(dct): """Decoder from base64 to numpy.ndarray for big arrays(states)""" if isinstance(dct, dict) and 'b64npz' in dct: output = io.BytesIO(base64.b64decode(dct['b64npz'])) output.seek(0) return numpy.load(output)['obj'] return dct
de5fb5213f7f6fd1cec08017554ace5bcc1948f3
43,075
import os
import sqlite3


def get():
    """
    Load the active quartile scores per dispersion from the bundled
    quartiles database.

    :return: ({disp: {quartile: value}}, version) where version comes from
        the last active row read (None when the table is empty)
    """
    dbfile = os.path.join(os.path.dirname(__file__), 'db', 'quartiles.db')
    con = sqlite3.connect(dbfile)
    try:
        cur = con.cursor()
        cur.execute('SELECT disp, quartile, value, version FROM quartile WHERE active=1')
        res = cur.fetchall()
        cur.close()
    finally:
        # Bug fix: previously the connection leaked whenever the query
        # raised; always release it.
        con.close()
    quartiles = {}
    version = None
    for disp, quartile, value, row_version in res:
        version = row_version
        quartiles.setdefault(disp, {})[quartile] = value
    return quartiles, version
353d224fb07db9c9d098b14b46f6c4bbb11eec4b
43,077
def filter_by_condi(a, condi, ll=None, pr=None, mod=None, dd=None):
    """
    Select the rows of ``a`` whose trial conditions match all given criteria.

    :param a: A numpy array with the data from one trial on each row
    :param condi: list of (list_length, pres_rate, modality, distractor)
        tuples, one per trial
    :param ll: keep only trials with this list length (ignored if None)
    :param pr: keep only trials with this presentation rate (ignored if None)
    :param mod: keep only trials with this modality (ignored if None)
    :param dd: keep only trials with this distractor duration (ignored if None)
    :return: the matching rows of ``a``, or None when nothing matches
    """
    wanted = (ll, pr, mod, dd)
    ind = [
        i for i, cond in enumerate(condi)
        if all(w is None or c == w for c, w in zip(cond, wanted))
    ]
    if not ind:
        return None
    return a[ind]
0143dbb23434ef8ad259b3ff6247417d8be7e691
43,079
def lst_helper(l: list) -> list:
    """Convenience: stringify every element of ``l``."""
    return [str(item) for item in l]
0a889c69e4f9ed57200a3962b1f8adcbc5fcc5df
43,080
def isotopeMaxBD(isotope):
    """Setting the theoretical max BD shift of an isotope (if 100%
    incorporation).

    Parameters
    ----------
    isotope : str
        name of isotope

    Returns
    -------
    float : max BD value
    """
    max_shift = {'13C': 0.036, '15N': 0.016}
    key = isotope.upper()
    if key not in max_shift:
        raise KeyError('Isotope "{}" not supported.'.format(isotope))
    return max_shift[key]
7fff5bc6a54034e68357af6a08e000de34d59283
43,081
import re


def parse_frequency(freq):
    """
    Parses a frequency string and returns the number of seconds.
    Supported formats: 1s, 1m, 1h, 1d, 1w, 1y
    """
    match = re.search('^(\d+)(s|m|h|d|w|y)$', freq.lower())
    if match is None:
        raise ValueError('Input not in required format')
    seconds_per_unit = {
        's': 1,
        'm': 60,
        'h': 3600,
        'd': 86400,
        'w': 604800,
        'y': 31536000,
    }
    return int(match.group(1)) * seconds_per_unit[match.group(2)]
f08306fcf95ca86a4caa5344e629974c5c20d008
43,082
def construct_doc2author(corpus, author2doc):
    """Make a mapping from document IDs to author IDs."""
    return {
        d: [a for a, doc_ids in author2doc.items() if d in doc_ids]
        for d, _ in enumerate(corpus)
    }
4f07174d9569019fa2488320052952e110addaeb
43,083
def versionless(package):
    """
    Removes the version from the package reference.

    The cut is made at the first '@' after position 0, so scoped names like
    '@scope/pkg@1.0' keep their leading '@'.
    """
    at = package[1:].find('@')
    return package[:at + 1]
2f52ab9bb406df8a2e74ee56c8f4631cfa83ee6d
43,084
def is_valid_field(field, allow_quote=False, minimum=None, maximum=None):
    """
    Validates a generic user inputted field, such as a "name" for an object.

    :type field: str
    :param field: The data to be validated.
    :type allow_quote: bool
    :param allow_quote: If True, a single quote character (') may appear.
    :type minimum: int
    :param minimum: When given, values with fewer characters are rejected.
    :type maximum: int
    :param maximum: When given, values with more characters are rejected.
    :rtype: bool
    :return: True or False depending on whether field passes validation.
    """
    if field is None:
        return False
    if "'" in field and not allow_quote:
        return False
    if minimum and len(field) < minimum:
        return False
    if maximum and len(field) > maximum:
        return False
    return True
375b96d891a37115d8a367bc228e020f719da946
43,085
import math


def bin_search_right(query, data):
    """
    Query is a coordinate interval. Approximate binary search for the left
    coordinate of the query in data sorted by the right coordinate.
    Finishes when the first interval in data with a left coordinate that is
    greater than the query's right coordinate is found. Kept separate for my
    own readability.
    """
    # lower/upper bracket the search range; i is the probe (starts midway).
    lower, upper, i = 0, len(data), int(math.floor(len(data)/2))
    # No intervals at all -> nothing can overlap.
    if upper == 0:
        return set()
    while True:
        if lower == i or upper == i:
            # Bracket has collapsed onto the probe; step right while the
            # next interval still starts at/before the query's right end.
            if i + 1 < len(data) and data[i+1][0] <= query[1]:
                upper = i = i + 1
            else:
                break
        elif data[i][0] < query[1]:
            # Probe starts before the query ends -> continue in right half.
            lower = i
            i = int(math.floor((lower + upper)/2.))
        else:
            # Probe starts at/after the query's right end -> left half.
            upper = i
            i = int(math.floor((lower + upper)/2.))
    # Return a window of up to 41 intervals ending at the found index; the
    # fixed 40-interval look-back appears to be a safety margin for the
    # "approximate" nature of the search (see docstring).
    return data[max(0, i-40):i+1]
48cd68e44f312c798acdb1a8f120583392853629
43,086
def set_bits(n, start, end, value):
    """Set bits [<start>:<end>] of <n> to <value> and return <n>"""
    # Mask covering bit positions start .. end-1.
    mask = (1 << end) - (1 << start)
    cleared = int(n) & ~mask
    shifted = (int(value) << start) & mask
    return cleared | shifted
203fb5d94750534dbeb136dc3e580be2f7c9d68d
43,088
import os


def issue_dmget(path):
    """
    Issue a dmget command to the system for the specified path.

    The command is backgrounded with '&', so the return value is the shell
    spawn status from os.system, not the dmget result itself.
    """
    command = "dmget %s &" % path
    return os.system(command)
f0f1d95cb98625ca3938c4c1000a2cfcf9030420
43,089
from typing import Dict


def namelink(name: str, *dicts: Dict[str, str]) -> str:
    """Make a link for an identifier, given name-to-URL mappings."""
    for mapping in dicts:
        if name in mapping:
            return f'<a href="{mapping[name]}">{name}</a>'
    # LOGGER.warning(f"Failed to find link for {name}")
    return name
931206b39854069130c8af0c581eb7d3e12f88e1
43,091
from typing import Set
from pathlib import Path


def seqcheck(dels: Set[Path]):
    """Check the deleted names carry sequence digits: the i-th name (in
    sorted order) must contain str(i), for i in 0..7."""
    names = sorted(p.name for p in dels)
    digits = map(str, range(8))
    return all(digit in name for name, digit in zip(names, digits))
18762b116d15fe0846b7195d43f0ce8165e72fb9
43,093
def _process_string(value): """Strip a few non-ascii characters from string""" return value.strip('\x00\x16')
5a318bb0336d5b358ef856c39f88bd5037880a2c
43,094
def calc_ios(bbox_1, bbox_2):
    """Calculate intersection over small ratio (IoS).

    This is a variant of the more commonly used IoU (intersection over
    union) metric: the intersection area is divided by the area of the
    smaller box instead of the union.
    All coordinates are in the order of (ymin, xmin, ymax, xmax).

    Args:
        bbox_1: first box as (ymin, xmin, ymax, xmax)
        bbox_2: second box as (ymin, xmin, ymax, xmax)

    Returns:
        float: intersection area / smaller box area, or 0.0 when either
        box is degenerate (zero area) -- previously this raised
        ZeroDivisionError.
    """
    def cal_area(bbox):
        # Negative extents (empty/inverted boxes) clamp to zero area.
        return max(bbox[2] - bbox[0], 0) * max(bbox[3] - bbox[1], 0)

    ymin_1, xmin_1, ymax_1, xmax_1 = bbox_1
    ymin_2, xmin_2, ymax_2, xmax_2 = bbox_2

    # Intersection rectangle (may be empty, in which case its area is 0).
    x_min = max(xmin_1, xmin_2)
    y_min = max(ymin_1, ymin_2)
    x_max = min(xmax_1, xmax_2)
    y_max = min(ymax_1, ymax_2)
    area_intersection = cal_area([y_min, x_min, y_max, x_max])

    area_small = min(cal_area(bbox_1), cal_area(bbox_2))
    if area_small == 0:
        # Guard against division by zero for degenerate boxes.
        return 0.0
    return area_intersection / area_small
73070323a33f869354337739a25a484625f2b7ba
43,095
def transcribe(seq: str) -> str:
    """
    Transcribe DNA to RNA by generating the complement sequence with
    T -> U replacement.

    Raises ValueError for an empty sequence or any character outside ACGT
    (case-insensitive).
    """
    dna = {'A', 'C', 'G', 'T'}
    if len(seq) == 0:
        raise ValueError("You passed in an empty sequence.")
    seq = seq.upper()
    invalid = set(seq) - dna
    if invalid:
        raise ValueError("Your sequence can only contain A, C, G, and T, but you included a sequence with"
                         f" {invalid}.")
    pairing = {"A": "U", "T": "A", "C": "G", "G": "C"}
    rna_bases = [pairing[base] for base in seq]
    return "".join(rna_bases)
ebcaed8ddb01bacfa7acd3362b404fdafcb1aba1
43,096
def bbox_splitter(label_list):
    """
    Split bbox labels into parallel coordinate lists for a DataGenerator.

    Can be supplied to the data generator from the DataGenerationLib as a
    label creator.

    :param label_list: labels in format [[[x, y, width, height], bbox_category_id], ...]
    :return: 5 lists: x1, y1, x2, y2, category
    """
    x1, y1, x2, y2, category = [], [], [], [], []
    for bbox, cat_id in label_list:
        left, top = bbox[0], bbox[1]
        x1.append(left)
        y1.append(top)
        # Convert width/height into absolute bottom-right coordinates.
        x2.append(left + bbox[2])
        y2.append(top + bbox[3])
        category.append(cat_id)
    return x1, y1, x2, y2, category
e275fe67cfbc95d74ca9271c5e722588e91194b7
43,097
import requests


def request_weather(url):
    """Fetch current weather from the openweathermap.org API.

    :param url: fully formed API request URL
    :return: decoded JSON response as a dict
    """
    response = requests.get(url)
    return response.json()
c6ae4d38cb849b7956a505a2f0c15c2c1bc98da0
43,099
def range_intersect(r1: range, r2: range):
    """
    Return the overlap of two ranges, or None when they are disjoint.

    References
    ----------
    Stack Overflow,
    https://stackoverflow.com/questions/6821156/how-to-find-range-overlap-in-python
    """
    start = max(r1.start, r2.start)
    stop = min(r1.stop, r2.stop)
    overlap = range(start, stop)
    # An empty range is falsy, so disjoint inputs yield None.
    return overlap if overlap else None
7953a354fc479777cc81ef39a51d3feae714111f
43,100
def get_reason_from_exception(ex):
    """
    Turn an exception into a string similar to the last line of a traceback,
    e.g. ``ValueError: bad input``.
    """
    return f'{type(ex).__name__}: {ex}'
2c8b5d3114c6b950eaef1383f2e88d380f38c965
43,101
def shrink(pos, fact_x, fact_y=None):
    """Shrink networkx positionings.

    Scales every (x, y) position by fact_x / fact_y; when fact_y is omitted
    the x factor is used for both axes.
    """
    if fact_y is None:
        fact_y = fact_x
    scaled = {}
    for node, (x, y) in pos.items():
        scaled[node] = (x * fact_x, y * fact_y)
    return scaled
075ced8c46bd9181bed5e9de206e853fb2a508bd
43,102
def unit_conversion_value(df, **kwargs):
    """Multiply the 'Value_num' column by the 'Ratio' conversion column.

    Mutates *df* in place and returns it.
    """
    df['Value_num'] = df['Value_num'].mul(df['Ratio'])
    return df
a13d50ece16e3354cb01da91dee1efb8e942c659
43,104
import os
import re


def nb_last_created_patch(input_path):
    """Return the highest number embedded in the folder names under a path.

    Args:
        input_path (str): directory to scan (created if missing)

    Returns:
        max_numb (int): highest number across folder names (1 when empty)
        folder_list_only_landmark_patches (list): folder names containing
            "landmark" (non-empty means landmark negative patches exist)
        folder_list_only_random_patches (list): folder names containing
            "random" (non-empty means random negative patches exist)
    """
    if not os.path.exists(input_path):
        os.makedirs(input_path)

    folder_names = os.listdir(input_path)
    landmark_folders = []
    random_folders = []

    if folder_names:
        # Strip every non-digit character, then interpret the rest as int.
        numbers = [int(re.sub("[^0-9]", "", name)) for name in folder_names]
        landmark_folders = [name for name in folder_names if "landmark" in name]
        random_folders = [name for name in folder_names if "random" in name]
        highest = max(numbers)
    else:
        # Empty directory: start numbering at 1.
        highest = 1

    return highest, landmark_folders, random_folders
82f12847dd61a737acef0beb2186a4965f0368a9
43,105
def get_precision(TP, FP):
    """Compute precision (positive predictive value).

    :param TP: number of true positives
    :param FP: number of false positives
    :return: TP / (TP + FP), or 0.0 when there are no positive predictions
        (previously this raised ZeroDivisionError)
    """
    total_predicted_positive = TP + FP
    if total_predicted_positive == 0:
        # No positive predictions at all; by convention precision is 0.
        return 0.0
    return TP / total_predicted_positive
bac45a0d5642289aed707427797e86881e330eab
43,106
def is_scalar(x):
    """True if x is a scalar (constant numeric value)"""
    # NOTE: bool is a subclass of int, so booleans also count as scalars here.
    return isinstance(x, int) or isinstance(x, float)
243786089e4fa7d1a05fa3b8873b87b43ece20a7
43,107
def cpe_compare_version(rule_version, rule_update, conf_version):
    """
    Compare a CPE rule's version + update pair against a configuration's
    version string (case-insensitively).

    :param rule_version: version component from the CPE rule
    :param rule_update: update component from the CPE rule; a '*' wildcard
        disables matching entirely
    :param conf_version: version string reported by the configuration
    :return: True when conf_version is judged to match, otherwise False
    """
    # All comparisons are case-insensitive.
    rule_version = rule_version.lower()
    rule_update = rule_update.lower()
    conf_version = conf_version.lower()
    result = False
    try:
        if rule_version in conf_version and '*' not in rule_update:
            # Remainder of conf_version after the rule-version prefix.
            # NOTE(review): this slicing assumes rule_version occurs at the
            # START of conf_version, but `in` only checks containment --
            # confirm inputs are prefix-aligned.
            conf_version_sub = conf_version[len(rule_version):]
            # Skip a separating dot between version and update parts.
            if conf_version_sub[0] in ('.',):
                conf_version_sub = conf_version_sub[1:]
            for i in range(0, len(rule_update)):
                if conf_version_sub[i] != rule_update[i]:
                    # First mismatch: accept only if the rule's update ends
                    # with the remaining suffix of the configured update.
                    conf_version_sub_suffix = conf_version_sub[i:]
                    if rule_update.endswith(conf_version_sub_suffix):
                        result = True
                        break
            # NOTE(review): a character-for-character exact match of
            # rule_update never hits the mismatch branch, so result stays
            # False -- confirm whether that is intended.
    except IndexError as ex:
        # Short strings run out of characters above; treated as no match.
        pass
    return result
ed1fe4876cada0b10e141481b0b99d1ff1962b2e
43,109
import functools


def update_wrapper(wrapper, wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
                   updated=functools.WRAPPER_UPDATES):
    """
    Update a wrapper function to look like the wrapped function.

    Modified version of functools.update_wrapper that tolerates `partial`
    objects and other wrappers lacking the usual attributes / __dict__:
    missing attributes on either side are silently skipped.
    See functools.update_wrapper for full documentation.
    """
    for name in assigned:
        try:
            value = getattr(wrapped, name)
        except AttributeError:
            continue  # wrapped lacks it (e.g. partial has no __name__)
        try:
            setattr(wrapper, name, value)
        except AttributeError:
            pass  # wrapper refuses the attribute; skip
    for name in updated:
        try:
            getattr(wrapper, name).update(getattr(wrapped, name, {}))
        except AttributeError:
            pass  # wrapper has no such mapping (e.g. no __dict__)
    # Return the wrapper so this can be used as a decorator via partial().
    return wrapper
e40b2abcdd1593f87418340b769484ebea2f4746
43,110
import textwrap


def registrationRoutine(allowedPlugins):
    """Build content of file for plugin registration"""
    snippets = []
    for x in allowedPlugins:
        snippets.append(textwrap.dedent(f"""
            try:
                from kikit.plugin import {x[0]}
                {x[0]}.plugin().register()
            except ImportError:
                pass
            """))
    return "\n".join(snippets)
06dd02313273ac1af1eaebfa47677b7cfe591a09
43,111
import json


def read_json(file_path):
    """Read a json file into a data dictionary structure.

    :param file_path: path and name of the json file to read
    :return: the parsed JSON content (typically a dict)
    """
    with open(file_path) as json_file:
        return json.load(json_file)
dbf06a8002f1bda0963eebc2045e9716fb69d938
43,113
import re


def natural_sort_key(text):
    """
    Sort numeric parts of text numerically and text parts alphabetically.

    Example:

        >>> sorted(["9 Foo", "10 Foo", "9A Foo"], key=natural_sort_key)
        ['9 Foo', '9A Foo', '10 Foo']
    """
    # Raw string for the regex: '(\d+)' as a plain literal is an invalid
    # escape sequence (SyntaxWarning on Python 3.12+, error in the future).
    return [int(part) if part.isdigit() else part
            for word in text.split()
            for part in re.split(r'(\d+)', word)]
a34793ef0b98cf91d2aac8807bcb60e47e197b1c
43,116
def get_subreport_zeros_and_ones(subreport, n_bits):
    """Count the zeros and ones in each column of the submarine report.

    Args:
        subreport: iterable of bit strings to count from.
        n_bits (int): how many bits each binary number has.

    Returns:
        tuple: (zeros, ones) per-column counts, each a list of length n_bits.
    """
    zeros = [0] * n_bits
    ones = [0] * n_bits
    for bits in subreport:
        for column in range(n_bits):
            bit = bits[column]
            if bit == '1':
                ones[column] += 1
            elif bit == '0':
                zeros[column] += 1
    return zeros, ones
2c8f2839a78690e7056761513d804948e8d1a79d
43,117
import os


def list_no_hidden(d):
    """
    List all entries of a directory, skipping hidden ones (names that
    begin with a period).
    """
    return [name for name in os.listdir(d) if name[:1] != '.']
0b485c9b830151861b11990a9db79c371844763d
43,118
import random


def is_win_prize1(win=0.5):
    """Randomly decide whether a prize is won.

    :param win: winning probability, in the range 0~1
    :return: True when the draw wins, False otherwise
    """
    # random.random() is uniform on [0, 1); win==0 never wins, win==1 always.
    return random.random() < win
e9e5ae4dd85245e10c980f6fdd27f5c3b4affdec
43,120
def checkCardInHandClicked(game, clickX, clickY):
    """ Returns the card in current player's hand that was clicked, else None """
    # Walk the hand from the top-most rendered card downwards so overlapping
    # cards resolve to the one drawn on top.
    for card in reversed(game.currPlayer.hand):
        if card.imageObj.collidepoint(clickX, clickY):
            return card
    return None
17e8293c1536e3a99f5e24a43cceb0a090a78f5d
43,121
import types


def iscoroutine(object):
    """Return true if the object is a coroutine.

    Only native coroutine objects (created by calling an ``async def``
    function) qualify.
    """
    return isinstance(object, types.CoroutineType)
e2b60ba01ddf3a9863be2773128d35cb12adf1c4
43,122
def mask_shift_set(value, mask, shift, new_value):
    """
    Replace the bit-field of `value` selected by `mask << shift` with
    `new_value` (clipped to the mask) and return the result.
    """
    masked_new = new_value & mask
    cleared = value & ~(mask << shift)
    return cleared | (masked_new << shift)
e6ed92669b7f4fb85a4d96cf130dcdf9ffe3d950
43,123
from typing import Callable


def raise_not_implemented_gen(message: str) -> Callable[[], None]:
    """
    Make a zero-argument function that always raises NotImplementedError
    with the given custom message.
    """
    def _raiser() -> None:
        raise NotImplementedError(message)

    return _raiser
03aac0a266c686db439ff179c80b0c213a5bb033
43,124
import collections


def get_frequency(data):
    """Count element frequencies and return them in ascending-count order.

    Arg:
        data (list of str): items to count
    Return:
        keys (list of str): elements ordered by ascending frequency
        vals (list of int): the frequencies, sorted ascending
    """
    counts = collections.Counter(data)
    ordered = sorted(counts.items(), key=lambda kv: kv[1])
    keys = [element for element, _ in ordered]
    vals = sorted(counts.values())
    return keys, vals
7d7a4b00a99b83f0612fab295db24255f7f81d98
43,125
def hypot(x, y):
    """Return the Euclidean norm, sqrt(x*x + y*y).

    :type x: numbers.Real
    :type y: numbers.Real
    :rtype: float
    """
    # The original stub returned 0.0 unconditionally, contradicting its own
    # documented contract; compute the norm as documented.
    return float((x * x + y * y) ** 0.5)
09780ebc086eb0203187553d730deca529a1e150
43,126
import platform


def is_wsl():
    """
    Return whether we are running under the Windows Subsystem for Linux.

    Detection reads /proc/version and looks for the vendor string. The
    comparison is case-insensitive because WSL1 kernels report "Microsoft"
    while WSL2 kernels report a lowercase "microsoft-standard" string.
    """
    if 'linux' in platform.system().lower():
        try:
            with open('/proc/version', 'r') as f:
                return 'microsoft' in f.read().lower()
        except OSError:
            # /proc not available (unusual container setups); not WSL.
            return False
    return False
ac2e0beee6d83b4b32af97f1572d29534d4389c9
43,129
import torch


def sentences_similarity(first_sentence_features, second_sentence_features) -> float:
    """
    Compute the cosine similarity between two sentence embedding tensors.
    """
    cosine = torch.nn.CosineSimilarity()
    score = cosine(first_sentence_features, second_sentence_features)
    return float(score)
f7ba47162e9a2348eba23a134c474f400fc3da3a
43,130
def Capitalize(text: str):
    """Return *text* with its first character title-cased and the rest lower-cased."""
    return text.capitalize()
955db0a852c14b654fcf489d3f77ce6dbca6bf95
43,131
def get_param_dict(job, model_keys):
    """
    Get model parameters + hyperparams as a dictionary.

    Each key has any '.__builder__' markers stripped and its dots replaced
    by spaces; values are looked up under the 'hyper_parameters.' prefix
    (None when absent).
    """
    params = {}
    for key in model_keys:
        display_key = " ".join(key.replace(".__builder__", "").split("."))
        params[display_key] = job.get("hyper_parameters." + key, None)
    return params
0cbb24dde3ab28b41b2b5af6fe6f3bdfc66da9bb
43,132
def align_dims(a, b, axis):
    """Returns a broadcastable version of b which allows for various binary
    operations with a, with axis as the first aligned dimension.

    Raises ValueError when b's dimensions (offset by axis) exceed a's.
    """
    trailing = a.ndim - (axis + b.ndim)
    if trailing < 0:
        raise ValueError('Must have a.ndim >= axis + b.ndim.')
    if trailing == 0:
        # Already aligned; nothing to broadcast.
        return b
    # Insert broadcast ('x') axes before and after b's real dimensions.
    pattern = ('x',) * axis + tuple(range(b.ndim)) + ('x',) * trailing
    return b.dimshuffle(*pattern)
b5081437ceb6a3623607d23c4f394990088298c6
43,135
def split_in_parts(coords, to_remove):
    """Splits the trajectories by the stops that are within the centres
    (indicated in the to_remove vector).

    :param coords: sequence of trajectory coordinates
    :param to_remove: parallel sequence of flags; truthy means the
        coordinate lies inside a centre and acts as a split point
    :return: list of coordinate segments, or the int 0 when there were no
        coordinates at all
    """
    parts = []
    part = []
    prev_truevalue = True
    for coord, truevalue in zip(coords, to_remove):
        if not truevalue:
            # Keep this coordinate in the current segment.
            part.append(coord)
        elif truevalue and not prev_truevalue:
            # Rising edge of a removal run: close the current segment.
            # NOTE(review): `part` is never reset to [] here, so subsequent
            # segments re-include all earlier coordinates and the SAME
            # growing list object is appended repeatedly -- confirm this
            # aliasing is intended.
            parts.append(part)
        prev_truevalue = truevalue
    parts.append(part)
    # NOTE(review): returns the int 0 (not an empty list) for empty input;
    # callers must handle both return types.
    return 0 if parts == [[]] else parts
07ede7904b843afb061914136ba3f81835fc0bec
43,137
import time


def get_interval_number(ts, duration):
    """Returns the number of the current interval.

    Args:
        ts: The timestamp (datetime) to convert
        duration: The length of the interval in seconds

    Returns:
        int: Interval number.
    """
    epoch_seconds = time.mktime(ts.timetuple())
    return int(epoch_seconds / duration)
2fe90e1e40f7a1d76c8a4410295c5bd80e5e83e6
43,138
from typing import Dict
from typing import Any


def get_user_name(instance: Dict[str, Any]) -> str:
    """Build a user's short/combined display name from an instance dict.

    The dict may contain first_name, last_name, username and title.
    Analogue to the __str__ method from Openslides3.
    """
    first = instance.get("first_name", "").strip()
    last = instance.get("last_name", "").strip()
    if first and last:
        full_name = f"{first} {last}"
    else:
        # Fall back through whichever single component is available.
        full_name = first or last or instance.get("username", "")
    title = instance.get("title", "").strip()
    if title:
        full_name = f"{title} {full_name}"
    return full_name
c7c583ae7df2a0a7f34a8c7e3100abcb0a9315e4
43,139
import tempfile
import os


def split_file(splitNum, fileInput, lines):
    """
    split_file is used to split fileInput into splitNum small pieces file.
    For example, when splitNum is 56, a 112 lines file will be split into
    56 files and each file has 2 lines.

    :param splitNum: split into splitNum files
    :param fileInput: file to be split
    :param lines: lines of fileInput
    :return: list of NamedTemporaryFile objects holding the pieces; they
        are deleted on garbage collection, so the caller must keep the
        returned references alive while using them
    """
    quot = lines // splitNum  # base number of lines per piece
    rema = lines % splitNum   # the first `rema` pieces get one extra line
    files = []
    current_line = 0
    for i in range(splitNum):
        if i < rema:
            read_line = quot + 1
        else:
            read_line = quot
        temp = tempfile.NamedTemporaryFile()
        # Extract the next `read_line` lines with a head/tail pipeline.
        # NOTE(review): relies on POSIX head/tail and interpolates fileInput
        # unquoted into a shell command -- paths with spaces or shell
        # metacharacters will break or be unsafe; confirm inputs are trusted.
        os.system("head -n%d %s| tail -n%d > %s" % (current_line + read_line, fileInput, read_line, temp.name))
        current_line += read_line
        files.append(temp)
    return files
c2e8eb8599545c5755dbb013fc17698a0711bd90
43,140
import asyncio


def async_test(fun):
    """Decorator that runs an async function synchronously via asyncio.run.

    :param fun: coroutine function to wrap
    :return: a synchronous wrapper returning the coroutine's result
    """
    def wrapper(*args, **kwargs):
        coroutine = fun(*args, **kwargs)
        return asyncio.run(coroutine)

    return wrapper
ad3130f45d2f24f315d832d48574fa18ed408664
43,141