content: string (lengths 35 to 416k)
sha1: string (length 40)
id: int64 (0 to 710k)
def spectral_projection(u, eigenpairs):
    """
    Returns the coefficients of each eigenvector in a projection of the
    vector u onto the normalized eigenvectors which are contained in
    eigenpairs.

    eigenpairs should be a list of two objects. The first is a list of
    eigenvalues and the second a list of eigenvectors. The eigenvectors
    should be lists.

    There's not a lot of error checking on lengths of arrays, etc. so be
    careful.
    """
    coeff = []
    evect = eigenpairs[1]
    for ev in evect:
        c = sum([evv * uv for (evv, uv) in zip(ev, u)])
        coeff.append(c)
    return coeff
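A quick usage sketch with illustrative values: projecting onto the standard basis of R^2 (note the eigenvalue list is never read by the function itself):

eigenpairs = ([1.0, 1.0], [[1.0, 0.0], [0.0, 1.0]])
print(spectral_projection([3.0, 4.0], eigenpairs))  # [3.0, 4.0]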
2b877e2e9a606c449b38101e1a23504bff999409
699,727
def _get_native_location(name):
    # type: (str) -> str
    """
    Fetches the location of a native macOS library.

    :param name: The name of the library to be loaded.
    :return: The location of the library on a macOS filesystem.
    """
    return '/System/Library/Frameworks/{0}.framework/{0}'.format(name)
53cb9ac2a771883b791a111e53e23bd77c08f43b
699,728
def ra2float(ra):
    """
    Convert ra to degrees (float).

    ra can be given as a time string, HH:MM:SS.SS, as a string like
    '25.6554', or (trivially) as a float. An exception is thrown if ra is
    invalid. 360 deg = 24 hrs, so 360/24 = 15.
    """
    if ra is None:
        return ra
    if type(ra) is float or type(ra) is int:
        return float(ra)
    if type(ra) is str and ra.find(':') == -1:
        return float(ra)
    try:
        return float(ra)  # catch numpy types
    except (TypeError, ValueError):
        pass
    assert type(ra) is str, 'Invalid parameter format (ra2float - data type %r)' % type(ra)
    h, m, s = ra.strip().split(':')
    if h.find('-') != -1:
        h = h.replace('-', '')
        sign = -1.0
    else:
        sign = 1.0
    return sign * (float(h) * 15.0 + float(m) / 4.0 + float(s) / 240.0)
d16e1163f9e821fdff4344eacf7c29b08a8ba266
699,729
def split_unknown_args(argv: list[str]) -> tuple[list[str], list[str]]:
    """Separate known command-line arguments from unknown ones.

    Unknown arguments are separated from known arguments by
    the special **--** argument.

    :param argv: command-line arguments
    :return: tuple (known_args, unknown_args)
    """
    for i in range(len(argv)):
        if argv[i] == "--":
            return argv[:i], argv[i + 1:]
    return argv, []
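A short usage sketch:

known, unknown = split_unknown_args(["run", "--verbose", "--", "extra1", "extra2"])
print(known)    # ['run', '--verbose']
print(unknown)  # ['extra1', 'extra2']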
14e6f202e105cb1001563e9a5a5fc1c5f4bd9fd0
699,730
def season_validation(season):
    """
    Decide if the season inputs are valid, then return valid inputs.

    Parameters:
        season(str): A user's inputs to the season factor.

    Return:
        (str): Valid strings without commas or spaces or characters.
    """
    def is_valid_digit(season):
        """
        Decide the condition that controls the while loop.

        Parameters:
            season(str): A user's inputs to the season question.

        Return:
            (bool): Whether the input is invalid.
        """
        season_input_list = season.split(",")
        for a in season_input_list:
            try:
                int_a = int(a)
            except ValueError:
                return True
            else:
                if int_a != 1 and int_a != 2 and int_a != 3 and int_a != 4:
                    return True
        return False

    while is_valid_digit(season):
        print("\nI'm sorry, but " + season + " is not a valid choice. Please try again.\n")
        season = input("\nWhich seasons do you plan to travel in?"
                       + "\n 1) Spring"
                       + "\n 2) Summer"
                       + "\n 3) Autumn"
                       + "\n 4) Winter"
                       + "\n> ")
    return season
8633c7a1a39103daa212a227317a7d147c3a4bb3
699,731
def byte_to_megabyte(byte):
    """
    Convert byte value to megabyte
    """
    return byte / (1024.0 ** 2)
1b410bcec539e3946a7b5751b984758f89a7ed96
699,732
import string


def decode(digits, base):
    """Decode given digits in given base to number in base 10.

    digits: str -- string representation of number (in given base)
    base: int -- base of given number
    return: int -- integer representation of number (in base 10)"""
    # Handle up to base 36 [0-9a-z]
    assert 2 <= base <= 36, 'base is out of range: {}'.format(base)
    result = 0
    power = len(digits) - 1
    for i in range(len(digits)):
        if digits[i] in string.ascii_lowercase:
            digit = ord(digits[i]) - 87  # 'a' -> 10
        elif digits[i] in string.ascii_uppercase:
            digit = ord(digits[i]) - 55  # 'A' -> 10
        else:
            digit = int(digits[i])
        num = (base ** power) * digit
        result += num
        power -= 1
    return result
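A usage sketch:

print(decode("ff", 16))   # 255
print(decode("101", 2))   # 5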
febdf9973a73de5b3686a20b8c2a5738391a815e
699,734
import argparse


def parseArgs():
    """Parse script parameters and return its values."""
    parser = argparse.ArgumentParser(
        description="Social Media Profile Cross Reference Tool.")
    parser.add_argument("-i", help="input csv file with emails",
                        required=True)
    parser.add_argument("-o", help="output csv file with social profiles",
                        required=True)
    parser.add_argument("-k", help="api key", default="sample_key")
    args = parser.parse_args()
    return args.i, args.o, args.k
a3697b1d90f4cd868aac0f085f8a41674b729855
699,735
def pass_args(args):
    """Possible argument to attr_handler()"""
    return args
5681a5f80c1f01bcae099d4baaf7fb07785c5983
699,736
def least_significan_bit(n):
    """Least significant bit of a number.

    num AND -num = LSB

    Args:
        n (int): number to extract the least significant set bit from

    Raises:
        TypeError: if n is not an integer

    Returns:
        int: value of the least significant set bit of n
             (None is returned implicitly for n <= 0)
    """
    if type(n) != int:
        raise TypeError("Number must be Integer.")
    if n > 0:
        return (n & -n)
5fcde70104f885eeb753697fb74d8ec2e7156eae
699,737
def is_k_anonymous(df, partition, sensitive_column, k=3):
    """
    params
        df: The dataframe on which to check the partition.
        partition: The partition of the dataframe to check.
        sensitive_column: The name of the sensitive column
        k: The desired k

    returns
        True if the partition is valid according to our k-anonymity
        criteria, False otherwise.
    """
    if len(partition) < k:
        return False
    return True
e58ae8f65524c8a9f7b404d391c2b1d3d0b188c2
699,738
def normalize_variant(variant: str) -> str:
    """
    Normalize variant.

    Reformat variant, replacing colon separators with underscores:
    chromosome:position:reference:alternative ->
    chromosome_position_reference_alternative

    :param variant: string representation of variant
    :return: reformatted variant
    """
    cpra = variant.split(":")
    cpra[0] = "X" if cpra[0] == "23" else cpra[0]
    return "_".join(cpra)
2dc97b7f7b09add6a8062db94376c1ab030ff07c
699,739
from typing import Tuple


def compute_limits(
    numdata: int, numblocks: int, blocksize: int, blockn: int
) -> Tuple[int, ...]:
    """Generates the limit of indices corresponding to a specific block.

    It takes into account the non-exact divisibility of numdata into
    numblocks, letting the last block take the extra chunk.

    Parameters
    ----------
    numdata : int
        Total number of data points to distribute
    numblocks : int
        Total number of blocks to distribute into
    blocksize : int
        Size of data per block
    blockn : int
        Index of block, from 0 to numblocks-1

    Returns
    -------
    start : int
        Position to start assigning indices
    end : int
        One beyond position to stop assigning indices
    """
    start = blockn * blocksize
    end = start + blocksize
    if blockn == (numblocks - 1):  # last block gets the extra
        end = numdata
    return start, end
748344d60baa8f2ecd31ce822c0e33aca981bc13
699,740
import json


def getToken(response):
    """
    Get the tokenised card reference from the API response

    :param response: Response object in JSON
    :return: String - token
    """
    resp_dict = json.loads(response.text)
    try:
        token = resp_dict["token"]
    except KeyError:
        print('Retrieval unsuccessful.')
        return None
    return token
b849f3b021b995b164b99690a82ecabf881bb18b
699,741
def _read_transition_statistics_from_files(model, verbose):
    """Parses the transition statistics from the simulation output files
    for later analysis

    Parameters
    ----------
    model : obj
        object containing all anchor and milestone information
    verbose : bool
        whether to print verbose output

    Returns
    -------
    total_steps : int
        total number of MD steps taken in all simulations in all anchors
    """
    total_steps = 0
    for site in model.sites:
        for anchor in site.anchors:
            if anchor.md == True and anchor.directory:
                print('parsing md transitions for:Anchor', anchor.fullname)
                max_steps = anchor._parse_md_transitions()
                print(max_steps, total_steps)
                if max_steps > total_steps:
                    total_steps = max_steps
    return total_steps
1a4f326bd628e6ddd9475c9610b92cb2ba564bba
699,742
def createSubsetGafDict(subset, gafDict):
    """
    Generates a dictionary that maps the subset's Uniprot ACs to the GO
    IDs, based on the provided gene subset and the gaf dictionary.

    Parameters
    ----------
    subset : set of str
        A subset of Uniprot ACs of interest.
    gafDict : dict of str mapping to set
        A dictionary that maps Uniprot ACs (str) to a set of GO IDs.
        Generated by importGAF().

    Returns
    -------
    dict of str mapping to set
        A dictionary that maps the subset's Uniprot ACs to GO IDs.
    """
    gafSubsetDict = {gene: gafDict[gene] for gene in subset if gene in gafDict}
    return gafSubsetDict
76e69cd79c984a19254df171403c008405276408
699,743
def shortest_paths(graph, vertex_key):
    """Uses Dijkstra's algorithm to find the shortest path from
    `vertex_key` to all other vertices. If we have no lengths, then each
    edge has length 1.

    :return: `(lengths, prevs)` where `lengths` is a dictionary from key
      to length. A length of -1 means that the vertex is not connected to
      `vertex_key`. `prevs` is a dictionary from key to key, giving for
      each vertex the previous vertex in the path from `vertex_key` to
      that vertex. Working backwards, you can hence construct all
      shortest paths.
    """
    shortest_length = {k: -1 for k in graph.vertices}
    shortest_length[vertex_key] = 0
    candidates = {vertex_key}
    done = set()
    prevs = {vertex_key: vertex_key}
    while len(candidates) > 0:
        next_vertex, min_dist = None, -1
        for v in candidates:
            dist = shortest_length[v]
            if min_dist == -1 or dist < min_dist:
                min_dist = dist
                next_vertex = v
        candidates.discard(next_vertex)
        done.add(next_vertex)
        for v in graph.neighbours(next_vertex):
            edge_index, _ = graph.find_edge(next_vertex, v)
            dist = min_dist + graph.length(edge_index)
            current_dist = shortest_length[v]
            if current_dist == -1 or current_dist > dist:
                shortest_length[v] = dist
                prevs[v] = next_vertex
                if v not in done:
                    candidates.add(v)
    return shortest_length, prevs
f2ac9abf9292364099748475988d4ee1dbeb4b23
699,744
def process_overall_mode_choice(mode_choice_data):
    """Processing and reorganizing the data in a dataframe ready for plotting

    Parameters
    ----------
    mode_choice_data: pandas DataFrame
        From the `modeChoice.csv` input file (located in the output
        directory of the simulation)

    Returns
    -------
    mode_choice: pandas DataFrame
        Mode choice data that is ready for plotting.
    """
    mode_choice = mode_choice_data
    # Select columns w/ modes
    mode_choice = mode_choice.iloc[-1, :]
    mode_choice = mode_choice.drop(["iterations"])
    # Replace "ride_hail" by "on-demand ride"
    mode_choice.rename({"ride_hail": "on-demand ride"}, inplace=True)
    return mode_choice
870685017d223f8a277265f80eea56e50eedec90
699,745
def flatten_datasets(rel_datasets):
    """Take a dictionary of relations, and returns them in tuple format."""
    flattened_datasets = [[], [], []]
    for kind in rel_datasets.keys():
        for i in range(0, 3):
            for rel in rel_datasets[kind][i]:
                flattened_datasets[i].append([*rel, kind])
    return flattened_datasets
70affa370a98c8328effed0bdb015999c5874913
699,746
import torch


def log_sum_exp(x, dim=None):
    """Log-sum-exp trick implementation"""
    x_max, _ = torch.max(x, dim=dim, keepdim=True)
    x_log = torch.log(torch.sum(torch.exp(x - x_max), dim=dim, keepdim=True))
    return x_log + x_max
45b1f6d198569567d3284bab4116a4703b0589a3
699,747
def nb_year(p0, percent, aug, p):
    """
    Finds the amount of years required for the population to reach a
    desired amount.

    :param p0: integer of starting population.
    :param percent: float of percent increase per year.
    :param aug: integer of new inhabitants.
    :param p: integer of desired population.
    :return: the amount of years to reach the population
    """
    if p0 >= p:
        return 0
    else:
        return 1 + nb_year(p0 + (p0 * percent / 100) + aug, percent, aug, p)
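A usage sketch: starting from 1500 people growing 5% a year plus 100 newcomers, reaching 5000 takes 15 years.

print(nb_year(1500, 5, 100, 5000))  # 15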
054496347fc8bedca3424143d48d122712dd1363
699,748
def tshark_read(
    device,
    capture_file,
    packet_details=False,
    filter_str=None,
    timeout=60,
    rm_file=True,
):
    """Read the packets via tshark

    :param device: lan or wan...
    :type device: Object
    :param capture_file: Filename in which the packets were captured
    :type capture_file: String
    :param packet_details: output of packet tree (Packet Details)
    :type packet_details: Bool
    :param filter_str: capture filter, ex. 'data.len == 1400'
    :type filter_str: String
    :param timeout: timeout after executing the read command; default is
        60 seconds
    :type timeout: int
    :param rm_file: Flag to remove capture file
    :type rm_file: bool
    """
    command_string = "tshark -r {} ".format(capture_file)
    if packet_details:
        command_string += "-V "
    if filter_str:
        command_string += "{}".format(filter_str)

    device.sendline(command_string)
    device.expect(device.prompt, timeout=timeout)
    output = device.before
    if rm_file:
        device.sudo_sendline("rm %s" % (capture_file))
        device.expect(device.prompt)
    return output
8fc31098e750691a1aa7c27a868abf0d6254adec
699,749
def resize_image(img):
    """Resize images prior to utilizing in training the model."""
    width, height = img.size
    ratio = width / height
    new_height = 100
    new_width = int(new_height * ratio)
    img = img.resize((new_width, new_height))
    return img
1aa0164e1e25ef0f22e55a15a654fda2dfef5b12
699,750
def _filter_calibration(time_field, items, start, stop):
    """Filter calibration data based on timestamp range [ns]."""
    if len(items) == 0:
        return []

    def timestamp(x):
        return x[time_field]

    items = sorted(items, key=timestamp)
    calibration_items = [x for x in items if start < timestamp(x) < stop]
    pre = [x for x in items if timestamp(x) <= start]
    if pre:
        calibration_items.insert(0, pre[-1])
    return calibration_items
c7575ec85c7da9f1872150a1da3d7b02718df8a0
699,751
import torch


def phi_inv(D):
    """Inverse of the realification phi."""
    AB, _ = torch.chunk(D, 2, dim=0)
    A, B = torch.chunk(AB, 2, dim=1)
    return torch.stack([A, B], dim=len(D.shape))
b8198764b89f3f1261e96014697cf1346e1c7d43
699,752
import os


def is_created():
    """Checks to see if the ginger new command has already been run on the dir."""
    return os.path.isfile(os.getcwd() + '/_config.yaml')
3a91185bd5c17d659e8cd30bf57c781c6a657092
699,753
import argparse


def read_param() -> dict:
    """Read parameters from the terminal."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--ScreenType",
        help="type of screen ['enrichment'/'depletion']",
        type=str,
        choices=["enrichment", "depletion"]
    )
    parser.add_argument("--LibFilename",
                        help="filename of library spreadsheet", type=str)
    parser.add_argument("--seq_5_end", help="5'-sequence adapter", type=str)
    parser.add_argument("--CtrlPrefix", help="Name of control", type=str)
    parser.add_argument("--NonTargetPrefix",
                        help="prefix for non-targeting sgRNAs in library",
                        type=str)
    parser.add_argument("--NumGuidesPerGene",
                        help="number of sgRNAs per gene", type=int)
    args = parser.parse_args()

    # create a dictionary to store arguments
    args_dict = dict()
    for arg in vars(args):
        args_dict[arg] = getattr(args, arg)
    return args_dict
5efb8419266b34807b286a411cfd36365c66c628
699,754
def Get_foregroundapp(device):
    """Return the foreground app"""
    return device.shell(
        "dumpsys activity recents | grep 'Recent #0' | cut -d= -f2 "
        "| sed 's| .*||' | cut -d '/' -f1"
    ).strip()
236986e3d08f6a4c7dd4cd8c8441806d25e76654
699,755
def chunk_size(request):
    """Set the chunk size for the source (or None to use the default)."""
    return request.param
c57269f434790953d475a2791c862d70d204ed86
699,756
from datetime import datetime


def isoformat(dt: datetime) -> str:
    """ISO format datetime object with max precision limited to seconds.

    Args:
        dt: datetime object to be formatted

    Returns:
        ISO 8601 formatted string
    """
    # IMPORTANT: should the format ever be changed, be sure to update TIMESTAMP_REGEX as well!
    return dt.isoformat(timespec="seconds")
679ce7aa71ab30e4c78a0953272c17f487714177
699,758
import sqlite3
import os


def get_db_cache(cache_dir: str) -> sqlite3.Connection:
    """
    Open cache and return sqlite3 connection.
    Table is created if it does not exist.
    """
    cache_file = os.path.join(cache_dir, "cache.sqlite3")
    conn = sqlite3.connect(cache_file)
    cursor = conn.cursor()
    cursor.execute("""CREATE TABLE IF NOT EXISTS asn (
        ip string unique,
        asn int,
        prefix string,
        asname string,
        cn string,
        isp string,
        peers string,
        added int)
    """)
    cursor.execute("CREATE INDEX IF NOT EXISTS asn_ip on ASN(ip)")
    return conn
7dd6a909ba210a261196ddd1273795d76a27464a
699,759
def _recursive_namedtuple_convert(data):
    """
    Recursively converts the named tuples in the given object to dictionaries

    :param data: An object in a named tuple or its children
    :return: The converted object
    """
    if isinstance(data, list):
        # List
        return [_recursive_namedtuple_convert(item) for item in data]
    elif hasattr(data, '_asdict'):
        # Named tuple
        dict_value = dict(data._asdict())
        for key, value in dict_value.items():
            dict_value[key] = _recursive_namedtuple_convert(value)
        return dict_value
    else:
        # Standard object
        return data
292bc249b056c14eb1c700561d366ff4e6e64a10
699,760
def _find_start(score_matrix, align_globally):
    """Return a list of starting points (score, (row, col)).

    Indicating every possible place to start the tracebacks.
    """
    nrows, ncols = len(score_matrix), len(score_matrix[0])
    # In this implementation of the global algorithm, the start will always be
    # the bottom right corner of the matrix.
    if align_globally:
        starts = [(score_matrix[-1][-1], (nrows - 1, ncols - 1))]
    else:
        starts = []
        for row in range(nrows):
            for col in range(ncols):
                score = score_matrix[row][col]
                starts.append((score, (row, col)))
    return starts
361a1ea87ecf9bbef0950521ed0fdcfd70b7b608
699,761
def get_f1_score(precision, recall):
    """
    Calculate and return F1 score

    :param precision: precision score
    :param recall: recall score
    :return: F1 score
    """
    return (2 * (precision * recall)) / (precision + recall)
e94dd20acac443be9856b9dbb43adf2ead2e0ba5
699,762
def bh2u(x: bytes) -> str:
    """
    str with hex representation of a bytes-like object

    >>> x = bytes((1, 2, 10))
    >>> bh2u(x)
    '01020a'
    """
    return x.hex()
8ab7bf9b536d13a1944e014ea83a4302917c2306
699,763
def vis_FasterRCNN_loss(self, scale_weight):
    """
    Calculate the roi losses for faster rcnn.

    Args:
    -- self: FastRCNNOutputs.
    -- scale_weight: the weight for loss from different scale.

    Returns:
    -- losses.
    """
    return {
        "loss_cls": self.vis_softmax_cross_entropy_loss_(scale_weight),
        "loss_box_reg": self.vis_smooth_l1_loss_(scale_weight),
    }
5832d7f28179085db939a7bd624e9ffb08461fe4
699,764
from typing import List
from functools import reduce


def decode(obs: int, spaces: List[int]) -> List[int]:
    """
    Decode an observation from a list of gym.Discrete spaces in a list of
    integers. It assumes that obs has been encoded by using the
    'utils.encode' function.

    :param obs: the encoded observation
    :param spaces: the list of gym.Discrete spaces from where the
        observation is observed.
    :return: the decoded observation.
    """
    result = []
    sizes = spaces[::-1]
    shift = reduce(lambda x, y: x * y, sizes) // sizes[0]
    for size in sizes[1:]:
        r = obs // shift
        result.append(r)
        obs %= shift
        shift //= size
    result.append(obs)
    return result[::-1]
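A small round-trip sketch, assuming the companion 'utils.encode' packs the first space as the least significant digit (so [1, 2] over spaces [2, 3] encodes to 1 + 2*2 == 5):

print(decode(5, [2, 3]))  # [1, 2]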
6c3c1348776b7b164cf70a5bfa9da3e8b53a280f
699,765
def anagram_solution_1(words):
    """
    Complexity O(n^2).
    If it is possible to "check off" each character, then the two strings
    must be anagrams.

    :param words: Tuple
    :return: bool
    """
    s1, s2 = words
    still_ok = True
    if len(s1) != len(s2):
        still_ok = False

    a_list = list(s2)
    pos_1 = 0
    while pos_1 < len(s1) and still_ok:
        pos_2 = 0
        found = False
        while pos_2 < len(a_list) and not found:
            if s1[pos_1] == a_list[pos_2]:
                found = True
            else:
                pos_2 = pos_2 + 1
        if found:
            a_list[pos_2] = None
        else:
            still_ok = False
        pos_1 = pos_1 + 1
    return still_ok
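A usage sketch:

print(anagram_solution_1(("heart", "earth")))  # True
print(anagram_solution_1(("abc", "abd")))      # False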
942ef7bb631bd803d89e71643e994505cb9b827a
699,766
import re


def empty_template(pattern, template):
    """Function to extract all {words} from the template using the given
    pattern, replacing each match with an empty "{}" placeholder."""
    template = re.sub(pattern, "{}", template)
    return template
b62592a449ce38971cc81083928cfd034be1984b
699,767
import pickle


def read_pickle(file_name):
    """Reload the dataset"""
    with open(file_name, 'rb') as file:
        return pickle.load(file)
404d578de68db726e14f6214aed3e512a9abf947
699,768
import re


def duration_to_seconds(duration):
    """
    Convert duration string to seconds

    :param duration: as string (either 00:00 or 00:00:00)
    :return: duration in seconds :class:`int` or None if it's in the
        wrong format
    """
    if not re.match("^\\d\\d:\\d\\d(:\\d\\d)?$", duration):
        return None
    array = duration.split(':')
    if len(array) == 2:
        return int(array[0]) * 60 + int(array[1])
    return int(array[0]) * 3600 + int(array[1]) * 60 + int(array[2])
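A usage sketch:

print(duration_to_seconds("02:30"))     # 150
print(duration_to_seconds("01:02:03"))  # 3723
print(duration_to_seconds("1:2"))       # None (wrong format)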
25340e85fdc2db03eaa65aba60a158f951da389a
699,769
def flatten_tests(test_classes):
    """
    >>> test_classes = {x: [x] for x in range(5)}
    >>> flatten_tests(test_classes)
    [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4)]

    >>> test_classes = {x: [x + 1, x + 2] for x in range(2)}
    >>> flatten_tests(test_classes)
    [(0, 1), (0, 2), (1, 2), (1, 3)]
    """
    tests = []
    for class_name, test_names in test_classes.items():
        tests += [(class_name, test_name) for test_name in test_names]
    return tests
332b98ab499ff53ba974d51ac862e7015f616e64
699,770
def clues_too_many(text: str) -> bool:
    """Check for any "too many connections" clues in the response code."""
    text = text.lower()
    for clue in ("exceed", "connections", "too many", "threads", "limit"):
        # Not 'download limit exceeded' error
        if (clue in text) and ("download" not in text) and ("byte" not in text):
            return True
    return False
0d105985d7eb032668ad8b08704658846da725b7
699,771
def load_vocab(filename: str):
    """Load vocabulary from given dataset file."""
    with open(filename, 'r', encoding='utf-8') as f:
        text = f.read().strip().split('\n')
    text = [word.split(' ') for word in text]
    vocab_dict = {word[0]: word[1:] for word in text}
    vocab_dict = {k: [float(d) for d in v] for k, v in vocab_dict.items()}
    return vocab_dict
fe11e129439173ba13ca276c788c0f27053f5e6a
699,773
import re


def normalize_whitespace(s: str) -> str:
    """Convert all whitespace (tabs, newlines, etc) into spaces."""
    return re.sub(r"\s+", " ", s, flags=re.MULTILINE)
6d8b65bcdca9838aa0f4d16d158db5d2218cbf24
699,774
import os
import glob


def find_granule_metafiles_in_SAFE(inSAFE, tile_glob_pattern='*', tile_name=None):
    """Find granule metadata files in SAFE

    Parameters
    ----------
    inSAFE : str
        path to .SAFE folder
    tile_glob_pattern : str, optional
        granule glob search pattern e.g. '32???'
    tile_name : str, optional
        granule name e.g. '32UPF'; overrides tile_glob_pattern

    Returns
    -------
    list of str
        paths to granule metadata files
    """
    if tile_name is not None:
        tile_glob_pattern = tile_name
    tile_glob_pattern = '*_T{}*'.format(tile_glob_pattern.upper().lstrip('T'))
    pattern = os.path.join(inSAFE, 'GRANULE', tile_glob_pattern, '*.xml')
    return sorted(glob.glob(pattern))
8aed455fd01e9b74ec870ad6e4646521bc4cbe40
699,775
def set_dist_names(spc_dct_i, saddle):
    """ Set various things needed for TSs """
    dist_names = []
    if saddle:
        mig = 'migration' in spc_dct_i['class']
        elm = 'elimination' in spc_dct_i['class']
        if mig or elm:
            dist_names.append(spc_dct_i['dist_info'][0])
            dist_names.append(spc_dct_i['dist_info'][3])
    return dist_names
cb5f72b3d00efb8272f7b5012f776bbd63e38634
699,776
def fit_loss(ac, fit):
    """Loss of the fit"""
    r = ac - fit
    error = sum(r ** 2) / sum(ac ** 2)
    return error
00d41891dea7d914299d87d971fb2a41df2109ed
699,777
def growth_pos_check(clods, pos):
    """For checking the position of a growing seed tip.

    Like position_calc but amended for a growing seed: each time the seed
    grows there is a check for clod collisions.

    :param clods: All the clod objects in the bed
    :param pos: The proposed position of the seed tip
    :return: found, int 0 for no clod hit or the number of the clod hit
    """
    found = 0
    for x in range(len(clods)):
        # Check x, then y, then z; only move on if a clash is found.
        if clods[x].pos[0] - clods[x].radi <= pos[0] <= clods[x].pos[0] + clods[x].radi:
            if clods[x].pos[1] - clods[x].radi <= pos[1] <= clods[x].pos[1] + clods[x].radi:
                if clods[x].pos[2] - clods[x].radi <= pos[2] <= clods[x].pos[2] + clods[x].radi:
                    found = clods[x].clod_no  # the seed growth is impeded!
    return found
743455907cc81dc3950db202b4f23a79c1eafd33
699,778
def determine_letter(current_score):
    """
    Calculates the letter grade for a given score

    :param current_score: the score to be evaluated
    :return: the letter grade that score falls within
    """
    if current_score >= 90:
        return "A"
    elif current_score >= 80:
        return "B"
    elif current_score >= 70:
        return "C"
    elif current_score >= 60:
        return "D"
    else:
        return "F"
324aaa8e28a0cbc298410ecd83ea4eee6d39a970
699,779
from typing import Tuple
from typing import Dict


def parse_icf(icf_file: str) -> Tuple[Dict, Dict]:
    """Parse ICF linker file.

    ST only provides .icf linker files for many products, so there is a
    need to generate basic GCC compatible .ld files for all products.
    This parses the basic features from the .icf format well enough to
    work for the ST's .icf files that exist in `cmsis_device`.

    Args:
        icf_file: .icf linker file read into a string

    Returns:
        (regions, blocks) where
            `regions` is a map from region_name -> (start_hex, end_hex)
            `blocks` is a map from block_name -> {feature_1: val_1,...}

    Raises:
        IndexError if .icf is malformed (at least compared to how ST
        makes them)
    """
    symbols = {}
    regions = {}  # region: (start_addr, end_addr)
    blocks = {}
    for line in icf_file.split('\n'):
        line = line.strip()
        if line == '' or line.startswith('/*') or line.startswith('//'):
            continue
        tokens = line.split()
        if len(tokens) < 2:
            continue
        if tokens[0] == 'define':
            if tokens[1] == 'symbol':
                symbols[tokens[2]] = tokens[4].strip(';')
            elif tokens[1] == 'region':
                regions[tokens[2].split('_')[0]] = (tokens[5],
                                                    tokens[7].strip('];'))
            elif tokens[1] == 'block':
                blocks[tokens[2]] = {
                    tokens[4]: tokens[6].strip(','),
                    tokens[7]: tokens[9]
                }
    parsed_regions = {
        region: (symbols[start] if start in symbols else start,
                 symbols[end] if end in symbols else end)
        for region, (start, end) in regions.items()
    }
    parsed_blocks = {
        name: {k: symbols[v] if v in symbols else v
               for k, v in fields.items()}
        for name, fields in blocks.items()
    }
    return (parsed_regions, parsed_blocks)
ddc1288603d0697bf915eb82a712f210f54efacd
699,780
def translate(word):
    """Translates third person words into first person words."""
    forms = {"is": "am", 'she': 'I', 'he': 'I', 'her': 'my',
             'him': 'me', 'hers': 'mine', 'your': 'my', 'has': 'have'}
    if word.lower() in forms:
        return forms[word.lower()]
    return word
ac433a4db5154c065e2cec1263976dba72863ad8
699,781
def normalize_attribute(attr):
    """
    Normalizes the name of an attribute which is spelled in slightly
    different ways in paizo HTMLs
    """
    attr = attr.strip()
    if attr.endswith(':'):
        attr = attr[:-1]  # Remove trailing ':' if any
    if attr == 'Prerequisites':
        attr = 'Prerequisite'  # Normalize
    if attr == 'Note':
        attr = 'Prerequisite'  # Normalize a very special case (Versatile Channeler)
    if attr == 'Benefits':
        attr = 'Benefit'  # Normalize
    if attr == 'Leadership Modifiers':
        attr = 'Benefit'  # Normalize a very special case (Leadership)
    assert attr in ('Prerequisite', 'Benefit', 'Normal', 'Special')
    return attr.lower()
2cb66878547ee8a98c14bf08261f2610def57a37
699,782
from typing import List import shutil def _get_build_bf_command(args: dict, in_fn: List[str]) -> List[str]: """Helper function to compose command to get the final Bloom Filter :arg dict args: Dict of arguments. :arg str in_fn: Path to file where the reads will be read :ivar dict args: Dict of arguments. The ones used are: - "kmer" (int): size of the kmers. - "threads" (int): number of threads to be used. - "bloom_size" (str): size of the Bloom filter in bytes. K/M/G units can be used. - "levels" (int): number of Bloom filters used. - "output_bloom" (str): path where to store the Bloom Filter :ivar str in_fn: Path to file where the reads will be read. In later methods /dev/stdin is used. :param dict args: Dict of arguments. :param str in_fn: Input filename. ..seealso: Where it is used: :py:meth: `build_baited_bloom_filter` """ abyss_bloom_path = shutil.which('abyss-bloom') build_bf = [ abyss_bloom_path, 'build', '--verbose', '--kmer', str(args["kmer"]), '--bloom-size', args["bloom_size"], '--levels', str(args["levels"]), '--threads', str(args["threads"]), args["bloom"] ] + in_fn return build_bf
9736a1825613dd518361bb1a5ded9b266293017a
699,783
import sys


def _is_venv():
    """
    :return: True if running inside a virtual environment
    """
    return (hasattr(sys, 'real_prefix')
            or getattr(sys, 'base_prefix', sys.prefix) != sys.prefix)
87a6434bd4b572abbacf5b8bb81250e6287181d4
699,784
def is_user_diabetic(avg_glucose_level):
    """
    desc:
        converts avg_glucose_level to a category based on ADA Guidelines
        https://www.diabetes.org/a1c/diagnosis

    args:
        avg_glucose_level (float): glucose level in blood based on mg/dL

    returns:
        (int): 1 if the glucose level is in the diabetic range
        (>= 200 mg/dL), 0 otherwise
    """
    if avg_glucose_level >= 200:
        return 1
    else:
        return 0
59b8a9937f620c28eb51da4ef56c493d1b2177d8
699,785
import array


def formatTilePlanar(tile, nPlanes):
    """Convert an 8x8 pixel image to planar tile data, 8 bytes per plane."""
    if (tile.size != (8, 8)):
        return None
    pixels = iter(tile.getdata())
    outplanes = [array.array('B') for i in range(nPlanes)]
    for y in range(8):
        slivers = [0 for i in range(nPlanes)]
        for x in range(8):
            px = next(pixels)
            for i in range(nPlanes):
                slivers[i] = slivers[i] << 1
                if px & 0x01:
                    slivers[i] = slivers[i] | 1
                px >>= 1
        for i in range(nPlanes):
            outplanes[i].append(slivers[i])
    out = b"".join(plane.tobytes() for plane in outplanes)
    return out
5ff30470a1392744139a5577f2a51a519f58ab42
699,786
def gen_factory(func, seq):
    """Generator factory returning a generator."""
    # do stuff ... immediately when factory gets called
    print("build generator & return")
    return (func(*args) for args in seq)
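A usage sketch, showing that the print fires at factory-call time while func is applied lazily:

import operator

gen = gen_factory(operator.add, [(1, 2), (3, 4)])  # prints: build generator & return
print(list(gen))  # [3, 7]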
9188f5959ec2feb52b83f20f40474f91f4cbfe08
699,787
def _parseExpectedWords(wordList, defaultSensitivity=80):
    """Parse expected words list.

    This function is used internally by other functions and classes
    within the `transcribe` module.

    Expected words or phrases are usually specified as a list of strings.
    CMU Pocket Sphinx allows for additional 'sensitivity' values for each
    phrase ranging from *0* to *100*. This function will generate two
    lists, the first with just words and another with the specified
    sensitivity values. This allows the user to specify sensitivity
    levels which can be ignored if the recognizer engine does not support
    it.

    Parameters
    ----------
    wordList : list of str
        List of words or phrases. Sensitivity levels for each can be
        specified by putting a value at the end of each string separated
        with a colon `:`. For example, ``'hello:80'`` for 80% sensitivity
        on 'hello'. Values are normalized between *0.0* and *1.0* when
        returned.
    defaultSensitivity : int or float
        Default sensitivity to use if a word does not have one specified,
        between 0 and 100%.

    Returns
    -------
    tuple
        Returns list of expected words and list of normalized
        sensitivities for each.

    Examples
    --------
    Specifying expected words to CMU Pocket Sphinx::

        words = ['hello:95', 'bye:50']
        expectedWords = zip(*_parseExpectedWords(words))

    """
    defaultSensitivity = defaultSensitivity / 100.  # normalized

    sensitivities = []
    if wordList is not None:
        # sensitivity specified as `word:80`
        wordListTemp = []
        for word in wordList:
            wordAndSense = word.split(':')
            if len(wordAndSense) == 2:  # specified as `word:80`
                word, sensitivity = wordAndSense
                sensitivity = int(sensitivity) / 100.
            else:
                word = wordAndSense[0]
                sensitivity = defaultSensitivity  # default is 80% confidence
            wordListTemp.append(word)
            sensitivities.append(sensitivity)
        wordList = wordListTemp

    return wordList, sensitivities
83512a86ae112de79bd84e1d9ea3ebebcb4cdefd
699,788
import six
import os


def get_file_name_list(paths, ending=None):
    """Returns the list of files contained in any sub-folder in the given
    paths (can be a single path or a list of paths).

    :param paths: paths to the directory (a string or a list of strings)
    :param ending: if given, restrict to files with the given ending
    """
    file_name_list = []
    if isinstance(paths, six.string_types):
        paths = [paths]
    for path in paths:
        for dir_path, dir_names, file_names in os.walk(path):
            dir_names.sort()
            file_names.sort()
            for file_name in file_names:
                if (ending and file_name.endswith(ending)) or not ending:
                    file_name_list.append(os.path.join(dir_path, file_name))
    return file_name_list
c02e81e426937caed9c1d17c5e83147597e54a1a
699,790
import requests


def delete_menu(access_token):
    """
    Delete the menu. Use with caution.

    HTTP request method: GET
    https://api.weixin.qq.com/cgi-bin/menu/delete?access_token=ACCESS_TOKEN

    :param str access_token: ACCESS_TOKEN
    :rtype: json
    """
    menu_url = 'https://api.weixin.qq.com/cgi-bin/menu/delete?access_token={access_token}'.format(
        access_token=access_token)
    r = requests.get(menu_url)
    assert r.status_code == 200
    return r.json()
fb52f406d52b2dad2aedfd8c04913a3ca94b6ca1
699,791
def _generate_trades_df_response(trades_df):
    """
    Generates JSON response from trades

    Attributes
    ----------
    trades_df: DataFrame of Trades
    """
    trades_df = trades_df.drop(
        columns=["symbol", "trade_id", "stop_loss", "take_profit"]
    )
    trades_df = trades_df.rename(
        columns={
            "cash_value": "amount_invested",
            "monetary_amount": "cash_allocated",
        }
    )
    trades_df = trades_df.round(2)
    trades = trades_df.to_dict(orient="records")
    return trades
34244801babedeb75ec21001ea99a9e8aef863e2
699,792
def measurement(qreg=0, creg=0):
    """Generate QASM that takes a measurement from a qubit and stores it
    in a classical register.

    Args:
        qreg(int): Number of the Qubit to measure. (default 0)
        creg(int): Number of the Classical Register to store the
            measurement to. (default 0)

    Returns:
        str: Generated QASM containing measurement instruction."""
    # Ensure integer variables have correct types.
    if qreg is not None:
        qreg = int(qreg)
    if creg is not None:
        creg = int(creg)

    # Generate a measurement argument for QASM 2.0.
    meas_str = f'measure q[{str(qreg)}] -> c[{str(creg)}];'

    # Return generated measurement argument.
    return meas_str
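A usage sketch:

print(measurement(qreg=1, creg=2))  # measure q[1] -> c[2];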
9a9a24f390bf0745e7cdfe80bb1893f77161c171
699,794
def get_vplex_port_parameters():
    """This method provides the parameters required for the ansible port
    module on VPLEX."""
    return dict(
        cluster_name=dict(required=True, type='str'),
        port_name=dict(required=True, type='str'),
        enabled=dict(required=False, type='bool'),
        state=dict(required=True, type='str', choices=['present', 'absent'])
    )
9533cf6ff8eedd943b88c9cd08ea16407aa9ee64
699,795
def adjacent(p):
    """Return the positions adjacent to position p"""
    return (
        (p[0] + 1, p[1], p[2]),
        (p[0] - 1, p[1], p[2]),
        (p[0], p[1] + 1, p[2]),
        (p[0], p[1] - 1, p[2]),
        (p[0], p[1], p[2] + 1),
        (p[0], p[1], p[2] - 1),
    )
988597e0abd150ae60b556e52a217bd8a136707b
699,796
async def goodbye():
    """Used in our randomized redirect."""
    return {"message": "Goodbye"}
2a0ce1fe99497b55cfd9a9853cd75a0c7eddac40
699,797
def make_table(*list_of_list):
    """
    :param list_of_list: list of list of strings
    :returns: a valid rst table
    """
    lcols = [len(x) for x in list_of_list[0]]
    for li in list_of_list:
        # compute the max length of the columns
        lcols = [max(len(x), y) for x, y in zip(li, lcols)]
    form = '| ' + " | ".join("{:<%s}" % x for x in lcols).strip() + ' |'
    sep = '+' + '+'.join((x + 2) * '-' for x in lcols) + '+'
    r = [sep]
    for li in list_of_list:
        r += [form.format(*li), sep]
    return '\n'.join(r) + '\n'
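A usage sketch producing a reST grid table:

table = make_table(["name", "value"], ["alpha", "1"], ["beta", "22"])
print(table)
# +-------+-------+
# | name  | value |
# +-------+-------+
# | alpha | 1     |
# +-------+-------+
# | beta  | 22    |
# +-------+-------+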
77153451571e70a77d6bd525bd252aa7efe1c693
699,798
import re


def replace_special_whitespace_chars(text: str) -> str:
    """It's annoying to deal with non-breaking whitespace chars like
    u'\\xa0' or other whitespace chars. Let's replace all of them with
    the standard char before doing any other processing."""
    text = re.sub(r"\s", " ", text)
    return text
17be082a827039264cd75fb7459fc31eb7f617dd
699,799
import re


def processSets(results):
    """Process set results to be displayed in the set selection window

    :param List[Dict[str, Union[int, str, Dict[str, str]]]] results: A
        list of raw set results
    :return: A list of processed results in table form
    :rtype: List[Union[str, int]]
    """
    # Compile the emoji-stripping pattern once, outside the loop.
    emoji_pattern = re.compile("["
                               u"\U0001F600-\U0001F64F"  # emoticons
                               u"\U0001F300-\U0001F5FF"  # symbols & pictographs
                               u"\U0001F680-\U0001F6FF"  # transport & map symbols
                               u"\U0001F1E0-\U0001F1FF"  # flags (iOS)
                               "]+", flags=re.UNICODE)
    rows = []
    for item in results:
        if item['edition_size'] == 'unlimited':
            char = '∞'
        else:
            char = 'LE'
        name = item['name']
        rows.append([emoji_pattern.sub(r'', name),
                     item['creator']['name'], char, item['id']])
    return rows
bf050015474691402d2d992070c487b45cc34a42
699,800
def two_sequences_in_parallel(sequence1, sequence2):
    """
    Demonstrates iterating (looping) through TWO sequences in PARALLEL.
    This particular example assumes that the two sequences are of equal
    length and returns the number of items in sequence2 that are bigger
    than their corresponding item in sequence1.

    For example, if the sequences are:
       [11, 22, 10, 44, 33, 12]
       [55, 10, 30, 30, 30, 30]
    then this function returns 3, since 55 > 11 and 30 > 10 and 30 > 12.
    """
    # ------------------------------------------------------------------
    # The TWO-SEQUENCES-IN-PARALLEL pattern is:
    #
    #   for k in range(len(sequence1)):
    #       ... sequence1[k] ... sequence2[k] ...
    #
    # The above assumes that the sequences are of equal length
    # (or that you just want to do the length of sequence1).
    # ------------------------------------------------------------------
    count = 0
    for k in range(len(sequence1)):
        # Count items where sequence2 is bigger, matching the docstring;
        # the original compared in the opposite direction.
        if sequence2[k] > sequence1[k]:
            count = count + 1
    return count
c5dbce5f99d5c2efeee4048ec1451ea63f404fef
699,801
def image_crop(src, x1, y1, x2, y2):
    """
    Crop image from (x1, y1) to (x2, y2).

    Parameters
    ----------
    :param src: Input image in BGR format
    :param x1: Initial x coordinate for image cropping
    :param y1: Initial y coordinate for image cropping
    :param x2: End x coordinate of image cropping
    :param y2: End y coordinate of image cropping
    """
    # Image arrays are indexed [row, column], i.e. [y, x]; the original
    # sliced rows by x and columns by y, contradicting the docstring.
    return src[y1:y2, x1:x2]
6ab70dc644d0d7054ea70fadcf7ec0ca381918d8
699,802
import argparse


def parse_arguments():
    """
    Simple argument parsing using python's argparse

    return: Python's argparse parser object
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--input", help="Single XML file or directory",
                        action="store")
    parser.add_argument("--log",
                        help="Generate a log file listing all XML files failing validation",
                        action="store_true")
    return parser
0dcf19b43923c8e59af1444372421a07d6695534
699,803
import re


def get_doc(src):
    """get comments from Python source code

    Parameter
    --------------
    src@str - the source code
    """
    pat = re.compile(r'((?:def|class)\s+[^\n]*\s*)"""(.*?)"""',
                     re.MULTILINE | re.DOTALL)
    return [gs for gs in pat.findall(src)]
bb9716a9f7b3c99d5ea3e468b7f69f893773feda
699,804
def _GetBinaryName(client):
    """Gets the GRR binary name on the client."""
    client_data = client.Get().data
    return client_data.agent_info.client_binary_name
c363a4a42e81184ce2cb74bfdc9c2fd01a746cb1
699,805
def fill_with_gauss(df, w=12):
    """Fill missing values in a time series data using a gaussian window."""
    return df.fillna(
        df.rolling(window=w, win_type="gaussian", center=True,
                   min_periods=1).mean(std=2)
    )
fdfdedaf7968f617ff98df586b89c30053a6c886
699,806
def swap(heights_list, index01, index02):
    """Swap two positions in a list at given indexes

    Args:
        heights_list (list): iterable in which swapping occurs
        index01 (int): index of first element
        index02 (int): index of second element

    Returns:
        list: list with element positions swapped
    """
    heights_list[index01], heights_list[index02] = \
        heights_list[index02], heights_list[index01]
    return heights_list
f7add4a06a79837766b5840840d17c3247b0bcae
699,807
def calibrate_seq(cigar_seq, sequence, md_seq, ref_positions):
    """Make cigar seq and seq the same length, with deletions as '-'."""
    new_sequence = ''
    new_pos = []
    new_cigar = ''
    new_md = ''
    seq = iter(sequence)
    pos = iter(ref_positions)
    md = iter(md_seq)
    current_position = 0
    for cigar in cigar_seq:
        if cigar == 'S':
            # Soft-clipped base: consume it without emitting anything.
            next(seq)
        elif cigar == 'D':
            new_cigar += cigar
            new_pos.append(current_position + 1)
            new_sequence += '-'
            new_md += next(md)
        elif cigar == 'I':
            new_cigar += cigar
            new_pos.append(current_position)
            current_base = next(seq)
            new_sequence += current_base
            new_md += '+'
        elif cigar == 'M':
            current_base = next(seq)
            current_position = next(pos)
            new_sequence += current_base
            new_pos.append(current_position)
            new_cigar += cigar
            new_md += next(md)
    return new_cigar, new_sequence, new_pos, new_md
b149587d8f61a4e7c82d4bde94e62512a5682346
699,808
def create_first_n_1_bits_mask(n, k):
    """Return a binary mask of the first n bits of 1s, then k bits of 0s."""
    if n < 0 or k < 0:
        raise ValueError("n and k cannot be negative numbers")
    if n == 0:
        return 0
    # (1 << n) - 1 produces exactly n one-bits; the original (2 << n) - 1
    # produced n + 1 one-bits, contradicting the docstring.
    mask = (1 << n) - 1
    return mask << k
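A usage sketch (with the off-by-one fix above applied):

print(bin(create_first_n_1_bits_mask(3, 2)))  # 0b11100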
5a9b637a8973f004da2330c8ebb06ea63fd542c3
699,809
import re


def clean_str(string):
    """Strip and replace some special characters."""
    msg = str(string)
    msg = msg.replace("\n", " ")
    msg = re.sub(r"\s+", r" ", msg)
    msg = re.sub(r"^\s", r"", msg)
    msg = re.sub(r"\s$", r"", msg)
    return msg
50132d2c56498f4590fcba7837deb791500f3110
699,810
def char_to_ix(chars):
    """
    Make a dictionary that maps a character to an index

    Arguments:
    chars -- list of character set

    Returns:
    dictionary that maps a character to an index
    """
    return {ch: i for i, ch in enumerate(chars)}
8bfc5b99c7f5aef6d88276fe4b3ad005ce9a017e
699,811
def bool_tag(name, value):
    """Create a DMAP tag with boolean data."""
    return (name.encode('utf-8') +
            b'\x00\x00\x00\x01' +
            (b'\x01' if value else b'\x00'))
dc914d262a20eed0732e477f75641daa4811fd9f
699,812
def _concat(*lists):
    """Concatenates the items in `lists`, ignoring `None` arguments."""
    concatenated = []
    for lst in lists:
        if lst:
            concatenated += lst
    return concatenated
a1eea1c074fe1eee1ca454899bf9dec2719a333e
699,813
def get_least_squares_size(modelform, r, m=0, affines=None):
    """Calculate the number of columns in the operator matrix O in the
    Operator Inference least-squares problem.

    Parameters
    ----------
    modelform : str containing 'c', 'A', 'H', 'G', and/or 'B'
        The structure of the desired reduced-order model. Each character
        indicates the presence of a different term in the model:
        'c' : Constant term c
        'A' : Linear state term Ax.
        'H' : Quadratic state term H(x⊗x).
        'G' : Cubic state term G(x⊗x⊗x).
        'B' : Input term Bu.
        For example, modelform=="AB" means f(x,u) = Ax + Bu.
    r : int
        The dimension of the reduced order model.
    m : int
        The dimension of the inputs of the model.
        Must be zero unless 'B' is in `modelform`.
    affines : dict(str -> list(callables))
        Functions that define the structures of the affine operators.
        Keys must match the modelform:
        * 'c': Constant term c(µ).
        * 'A': Linear state matrix A(µ).
        * 'H': Quadratic state matrix H(µ).
        * 'G': Cubic state matrix G(µ).
        * 'B': linear Input matrix B(µ).
        For example, if the constant term has the affine structure
        c(µ) = θ1(µ)c1 + θ2(µ)c2 + θ3(µ)c3, then 'c' -> [θ1, θ2, θ3].

    Returns
    -------
    ncols : int
        The number of columns in the Operator Inference least-squares
        problem.
    """
    has_inputs = 'B' in modelform
    if has_inputs and m == 0:
        raise ValueError("argument m > 0 required since 'B' in modelform")
    if not has_inputs and m != 0:
        raise ValueError(f"argument m={m} invalid since 'B' is not in modelform")

    if affines is None:
        affines = {}

    qc = len(affines['c']) if 'c' in affines else 1 if 'c' in modelform else 0
    qA = len(affines['A']) if 'A' in affines else 1 if 'A' in modelform else 0
    qH = len(affines['H']) if 'H' in affines else 1 if 'H' in modelform else 0
    qG = len(affines['G']) if 'G' in affines else 1 if 'G' in modelform else 0
    qB = len(affines['B']) if 'B' in affines else 1 if 'B' in modelform else 0

    return qc + qA*r + qH*r*(r+1)//2 + qG*r*(r+1)*(r+2)//6 + qB*m
86cf6a0b3e4b256eaccb3f061d21f7de74dcc604
699,814
def read(rows):
    """Reads the list of rows and returns the sudoku dict.

    The sudoku dict maps an index to a known value. Unknown values are
    not written. Indices go from 0 to 80.
    """
    sudoku = {}
    i = 0
    for rn, row in enumerate(rows):
        if rn in (3, 7):
            continue
        j = 0
        for cn, c in enumerate(row.rstrip()):
            if cn in (3, 7):
                continue
            if c != ".":
                sudoku[i * 9 + j] = int(c)
            j += 1
        i += 1
    return sudoku
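A usage sketch with a hypothetical grid in the 11-line layout the row/column skips imply (separator characters at columns 3 and 7, ruler lines at rows 3 and 7):

grid = [
    "53.|.7.|...",
    "6..|195|...",
    ".98|...|.6.",
    "---+---+---",
    "8..|.6.|..3",
    "4..|8.3|..1",
    "7..|.2.|..6",
    "---+---+---",
    ".6.|...|28.",
    "...|419|..5",
    "...|.8.|.79",
]
sudoku = read(grid)
print(sudoku[0], sudoku[4])  # 5 7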
1f1a06a32d1be70f3d912bd42b9cca07f7d4879d
699,815
import subprocess


def get_server_info():
    """Returns server information"""
    container_name = subprocess.check_output("uname -n", shell=True).decode("ascii").strip()
    ip_addr = subprocess.check_output("hostname -I", shell=True).decode("ascii").strip()
    cores = subprocess.check_output("nproc", shell=True).decode("ascii").strip()

    cmd = "cat /sys/class/net/eth0/speed | awk '{print $0 / 1000\"GbE\"}'"
    net_speed = subprocess.check_output(cmd, shell=True).decode("ascii").strip()

    # cmd = "cat /sys/class/net/eth0/address"
    # mac_address = subprocess.check_output(cmd, shell=True).decode("ascii").strip()

    cmd = "grep MemTotal /proc/meminfo | awk '{print $2 / 1024 / 1024\"GB\"}'"
    memory = subprocess.check_output(cmd, shell=True).decode("ascii").strip()

    server_info = {'container_name': container_name,
                   'ip_address': ip_addr,
                   'net_speed': net_speed,
                   'cores': cores,
                   'memory': memory}
    """
    if os.path.exists("/proc"):
        server_info.update({'/proc/cpuinfo': open("/proc/cpuinfo", 'r').read(),
                            '/proc/meminfo': open("/proc/meminfo", 'r').read(),
                            '/proc/self/cgroup': open("/proc/meminfo", 'r').read(),
                            '/proc/cgroups': open("/proc/cgroups", 'r').read()})
    """
    return server_info
0bbe71a91bd6e183fd3980f49935b801dede9fbf
699,816
def uniquify(iterable):
    """Remove duplicates in given iterable, preserving order."""
    uniq = set()
    return (x for x in iterable if x not in uniq and (uniq.add(x) or True))
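A usage sketch:

print(list(uniquify([3, 1, 3, 2, 1])))  # [3, 1, 2]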
563953cc6450a0136a4996d4a5f5a0057f6ad69b
699,817
def count_candidate_votes(candidate_dict: dict, csv_data: list) -> dict:
    """
    Go through the candidate list and count the number of votes each time
    it appears.

    :param candidate_dict:
    :param csv_data:
    :return:
    """
    # Go through the csv and get the candidate's name
    for row in csv_data:
        candidate_name = row[2]
        # Add the candidate vote to the dictionary and update it
        for candidate, data in candidate_dict.items():
            if candidate_name == candidate:
                data["votes"] += 1
    return candidate_dict
b2ca61d78183c1b152e22bb413ebb89a8dff8b0a
699,818
def __getDeviceByUDN(deviceElements, udn):
    """Search and return the device element defined by the UDN from the
    listDevices elements."""
    for device_element in deviceElements:
        if device_element.getAttribute("udn") == udn:
            return device_element
84d271b18dbcdf60688c1d921469bef45b4e4721
699,819
def createTable(c):
    """This makes a table

    Keyword arguments:
    c -- the cursor object of the connected database
    """
    try:
        c.execute("""
            CREATE TABLE IF NOT EXISTS members (
                username TEXT,
                password TEXT
            );
        """)
    except Exception:
        print("Error")
        return False
    return True
37e7eb9fda45871c0b8890c3111c920d608f80fb
699,820
import os


def link_to_frontend():
    """
    The backend expects a link to a directory called frontend at the same
    level. Attempt to create one if it does not exist.
    """
    backend_path = os.path.dirname(os.path.realpath(__file__))
    link_path = backend_path + '/frontend'
    frontend_path = backend_path.replace('backend', 'frontend')
    if not os.path.isdir(link_path):
        print("Expected to find a linked directory to the frontend.")
        print("Checking default location %s." % frontend_path)
        if os.path.isdir(frontend_path):
            print("Attempting to create linked directory to %s." % frontend_path)
            os.symlink(frontend_path, link_path)
        else:
            print("Frontend directory not found in %s." % frontend_path)
            print("Either install the frontend to the default directory "
                  "or manually create a link in the backend directory called "
                  "'frontend' to the directory where the front end is installed.")
            return 1
    return 0
3e1ebad7bd42ce25f52416aa88eb09886b42b7d8
699,822
def badFormatting(s, charSet):
    """Tells if a character from charSet appears in a string s."""
    for c in charSet:
        if c in s:
            return True
    return False
23baba28be306e0d0c1ccaa0df48e1a9f94bdc8c
699,823
def column_to_width(df_in, column, width):
    """Pad the column header and the values in the column with whitespace
    to a specific width.
    """
    df = df_in.copy()
    df[column] = df[column].apply(lambda x: ('{:>' + str(width) + '}').format(x))
    df = df.rename(columns={column: ('{:>' + str(width) + '}').format(column)})
    return df
988f021c7ff2f296ecacd83ddbced0de6404e3fc
699,824
def use_netrc(netrc, urls, patterns):
    """compute an auth dict from a parsed netrc file and a list of URLs

    Args:
      netrc: a netrc file already parsed to a dict, e.g., as obtained from
        read_netrc
      urls: a list of URLs.
      patterns: optional dict of url to authorization patterns

    Returns:
      dict suitable as auth argument for ctx.download; more precisely, the
      dict will map all URLs where the netrc file provides login and
      password to a dict containing the corresponding login, password and
      optional authorization pattern, as well as the mapping of "type" to
      "basic" or "pattern".
    """
    auth = {}
    for url in urls:
        schemerest = url.split("://", 1)
        if len(schemerest) < 2:
            continue
        if not (schemerest[0] in ["http", "https"]):
            # For other protocols, bazel currently does not support
            # authentication. So ignore them.
            continue
        host = schemerest[1].split("/")[0].split(":")[0]
        if not host in netrc:
            continue
        authforhost = netrc[host]
        if host in patterns:
            auth_dict = {
                "type": "pattern",
                "pattern": patterns[host],
            }
            if "login" in authforhost:
                auth_dict["login"] = authforhost["login"]
            if "password" in authforhost:
                auth_dict["password"] = authforhost["password"]
            auth[url] = auth_dict
        elif "login" in authforhost and "password" in authforhost:
            auth[url] = {
                "type": "basic",
                "login": authforhost["login"],
                "password": authforhost["password"],
            }
    return auth
561ee1388dbdde74614fdef1fb29b78c7ecc687b
699,825
def str_cutoff(string: str, max_length: int, cut_tail: bool = False) -> str:
    """
    Abbreviate a string to a given length.

    The resulting string will carry an indicator if it's abbreviated,
    like ``stri#``.

    Parameters
    ----------
    string : str
        String which is to be cut.
    max_length : int
        Max resulting string length.
    cut_tail : bool
        ``False`` for string abbreviation from the front, else ``True``.

    Returns
    -------
    str
        Resulting string
    """
    if max_length < 1:
        raise ValueError("max_length < 1 not allowed")

    if len(string) > max_length:
        pos = max_length - 1
        return (string[:pos] + "#") if cut_tail else ("#" + string[-pos:])

    return string
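A usage sketch:

print(str_cutoff("abcdefgh", 5))                 # '#efgh'
print(str_cutoff("abcdefgh", 5, cut_tail=True))  # 'abcd#'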
05fdab8700dd07710c31d4007c9bc6b3f9eb6155
699,826
def is_over(board):
    """Returns True if the game is over, False if not"""
    for player in range(2):
        for move_x in range(board.height):
            for move_y in range(board.width):
                # List of the number of the player's pawns in each direction,
                # starting from the last one, beginning with up then going
                # clockwise.
                list_near_points = []
                directions = [(-1, 0), (-1, 1), (0, 1), (1, 1),
                              (1, 0), (1, -1), (0, -1), (-1, -1)]
                for dir in directions:
                    k = 0
                    # NOTE: both bounds checks use board.width, so this
                    # assumes a square board.
                    while (move_y + dir[0] * k >= 0
                           and move_y + dir[0] * k < board.width
                           and move_x + k * dir[1] >= 0
                           and move_x + k * dir[1] < board.width):
                        if board.read_tile(move_y + dir[0] * k,
                                           move_x + k * dir[1]) == player:
                            k += 1
                        else:
                            break
                    list_near_points.append(k - 1)
                for k in range(4):
                    if list_near_points[k] + list_near_points[k + 4] > 1:
                        return True
    is_full = True
    for move in range(board.width):
        for i in range(board.height):
            if board.read_tile(i, move) is None:
                is_full = False
    if is_full:
        return True
    return False
9302d53f72ece8928763a70b10fb265bc6b8151b
699,827
def wireless(card, mode=None, apn=None):
    """Retrieve wireless modem info or customize modem behavior.

    Args:
        card (Notecard): The current Notecard object.
        mode (string): The wireless module mode to set.
        apn (string): Access Point Name (APN) when using an external SIM.

    Returns:
        string: The result of the Notecard request.
    """
    req = {"req": "card.wireless"}
    if mode:
        req["mode"] = mode
    if apn:
        req["apn"] = apn
    return card.Transaction(req)
355256bd8123f0f749561f61a2df3be93b91db61
699,828
def filter_none(x):
    """
    Recursively removes key, value pairs or items that are None.
    """
    if isinstance(x, dict):
        return {k: filter_none(v) for k, v in x.items() if v is not None}
    elif isinstance(x, list):
        # The original tested `x is not None` here, which never filtered
        # anything; the item `i` is what must be checked.
        return [filter_none(i) for i in x if i is not None]
    else:
        return x
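A usage sketch (with the list-comprehension fix above applied):

print(filter_none({"a": 1, "b": None, "c": [1, None, {"d": None}]}))
# {'a': 1, 'c': [1, {}]}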
c1c478b2c367dd9453b5504bbfece7dfd8c05376
699,829
def _dmet_orb_list(mol, atom_list):
    """Rearrange the orbital label

    Args:
        mol (pyscf.gto.Mole): The molecule to simulate.
        atom_list (list): Atom list for IAO assignment (int).

    Returns:
        newlist (list): The orbital list in new order (int).
    """
    newlist = []
    for i in range(mol.natm):
        for j in range(mol.nao_nr()):
            if (atom_list[j] == i):
                newlist.append(j)
    return newlist
7998d9cec104bc02ad3daf600d2e24d9b1f5f243
699,830
def _post_processing(metric_map: dict[str, float]) -> dict[str, float]:
    """
    Unit conversion etc...

    time: taskTime, executorDeserializeTime, executorRunTime, jvmGcTime
    are milliseconds; executorDeserializeCpuTime, executorCpuTime are
    nanoseconds.
    """
    # Convert the nanosecond CPU metrics to milliseconds.
    metric_map["executorDeserializeCpuTime"] = metric_map["executorDeserializeCpuTime"] * 1e-6
    metric_map["executorCpuTime"] = metric_map["executorCpuTime"] * 1e-6
    return metric_map
23ff301d55e0dc2d2208aca5761059fb8ade3e4e
699,831
import re


def wildcard_to_regex(wildcard):
    """
    Converts a * syntax into a parsed regular expression.

    Maya wildcard validation:
        1. Maya does not support '-' characters, so we change those
           characters to '_'
        2. Maya uses | as separators, so we escape them
        3. We need to replace any '*' with '.*'
        4. Expression must end with $

    :param wildcard: str, wildcard to parse. If no wildcard is provided,
        we match everything.
    :return: compiled, case-insensitive regular expression
    """
    if not wildcard:
        expression = '.*'
    else:
        wildcard = wildcard.replace('-', '_')
        expression = re.sub(r'(?<!\\)\|', r'\|', wildcard)
        expression = re.sub(r'(?<!\\)\*', r'.*', expression)
        if not expression[-1] == '$':
            expression += '$'

    regex = re.compile(expression, flags=re.I)
    return regex
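A usage sketch:

regex = wildcard_to_regex("pCube*")
print(bool(regex.match("pCube1")))    # True
print(bool(regex.match("pSphere1")))  # False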
aa8460305a6129d1a114845882dfcf29b547431b
699,832