def l1_loss(pred, target):
    """L1 Loss.

    Args:
        pred (:obj:`torch.Tensor`): The predictions.
        target (:obj:`torch.Tensor`): The learning targets.

    Returns:
        :obj:`torch.Tensor`: The loss tensor.
    """
    if target.numel() == 0:
        return pred.sum() * 0
    assert pred.size() == target.size()
    loss = (pred - target).abs()
    return loss
bc739daff91dece113d4c170c6cbb83cee034049
91,787
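A minimal usage sketch for the l1_loss snippet above, assuming PyTorch is installed; the example tensors are made up for illustration:

import torch

pred = torch.tensor([1.0, 2.0, 3.0])
target = torch.tensor([1.5, 2.0, 2.0])
print(l1_loss(pred, target))  # tensor([0.5000, 0.0000, 1.0000])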
import re

def cwb_escape(inname):
    """Replace dots with "-" for CWB compatibility."""
    return re.sub(r"\.", "-", inname)
b552fc8a0ccf8d61c4febbc7617c4c0e3433f013
91,800
import random

def gen(n=12, flags={}, forbidden=""):
    """Generate a random string of characters for use as a secure password.

    Inputs
        n: Length of the string to generate.
        flags:
            digits: Include at least one digit in the string.
            lower: Include at least one lowercase letter in the string.
            upper: Include at least one uppercase letter in the string.
            special: Include at least one common special character in the string.
            more: Include at least one of other special characters in the string.
            ReduceConfusion: Remove "01l|O" characters.
        forbidden: Any characters that should not be in the final string.

    Outputs
        A string of random ASCII characters.
    """
    # Create the (cryptographically secure) random number generator.
    rng = random.SystemRandom()

    # This variable will hold a string of potential characters from which to
    # make the password.
    ascii_chars = ""

    # Digits
    ascii_digits = '0123456789'
    if flags.get('digits'):  # .get() avoids a KeyError when a flag is absent
        ascii_chars += ascii_digits

    # Uppercase Letters
    ascii_uppers = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    if flags.get('upper'):
        ascii_chars += ascii_uppers

    # Lowercase Letters
    ascii_lowers = "abcdefghijklmnopqrstuvwxyz"
    if flags.get('lower'):
        ascii_chars += ascii_lowers

    # Special Characters (Common)
    ascii_special = "_=+!@#$%&*?-"
    if flags.get('special'):
        ascii_chars += ascii_special

    # More special Characters (Less Common)
    ascii_more = r"""`|^\/~<>'",.(){}[];:"""
    if flags.get('more'):
        ascii_chars += ascii_more

    # Remove Confusion (no 1's, 0's, l's, |'s or O's)
    if flags.get('ReduceConfusion'):
        for confusable in "lO10|":
            ascii_chars = ascii_chars.replace(confusable, "")

    # Remove any characters specified as forbidden by the user
    ascii_chars = [c for c in ascii_chars if c not in forbidden]

    def meetsCriteria(candidate):
        """Ensure the candidate password contains characters from each
        desired set of ASCII characters."""
        met = True
        if flags.get('digits'):
            if len(set(candidate) & set(ascii_digits)) < 1:
                met = False
        if flags.get('upper'):
            if len(set(candidate) & set(ascii_uppers)) < 1:
                met = False
        if flags.get('lower'):
            if len(set(candidate) & set(ascii_lowers)) < 1:
                met = False
        if flags.get('special'):
            if len(set(candidate) & set(ascii_special)) < 1:
                met = False
        if flags.get('more'):
            if len(set(candidate) & set(ascii_more)) < 1:
                met = False
        return met

    # Generate the random password and ensure it meets the criteria (contains
    # characters from each specified group).
    pw = ""
    while not meetsCriteria(pw):
        pw = ""
        for count in range(n):
            # Randomly select a character from the list
            ridx = rng.randint(0, len(ascii_chars) - 1)
            pw += ascii_chars[ridx]
    return pw
2fd28c0cc1ad9205a054c31f13ab802890e96d5c
91,806
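A hedged usage sketch for gen above; the flag names follow its docstring, and the printed value is of course random:

flags = {'digits': True, 'lower': True, 'upper': True,
         'special': True, 'more': False, 'ReduceConfusion': True}
print(gen(16, flags=flags, forbidden="$"))  # e.g. 'tR7u?Fq+k2WvN_eX'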
def datetime_to_iso8601(whendt):
    """
    Converts a datetime object to a date string in ISO8601 format with
    timezone. The format is of the form ``YYYY-MM-DDTHH:MM:SS.MS-TZ``, for
    example::

        2020-06-10T01:47:35.186550-06:00

    :param datetime whendt: Datetime object. *Any naive datetime object is
        considered a local timezone datetime.* To correctly pass a UTC
        datetime use::

            from datetime import datetime, timezone
            now = datetime.now(timezone.utc)

    :return: A string with the datetime in the ISO8601 with timezone format.
    :rtype: str
    """
    # Any naive datetime is a local timezone datetime
    if whendt.tzinfo is None:
        whendt = whendt.astimezone(tz=None)
    return whendt.isoformat()
d172e21038aa7917d2b04309e24a0a364a78ab50
91,808
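A quick demonstration of datetime_to_iso8601, following the docstring's own UTC example:

from datetime import datetime, timezone

print(datetime_to_iso8601(datetime.now()))              # local offset, e.g. ...-06:00
print(datetime_to_iso8601(datetime.now(timezone.utc)))  # ends in +00:00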
import yaml

def extractYAML(yaml_file):
    """This function extracts YAML vars from a YAML file.

    Args:
        yaml_file (string): String containing the path to the YAML file.

    Returns:
        dictionary: Returns a dictionary with all values parsed from the YAML file.
    """
    try:
        with open(yaml_file) as var_file:
            yaml_output = yaml.load(var_file, Loader=yaml.FullLoader)
        return yaml_output
    # Narrowed from a bare except, which would also swallow KeyboardInterrupt.
    except (OSError, yaml.YAMLError) as err:
        print('Extracting YAML encountered an error: {}'.format(err))
1484b787875c12c46eea42fb8512a6ed554bd838
91,815
def is_set(val, mask):
    """Return True if bits in mask are also set in val."""
    return val & mask == mask
2efb3a3477070b97b052af89a3a6b167050921ea
91,823
def cbdof(self, fname1="", ext1="", fname2="", ext2="", kpos="", clab="",
          kshs="", tolout="", tolhgt="", tolthk="", **kwargs):
    """Activates cut-boundary interpolation (for submodeling).

    APDL Command: CBDOF

    Parameters
    ----------
    fname1
        File name and directory path (248 characters maximum, including
        directory) from which to read boundary node data. If no specified
        directory path exists, the path defaults to your working directory
        and you can use all 248 characters for the file name.

    ext1
        Filename extension (eight-character maximum).

    fname2
        File name and directory path (248 characters maximum, including
        directory) to which cut-boundary D commands are written. If no
        specified directory path exists, the path defaults to your working
        directory and you can use all 248 characters for the file name.

    ext2
        Filename extension (eight-character maximum).

    kpos
        Position on Fname2 to write block of D commands:

        0 - Beginning of file (overwrite existing file).

        1 - End of file (append to existing file).

    clab
        Label (eight characters maximum, including the colon) for this block
        of D commands on Fname2. This label is appended to the colon (:).
        Defaults to CBn, where n is the cumulative iteration number for the
        data set currently in the database. For imaginary data (see KIMG on
        the *SET command), Clab defaults to CIn.

    kshs
        Shell-to-solid submodeling key:

        0 - Solid-to-solid or shell-to-shell submodel.

        1 - Shell-to-solid submodel.

    tolout
        Extrapolation tolerance about elements, based on a fraction of the
        element dimension. Submodel nodes outside the element by more than
        TOLOUT are not accepted as candidates for DOF extrapolation.
        Defaults to 0.5 (50 percent).

    tolhgt
        Height tolerance above or below shell elements, in units of length.
        Used only for shell-to-shell submodeling (KSHS = 0). Submodel nodes
        off the element surface by more than TOLHGT are not accepted as
        candidates for degree-of-freedom interpolation or extrapolation.
        Defaults to 0.0001 times the maximum element dimension.

    tolthk
        Height tolerance above or below shell elements, based on a fraction
        of the shell element thickness. Used only for shell-to-solid
        submodeling (KSHS = 1). Submodel nodes off the element surface by
        more than TOLTHK are not accepted as candidates for DOF interpolation
        or extrapolation. Defaults to 0.1 times the average shell thickness.

    Notes
    -----
    File Fname1 should contain a node list for which boundary conditions are
    to be interpolated (NWRITE). File Fname2 is created to contain
    interpolated boundary conditions written as a block of D commands.

    Boundary conditions are written for the active degree-of-freedom set for
    the element from which interpolation is performed. Interpolation occurs
    on the selected set of elements. The block of D commands begins with an
    identifying colon label and ends with a /EOF command. The colon label is
    of the form :Clab (described above).

    Interpolation from multiple results sets can be performed by looping
    through the results file in a user-defined macro. Additional blocks can
    be appended to Fname2 by using KPOS and unique colon labels. To read the
    block of commands, issue the /INPUT command with the appropriate colon
    label.

    If the model has coincident (or very close) nodes, the CBDOF must be
    applied to each part of the model separately to ensure that the mapping
    of the nodes is correct. For example, if nodes belonging to two adjacent
    parts linked by springs are coincident, the operation should be performed
    on each part of the model separately.

    Resume the coarse model database at the beginning of the cut-boundary
    procedure. The database should have been saved after the first coarse
    model solution, as the number of nodes in the database and the results
    file must match, and internal nodes are sometimes created during the
    solution.

    Caution: Relaxing the TOLHGT or TOLTHK tolerances to allow submodel nodes
    to be "found" can produce poor submodel results.
    """
    command = f"CBDOF,{fname1},{ext1},,{fname2},{ext2},,{kpos},{clab},{kshs},{tolout},{tolhgt},{tolthk}"
    return self.run(command, **kwargs)
03d20f2e0933c2411a22ab540e38270532ed4826
91,825
import base64

def base64_decode(bot, trigger):
    """Decodes a base64 string."""
    if not trigger.group(2):
        return bot.reply('I need something to decode.')
    try:
        decodedBytes = base64.b64decode(trigger.group(2).encode('utf-8'))
        decodedStr = str(decodedBytes, 'utf-8')
        bot.say(decodedStr)
    except Exception:
        bot.reply('I need real base64, fool.')
c8446acbd26158af4e9d421d39e195030f65c23d
91,830
def get_fname(fileish):
    """Return filename from `fileish`"""
    if isinstance(fileish, str):
        return fileish
    return getattr(fileish, 'name', "<file object>")
cea97d0ba5de9fc13696ef74715de86a2b1ec863
91,831
import pathlib

def check_rdma() -> bool:
    """Check if RDMA is possible on this node.

    Returns:
        bool: True if RDMA is possible.
    """
    try:
        p = pathlib.Path("/dev/infiniband")
        # Bug fix: test each entry's name (x.name), not the parent
        # directory's name (p.name), which would never match "uverb".
        verb_file_list = [x for x in p.iterdir() if "uverb" in x.name]
    except FileNotFoundError:
        return False
    use_rdma = len(verb_file_list) == 2
    return use_rdma
901daf2b305ad8dbc9a23c81859530035883eaa1
91,835
import json

def pretty_json(value):
    """Pretty print JSON - for development purposes only."""
    return "<pre>" + json.dumps(value, indent=4) + "</pre>"
bf889e6e797f4d5077190b79b088724f13bb277c
91,837
def _get_session_key_payload(_username, _password=None, _return_json=True):
    """This function constructs the payload used to request a session key.

    .. versionadded:: 3.5.0

    :param _username: The username (i.e. login) for the user being authenticated
    :type _username: str
    :param _password: The password for the user being authenticated

        .. note:: A password is not required if authenticating a secondary
            user with a previously authenticated Administrator account.

    :type _password: str, None
    :param _return_json: Determines if the session key should be returned in
        JSON format (``True`` by default)
    :type _return_json: bool
    :returns: A dictionary with the authentication request payload
    """
    _auth_payload = {'user.login': _username}
    if _password:
        _auth_payload.update({'user.password': _password})
    if _return_json:
        _auth_payload.update({'restapi.response_format': 'json'})
    return _auth_payload
a8377752ca27260fe85f2d3079a0e5adfedf7cd9
91,838
def firstOccurenceInStr(aList, aString):
    """
    Return the first element in aList that is contained in the string aString.
    """
    for elem in aList:
        if elem in aString:
            return elem
    return None
a303956fa7cd14b6cc46ec58c02774d0e95b3a02
91,841
import torch
from typing import Tuple

def _compute_splatted_colors_and_weights(
    occlusion_layers: torch.Tensor,  # (N, H, W, 9)
    splat_colors_and_weights: torch.Tensor,  # (N, H, W, K, 9, 5)
) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Accumulate splatted colors in background, surface and foreground occlusion
    buffers.

    Args:
        occlusion_layers: (N, H, W, 9) tensor. See _compute_occlusion_layers.
        splat_colors_and_weights: (N, H, W, K, 9, 5) tensor. See _offset_splats.

    Returns:
        splatted_colors: (N, H, W, 4, 3) tensor. Last dimension corresponds to
            background, surface, and foreground splat colors.
        splatted_weights: (N, H, W, 1, 3) tensor. Last dimension corresponds to
            background, surface, and foreground splat weights and is used for
            normalization.
    """
    N, H, W, K, _, _ = splat_colors_and_weights.shape

    # Create an occlusion mask, with the last dimension of length 3,
    # corresponding to background/surface/foreground splatting. E.g.
    # occlusion_layer_mask[n,h,w,k,d,0] is 1 if the pixel at hw is splatted
    # from direction d such that the splatting pixel p is below the splatted
    # pixel q (in the background); otherwise, the value is 0.
    # occlusion_layer_mask[n,h,w,k,d,1] is 1 if the splatting pixel is at the
    # same surface level as the splatted pixel q, and
    # occlusion_layer_mask[n,h,w,k,d,2] is 1 only if the splatting pixel is
    # in the foreground.
    layer_ids = torch.arange(K, device=splat_colors_and_weights.device).view(
        1, 1, 1, K, 1
    )
    occlusion_layers = occlusion_layers.view(N, H, W, 1, 9)
    occlusion_layer_mask = torch.stack(
        [
            occlusion_layers > layer_ids,   # (N, H, W, K, 9)
            occlusion_layers == layer_ids,  # (N, H, W, K, 9)
            occlusion_layers < layer_ids,   # (N, H, W, K, 9)
        ],
        dim=5,
    ).float()  # (N, H, W, K, 9, 3)

    # (N * H * W, 5, 9 * K) x (N * H * W, 9 * K, 3) -> (N * H * W, 5, 3)
    splatted_colors_and_weights = torch.bmm(
        splat_colors_and_weights.permute(0, 1, 2, 5, 3, 4).reshape(
            (N * H * W, 5, K * 9)
        ),
        occlusion_layer_mask.reshape((N * H * W, K * 9, 3)),
    ).reshape((N, H, W, 5, 3))

    return (
        splatted_colors_and_weights[..., :4, :],
        splatted_colors_and_weights[..., 4:5, :],
    )
5a607669f61e0e255e36b496cdf96e40466fe8a8
91,847
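A shape-only smoke test for the function above, assuming PyTorch is available; the random tensors stand in for real rasterizer output:

import torch

N, H, W, K = 1, 4, 4, 2
occlusion_layers = torch.randint(0, K, (N, H, W, 9))
splat_colors_and_weights = torch.rand(N, H, W, K, 9, 5)
colors, weights = _compute_splatted_colors_and_weights(
    occlusion_layers, splat_colors_and_weights)
print(colors.shape)   # torch.Size([1, 4, 4, 4, 3])
print(weights.shape)  # torch.Size([1, 4, 4, 1, 3])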
def add_search_bearings(search_bearing, perpendicular=True):
    """
    Take in a single bearing or a list of bearings. If perpendicular is set to
    False, return it as a list of itself; if perpendicular is kept set to True,
    return a list of itself plus all of its parallel and perpendicular
    bearings. Use for filtering objects by desired bearings.

    Parameters
    ----------
    search_bearing : list or integer
    perpendicular : boolean

    Returns
    -------
    search_list : list
    """
    search_list = []
    if type(search_bearing) == list:
        for i in search_bearing:
            search_list.append(i)
            if perpendicular:
                for t in range(3):
                    search_list.append((i + (90 * (t + 1))) % 360)
    elif type(search_bearing) == int:
        search_list.append(search_bearing)
        if perpendicular:
            for t in range(3):
                search_list.append((search_bearing + (90 * (t + 1))) % 360)
    else:
        print("Please make sure the bearing(s) you are searching for are either an integer or a list of integers")
    return search_list
01f3ba22fb1146484bf1b7c1821a6bd058d47ace
91,848
import collections

def generate_vocab(text):
    """
    Generate a vocabulary from `text`. Tokens are assumed to be separated by
    whitespace. <unk> characters have been explicitly removed from this
    vocabulary generator.
    """
    tokens = text.split()
    counter = collections.Counter(tokens)
    # We do not want to store the <unk>, <s>, </s> characters in the vocabulary
    del counter["<unk>"]
    del counter["<s>"]
    del counter["</s>"]
    count_pairs = sorted(counter.items(), key=lambda x: -x[1])
    unique_tokens, _ = zip(*count_pairs)
    vocab = dict(zip(unique_tokens, range(len(unique_tokens))))
    return vocab
a8ebd46b8d675f6bc1163543084607329669d293
91,849
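A small example of generate_vocab; tokens with equal counts keep their first-seen order because sorted() is stable:

text = "<s> the cat sat on the mat </s> <unk>"
print(generate_vocab(text))
# {'the': 0, 'cat': 1, 'sat': 2, 'on': 3, 'mat': 4}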
def combine_id_and_collection(instance):
    """
    Returns a string where the id and the collection string of an instance
    are combined.
    """
    return "{}{}".format(instance.id, instance.get_collection_string())
23a9c492c55ea6ab7cafba23d0da75a500512740
91,850
def deal_with_input(box):
    """
    This function checks the format of input rows. If the user inputs
    non-alphabet characters, not single letters, or no space between letters,
    it prints 'Illegal input'. If the input format is correct, the input
    string is transformed into a list. For example: an input of "a b c d"
    will be saved into the list ['a', 'b', 'c', 'd'].

    :return: list_of_box (list)
        when legal format: a list containing 4 letters,
        when illegal format: an empty list
    """
    list_of_box = []
    if len(box) != 7:
        print('Illegal input')
    else:
        # Bug fix: compare strings with != rather than the identity check
        # `is not`, which is unreliable for string values.
        if box[1] != ' ' or box[3] != ' ' or box[5] != ' ':
            print('Illegal input')
        elif box[0].isalpha() and box[2].isalpha() and box[4].isalpha() and box[6].isalpha():
            list_of_box.append(box[0])
            list_of_box.append(box[2])
            list_of_box.append(box[4])
            list_of_box.append(box[6])
        else:
            print('Illegal input')
    return list_of_box
862d4b507cf370360cb9c2de9bc0a76f099529a6
91,852
def fix_json(x):
    """Replace single quotes in JSON with double quotes."""
    if len(x) > 0:
        return str(x).replace("'", '"')
    else:
        return None
80674b7084f29f71a7dbf11481642c0efcae4bb4
91,855
import pkg_resources

def get_packages(working_set):
    """
    Return a sorted list of tuples of all package names and their version in
    working_set

    :param working_set: the working_set for all packages installed in this env
    :type working_set: setuptools.pkg_resources.WorkingSet obj
    :return: pkg_list: a list of tuples [('name','version')]
        e.g. [('resilient-circuits', '39.0.0')]
    :rtype: list
    """
    # Bug fix: the original bare `isinstance(...)` expression had no effect;
    # it was clearly intended as an assertion.
    assert isinstance(working_set, pkg_resources.WorkingSet)
    pkg_list = []
    for pkg in working_set:
        pkg_list.append((pkg.project_name, pkg.version))
    return sorted(pkg_list, key=lambda x: x[0].lower())
a9b2abfb5bcd37cb9067bf1d2862557eb755be71
91,857
import hashlib

def get_sha1(filename):
    """Get the SHA-1 value based on the file's contents."""
    # The file must be read in binary mode, otherwise computing the SHA-1
    # may give wrong results.
    with open(filename, "rb") as f:
        sha1obj = hashlib.sha1()
        sha1obj.update(f.read())
        # Return the 40-character hexadecimal hash value.
        return sha1obj.hexdigest()
c23791179c441c9761ce72505f32194ee8a4ff17
91,858
from pathlib import Path

import yaml

def read_config(path_to_file):
    """Reads a configuration file."""
    path_to_file = Path(path_to_file)
    if not path_to_file.exists():
        raise ValueError("Config {} does not exist.".format(path_to_file))
    with path_to_file.open('r') as stream:
        try:
            return yaml.safe_load(stream)
        except yaml.YAMLError as exc:
            raise IOError(exc)
11bca301b331feb4441401c90ed922abe9d76ad3
91,859
def validate_no_marriage_to_siblings(gedcom):
    """ US18: No marriage to siblings """
    errors = []

    def get_families_with_spouse(spouse_id):
        result = []
        for family in gedcom.families:
            if family.husband_id == spouse_id:
                result.append(family)
            elif family.wife_id == spouse_id:
                result.append(family)
        return result

    for family in gedcom.families:
        # Skip families without children (bug fix: `continue` instead of
        # `pass`, so we never iterate over an empty/None child list).
        if not family.children or len(family.children) == 0:
            continue
        for child_id in family.children:
            # Bug fix: compare ids with != rather than the identity check `is not`.
            siblings = list(filter(lambda x: x != child_id, family.children))
            # Check this child's families to see if their spouses are siblings
            childs_families = get_families_with_spouse(child_id)
            for childs_family in childs_families:
                if childs_family.husband_id in siblings:
                    errors.append(f'Error: US18: Individual {childs_family.husband_id} is married to {child_id}, a sibling of theirs in Family {family.id}.')
                if childs_family.wife_id in siblings:
                    errors.append(f'Error: US18: Individual {childs_family.wife_id} is married to {child_id}, a sibling of theirs in Family {family.id}.')
    return errors
e21af713f8e4662c85c134bc854f4cc09f75ad27
91,860
def mit_hit_score(
    seq1: str, seq2: str, guide_strand_same=True, include_pam=False
) -> float:
    """Compute MIT mismatch score between two 20-mers or 23-mers.

    See 'Scores of single hits' on http://crispr.mit.edu/about
    See calcHitScore in
    https://github.com/maximilianh/crisporWebsite/blob/master/crispor.py

    Parameters
    ----------
    seq1, seq2 : sequence
        two 20-mers to compare
    guide_strand_same : optional
        direction for starting with PAM
    include_pam : optional
        include extra 3bp for PAM.

    Returns
    -------
    float
        MIT mismatch score between the two sequences

    Extremes.

    >>> mit_hit_score('AAAAAAAAAAAAAAAAAAAA', 'AAAAAAAAAAAAAAAAAAAA')
    100.0
    >>> mit_hit_score('AAAAAAAAAAAAAAAAAAAA', 'GAAAAAAAAAAAAAAAAAAA')
    100.0
    >>> mit_hit_score('AAAAAAAAAAAAAAAAAAAA', 'AAAAAAAAAAAAAAAAAAAG')
    41.7
    >>> mit_hit_score('ZZZZZZZZZZZZZZZZZZZZ', 'AAAAAAAAAAAAAAAAAAAA')
    8.609700038185587e-08

    Realistic.

    >>> mit_hit_score('AAGGCCAACCGGCGCCGCGC', 'GCGCGGCGCCGGTTGGCCTT')
    6.039504885480631e-06
    >>> mit_hit_score('GAAGGCCAACCGGCGCCGCG', 'CGCGGCGCCGGTTGGCCTTC')
    1.6703747039472636e-05

    Other direction.

    >>> mit_hit_score('AAAAAAAAAAAAAAAAAAAA', 'GAAAAAAAAAAAAAAAAAAA', False)
    41.7
    >>> mit_hit_score('AAAAAAAAAAAAAAAAAAAA', 'AAAAAAAAAAAAAAAAAAAG', False)
    100.0

    Real case.

    >>> seq1 = 'CTAAGAGCATTTACACAATACA'[::-1]
    >>> seq2 = 'ctgAGAGCATTTACACAATACA'[::-1]
    >>> mit_hit_score(seq1, seq2)
    0.05972723076923077

    Include PAM.

    >>> mit_hit_score('AAAAAAAAAAAAAAAAAAAAGGG', 'AAAAAAAAAAAAAAAAAAAATGG', include_pam=True)
    100.0
    >>> mit_hit_score('AAAAAAAAAAAAAAAAAAAAAGG', 'AAAAAAAAAAAAAAAAAAAAATT', include_pam=True)
    0.20754716981132063
    """
    # aka Matrix "M"
    hit_score_m = [0, 0, 0.014, 0, 0, 0.395, 0.317, 0, 0.389, 0.079,
                   0.445, 0.508, 0.613, 0.851, 0.732, 0.828, 0.615,
                   0.804, 0.685, 0.583]
    if include_pam:
        # Add some high values, determined intuitively.
        hit_score_m += [0, 0.8, 0.8]

    # Go towards PAM
    if guide_strand_same is False:
        seq1 = seq1[::-1]
        seq2 = seq2[::-1]

    if include_pam:
        assert len(seq1) == 23
        max_dist = 22
    else:
        # Use most important 20bp only
        seq1 = seq1[-20:]
        seq2 = seq2[-20:]
        assert len(seq1) == 20
        max_dist = 19

    dists = []  # distances between mismatches, for part 2
    mm_count = 0  # number of mismatches, for part 3
    last_mm_pos = None  # position of last mismatch, used to calculate distance

    score1 = 1.0
    for pos in range(0, len(seq1)):
        if seq1[pos] != seq2[pos]:
            mm_count += 1
            if last_mm_pos is not None:
                dists.append(pos - last_mm_pos)  # type: ignore
            score1 *= 1 - hit_score_m[pos]
            last_mm_pos = pos

    # 2nd part of the score
    if mm_count < 2:  # special case, not shown in the paper
        score2 = 1.0
    else:
        avg_dist = sum(dists) / len(dists)
        score2 = 1.0 / (((max_dist - avg_dist) / float(max_dist)) * 4 + 1)

    # 3rd part of the score
    if mm_count == 0:  # special case, not shown in the paper
        score3 = 1.0
    else:
        score3 = 1.0 / (mm_count**2)

    return score1 * score2 * score3 * 100
a6925370f050f0c446483c83d88927fa7c4594e8
91,861
def clip(val, lower=0.0, upper=1.0):
    """
    Clips val between lower and upper.

    >>> clip(1, 0, 2)
    1
    >>> clip(2, 3, 6)
    3
    >>> clip(5, 1, 2)
    2

    Works recursively on lists.

    >>> clip([-0.2, 0.5, 1.4, 0.7])
    [0.0, 0.5, 1.0, 0.7]

    :param val: value to be clipped
    :param lower: lower bound
    :param upper: upper bound
    :return: val clipped between lower and upper
    """
    if isinstance(val, list):
        return [clip(v, lower, upper) for v in val]
    return max(lower, min(upper, val))
f77fad2fadc17c3fb7a881e869e33ae195255410
91,866
def quantify_reporters(idx, scan, spectrum, precision, reporters, digits):
    """
    :param idx: file index, for reporting only
    :param scan: scan number of the spectrum, for reporting only
    :param spectrum: spectrum list of [mz/I]
    :param precision: mass precision
    :param reporters: list of reporters to be quantified
    :param digits: number of significant digits to report
    :return: list of intensities
    """
    tmt_intensities = [idx, scan]
    for reporter in reporters:
        upper = reporter + reporter * (precision / 2) * 1e-6
        lower = reporter - reporter * (precision / 2) * 1e-6
        reporter_intensity = sum([I for mz_value, I in spectrum
                                  if upper > mz_value > lower])
        tmt_intensities.append(round(reporter_intensity, digits))
    # Write total spectrum intensity
    spectrum_intensity = sum([I for mz_value, I in spectrum])
    tmt_intensities.append(round(spectrum_intensity, digits))
    return tmt_intensities
c7d15bf950689bf05dd6888a6c90d763ed0ba757
91,867
import struct

def b_to_f(binary):
    """Convert binary to floating point"""
    return struct.unpack('f', struct.pack('I', binary))[0]
35616fc25e8d8af482d3a0049c90be0eb411fc6a
91,871
def is_positive(img):
    """
    Returns a boolean version of `img` where the positive values are converted
    into True, the other values are False.
    """
    return img > 0
1fb7b40b3483f5af38d8a1829c974d03b0349232
91,873
from pathlib import Path

def non_empty_file(path: Path) -> bool:
    """
    :param path: path of a file
    :return: True if the given path is a file and is not empty, otherwise False
    """
    return path.is_file() and path.stat().st_size > 0
16b7264410614a7d7b793c597f74241359ffae36
91,875
def set_region_appliance_association(
    self,
    appliance_region_map: dict,
) -> bool:
    """Set association between appliances and regions. Can set one or many
    associations.

    .. list-table::
        :header-rows: 1

        * - Swagger Section
          - Method
          - Endpoint
        * - regions
          - POST
          - /regions/appliances

    :param appliance_region_map: Dictionary where each key/value pair is an
        appliance nePk (e.g. ``3.NE``) and region_id,
        e.g. {"3.NE": "1", "10.NE": "2", ...}
    :type appliance_region_map: dict
    :return: Returns True/False based on successful call
    :rtype: bool
    """
    return self._post(
        "/regions",
        data=appliance_region_map,
        expected_status=[204],
        return_type="bool",
    )
a020d8359a0f67592ffd3a7070eeb711c53ccbc7
91,876
import random

def get_randstr(length=1):
    """
    return a string of randomly picked chars

    :param length: string length to return
    """
    chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ_abcdefghijklmnopqrstuvwxyz'
    retval = ''.join([random.choice(chars) for _ in range(length)])
    return retval
fb3d552ea02427022547964115e277de19f2f376
91,882
def bool_to_word(boolean):
    """
    Complete the method that takes a boolean value and returns a "Yes" string
    for true, or a "No" string for false.

    :param boolean: boolean variable.
    :return: Yes if true, otherwise, No.
    """
    return "Yes" if boolean else "No"
a362b7b591f1172b58fc2aa8d360db95b2c6aea7
91,883
def npath(path):
    """
    Always return a native path, that is unicode on Python 3 and bytestring
    on Python 2. Noop for Python 3.
    """
    return path
caa6dcd53f8a504a63921e60072538b2578cb669
91,884
def udfize_lambda_string(expression: str):
    """Given an expression that uses 'input' as a parameter, return a lambda
    as a string."""
    return "lambda input: ({})".format(expression)
bdcc542568702e91f60400ef25f86df9d4847563
91,889
def is_list_empty(in_list):
    """Check if a list is empty.

    ref: https://stackoverflow.com/questions/1593564/python-how-to-check-if-a-nested-list-is-essentially-empty
    """
    if isinstance(in_list, list):  # Is a list
        in_list = [x for x in in_list if x != '']  # remove empty strings
        return all(map(is_list_empty, in_list))
    return False  # a non-list element means the structure is not empty
82aa4c85b33a6a2b6dbbef089d8ff7b42d5f6853
91,899
def missing_key_dummy_mapping(missing_keys):
    """Create a dummy key_mapping for INSPIRE texkeys

    Parameters
    ----------
    missing_keys: array of string
        The keys from cite_keys which are INSPIRE keys but were not found
        in bib_dbs.

    Returns
    -------
    key_mapping: dict
        Each key in missing_keys will appear in key_mapping as
        key_mapping[key]['texkey'] = key
    """
    return {key: {'texkey': key} for key in missing_keys}
5930e6cd01e039bf67d8ef3b26bd0e04ed3aa5e5
91,902
import logging
import time

def driveListFilesQueryWithNextToken(service, parentID, customQuery=None, pageToken=None):
    """Internal function to search items in drive folders

    Args:
        service: Drive service (from getGoogleServices()['drive'])
        parentID (str): Drive folder ID of parent where to search
        customQuery (str): optional custom query parameters
        pageToken (str): optional page token for paging results of large sets

    Returns:
        Tuple (items, nextPageToken) containing the items found and pageToken
        to retrieve remaining data
    """
    param = {}
    param['q'] = "'" + parentID + "' in parents and trashed = False"
    if customQuery:
        param['q'] += " and " + customQuery
    param['fields'] = 'nextPageToken, files(id, name)'
    param['pageToken'] = pageToken
    param['supportsTeamDrives'] = True
    param['includeTeamDriveItems'] = True
    # print(param)
    retriesLeft = 5
    while retriesLeft > 0:
        retriesLeft -= 1
        try:
            results = service.files().list(**param).execute()
            items = results.get('files', [])
            nextPageToken = results.get('nextPageToken')
            # print('Files: ', items)
            return (items, nextPageToken)
        except Exception as e:
            logging.warning('Error listing drive. %d retries left. %s',
                            retriesLeft, str(e))
            if retriesLeft > 0:
                time.sleep(5)  # wait 5 seconds before retrying
    logging.error('Too many list failures')
    return None
75280c3b78ac1b93cc65ee5561bc90e3e0da114d
91,904
import re

def ldap_escape_query(value):
    """Escape a value in an LDAP search query string (RFC4515)."""
    return re.sub(r"[*\\\0)(]", r"\\\g<0>", value)
fc2ebc9b9d7248a429f1ec3a1b3e16c28aaec652
91,910
def gcf_report_parser(gcf_report):
    """
    Parses a GCF report file from NCBI and returns all the NCBI accessions.

    gcf_report: path to the GCF report file
    return: a list of wgs accessions listed in the gcf report file
    """
    # Use a context manager so the file handle is always closed.
    with open(gcf_report, 'r') as fp:
        accessions = [x.strip().split('\t')[4] for x in fp if x[0] != '#']
    return accessions
942432641251ef9ba0ea677a9999fae0290ee2bd
91,916
from functools import reduce
import operator

def SUMPRODUCT(array1, *more_arrays):
    """
    Multiplies corresponding components in the given arrays, and returns the
    sum of those products.

    >>> SUMPRODUCT([3,8,1,4,6,9], [2,6,5,7,7,3])
    156
    >>> SUMPRODUCT([], [], [])
    0
    >>> SUMPRODUCT([-0.25], [-2], [-3])
    -1.5
    >>> SUMPRODUCT([-0.25, -0.25], [-2, -2], [-3, -3])
    -3.0
    """
    return sum(reduce(operator.mul, values) for values in zip(array1, *more_arrays))
5ce33c92fd1797957acaabbf4aa420a71027d6c9
91,922
import copy

def _str_params(params):
    """
    Convert NoneType values to the string 'None' for display.

    Parameters
    ----------
    params: dict
        A dictionary of input values to a function
    """
    params_str = copy.deepcopy(params)
    for k, v in params_str.items():
        if v is None:
            params_str[k] = 'None'
    return params_str
f37d92b164b02519b755cc79759e0f51aa8f18b7
91,924
def isShiftCharacter(character):
    """Returns True if the key character is uppercase or shifted."""
    return character.isupper() or character in '~!@#$%^&*()_+{}|:"<>?'
5b32c9b3cefd446f5a9b49cb5516e47be9594dff
91,930
from bs4 import BeautifulSoup

def get_head_from_soup(soup: BeautifulSoup):
    """
    Extract the <head> tag from a BeautifulSoup object.

    Args:
        soup (BeautifulSoup): markup language as BeautifulSoup object

    Returns:
        head (str): <head> of the page

    Modules:
        bs4 (BeautifulSoup)
    """
    # get the head:
    head = soup.find('head')
    # convert to string:
    head = str(head).strip()
    return head
38ecf5c7f6d6d7faa1c04d4e70517d526ad3f8cf
91,932
import math

def get_lcm(a: int, b: int) -> int:
    """Gets the Least Common Multiple of two numbers

    Args:
        a (int): Value A
        b (int): Value B

    Returns:
        int: The least common multiple
    """
    return abs(a * b) // math.gcd(a, b)
ae4c574f142aaeee97529cff9f1f670f1d88a400
91,941
def _check_missing(configured, expected):
    """Check if an item of the expected list is in configured; returns a list
    containing the items not found"""
    missing = []
    for item in expected:
        if item not in configured:
            missing.append(item)
    return missing
383e1e79c86c08d613b12b486d4e195ded0c4bdf
91,943
def country_code_transform(country_code):
    """Transform country code to the code used by VIES"""
    transform_dict = {
        "GR": "EL",
    }
    return transform_dict.get(country_code, country_code)
65953a86de2306e4ebd2168097f198f6ad153546
91,946
def match_to_dict(match):
    """convert cv2.DMatch to a dict"""
    # not using match.imgIdx
    return dict(
        idx1=match.queryIdx,
        idx2=match.trainIdx,
        distance=match.distance,
    )
bafa061bf0dd178698f0143127f9a53f810119d8
91,947
def intersection(*arrays):
    """Computes the intersection of all the passed-in arrays.

    Args:
        arrays (list): Lists to process.

    Returns:
        list: Intersection of provided lists.

    Example:

        >>> intersection([1, 2, 3], [1, 2, 3, 4, 5])
        [1, 2, 3]

    .. versionadded:: 1.0.0
    """
    return list(set(arrays[0]).intersection(*arrays))
026733e18354db5850a566f4a7365157495e55fc
91,949
def to_unicode(s, encoding='utf-8'):
    """Convert to unicode

    :param s: str or unicode
    :param encoding: encode type
    :return: unicode
    """
    return s.decode(encoding)
ef5aaf3ba98219b19a026b34a59b52a3a4504522
91,955
def pad(array, desired_length, default_value):
    """Pads the input array to a given length with the supplied value."""
    return array + [default_value] * (desired_length - len(array))
4537642f561adcb3c9f25da6cafcbe8f57e8f49b
91,957
def parse_probestr(probestr):
    """Parse a probe str in nmap; returns name, payload"""
    name, payload, _ = probestr.split('|')
    return (name, payload)
0d96a36ec46cfaeca8b4e3c076269dffc46d60ef
91,958
def toper(self, parr="", par1="", oper="", par2="", fact1="", fact2="",
          con1="", **kwargs):
    """Operates on table parameters.

    APDL Command: *TOPER

    Parameters
    ----------
    parr
        Name of the resulting table parameter. The command will create a
        table array parameter with this name. Any existing parameter with
        this name will be overwritten.

    par1
        Name of the first table parameter.

    oper
        The operation to be performed: ADD. The operation is:
        ParR(i,j,k) = FACT1*Par1(i,j,k) + FACT2*Par2(i,j,k) + CON1

    par2
        Name of the second table parameter.

    fact1
        The first table parameter multiplying constant. Defaults to 1.

    fact2
        The second table parameter multiplying constant. Defaults to 1.

    con1
        The constant increment for offset. Defaults to 0.

    Notes
    -----
    *TOPER operates on table parameters according to:

    ParR(i,j,k) = FACT1*Par1(i,j,k) + FACT2*Par2(i,j,k) + CON1

    Par1 and Par2 must have the same dimensions and the same variable names
    corresponding to those dimensions. Par1 and Par2 must also have identical
    index values for rows, columns, etc.

    If you want a local coordinate system for the resulting array, you must
    dimension it as such using the *DIM command before issuing *TOPER.

    This command is valid in any processor.
    """
    command = f"*TOPER,{parr},{par1},{oper},{par2},{fact1},{fact2},{con1}"
    return self.run(command, **kwargs)
9dbf1096106a275ba68afa4536a8a98a9c832db7
91,962
def subclasses(cls, and_self=False):
    """
    Return all subclasses for a given class.

    :param cls: A class object.
    :param and_self: Include the given class in the resulting list.
    """
    base = [cls] if and_self else []
    sub = cls.__subclasses__()
    return base + sub + [g for s in sub for g in subclasses(s, False)]
851c5832e01f3ebdc2ea7d6b0f102a872ba66077
91,964
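A small illustration of subclasses above, using a throwaway class hierarchy:

class Animal: pass
class Dog(Animal): pass
class Puppy(Dog): pass

print(subclasses(Animal))                 # [<class 'Dog'>, <class 'Puppy'>]
print(subclasses(Animal, and_self=True))  # [<class 'Animal'>, <class 'Dog'>, <class 'Puppy'>]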
import requests

def __create_auth_github_session(cfg, session=None):
    """Create and return GitHub session authorized with token in
    configuration object"""
    session = session or requests.Session()
    token = cfg["github"]["token"]

    def token_auth(req):
        req.headers["Authorization"] = f"token {token}"
        return req

    session.auth = token_auth
    return session
2708543b99c013b31558838df3e4861288888b6c
91,966
def gcloud_auth(mocker):
    """Patches google.auth.default to bypass authentication"""
    m = mocker.patch("google.auth.default")
    m.return_value = (m.credentials, "test_project")
    return m
228844aafc9cdcd14fa7675c5b2e6c5ff624471b
91,969
def find_edges(faces):
    """
    Find all edges on a mesh

    Parameters
    ----------
    faces : list of lists of three integers
        the integers for each face are indices to vertices, starting from zero

    Returns
    -------
    edges : list of lists of integers
        each element is a 2-tuple of vertex ids representing an edge

    Examples
    --------
    >>> # Simple example:
    >>> from mindboggle.guts.mesh import find_edges
    >>> faces=[[0,1,2], [0,1,4], [1,2,3], [0,2,5]]
    >>> find_edges(faces)
    [[0, 1], [1, 2], [0, 2], [1, 4], [0, 4], [2, 3], [1, 3], [2, 5], [0, 5]]
    """
    edges = []
    for face in faces:
        for edge in [face[0:2], face[1:3], [face[0], face[2]]]:
            if edge not in edges:  # I know that this is costly
                edges.append(edge)
    return edges
1f7d47a1b1c27a4921c29f672dc7ed22fa6333a6
91,973
def get_applied_stereotypes(element):
    """Get collection of applied stereotypes to an element."""
    return element.appliedStereotype[:].classifier
68294262d163690fbb7adf829efaf0858990dfda
91,976
from typing import List

def generate_patterns(import_path: str, module_prefix: str) -> List[str]:
    """Generate a list of patterns to discover.

    For example, generate_patterns("myapp", "models") generates patterns that
    make matchers discover the content in the following files.

    myapp/users/models.py
    myapp/invoices/models.py
    (etc. for all domain packages beyond "users" and "invoices")
    ...
    myapp/users/models_roles.py
    myapp/users/models_groups.py
    (etc. for all modules starting with "models_" in all domain packages)
    ...
    myapp/users/models/roles.py
    myapp/users/models/groups.py
    (if you prefer nested structures)
    """
    return [
        f"{import_path}.*.{module_prefix}",
        f"{import_path}.*.{module_prefix}_*",
        f"{import_path}.*.{module_prefix}.*",
    ]
ca790dd53b58dd4da0ba8e14a321468f926d4cbb
91,978
def get_file_paths(filename):
    """
    Get paths from file. File should contain one path per line. Paths may be
    absolute or relative to the file's directory. Blank lines & comments are
    ignored.
    """
    lines = []
    with open(filename) as fh:
        for line in fh.readlines():
            line = line.strip()
            if len(line) == 0 or line.startswith('#'):
                continue
            lines.append(line)
    return lines
37832e178dc31e8dd725f7e092f4439c4e926fe3
91,980
import re

def node_is_weight(node):
    """
    Judge if the node is a weight.

    Args:
        node: A json node loaded from an mxnet json file.

    Returns:
        True if the node is a weight. False for input tensors and operators.
    """
    ret = False
    if node["op"] == "null" and ("attrs" in node.keys() or
            re.search("_(weight|bias|var|mean|gamma|beta|label)", node["name"])):
        ret = True
    return ret
34acf948160449854ae728de8a74d7f96e5b7c95
91,983
def create_custom_metrics_conf(mds_gig_endpoint_region):
    """
    Create the metrics extension config

    :param mds_gig_endpoint_region: mds gig endpoint region for the VM
    """
    # Note: the mds gig endpoint url is only for 3rd party customers. The
    # 1st party endpoint is different.
    conf_json = '''{
        "version": 17,
        "maxMetricAgeInSeconds": 0,
        "endpointsForClientForking": [],
        "homeStampGslbHostname": "''' + mds_gig_endpoint_region + '''.monitoring.azure.com",
        "endpointsForClientPublication": [
            "https://''' + mds_gig_endpoint_region + '''.monitoring.azure.com/api/v1/ingestion/ingest"
        ]
    }'''
    return conf_json
c9682af60301c38e9a557838cd930de0c8fabafd
91,985
import re

def string2id(string):
    """Remove from string all characters that cannot be used in an
    identifier."""
    return re.sub(r'[^a-zA-Z0-9_]', '', re.sub(r'\s+', '_', string.strip()))
3ac27488c1084e72b86f456e10840fa9f8d9036a
91,986
def map_range(x, out_min, out_max):
    """Linearly map the range of x from min(x), max(x) to out_min, out_max."""
    in_min, in_max = x.min(), x.max()
    return (x - in_min) / (in_max - in_min) * (out_max - out_min) + out_min
69e85113c218829ce8105c23f9f36ff4a904ead8
91,988
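map_range expects an input with .min()/.max() methods, e.g. a NumPy array or a torch tensor; a NumPy sketch:

import numpy as np

x = np.array([2.0, 4.0, 6.0])
print(map_range(x, 0.0, 1.0))  # [0.  0.5 1. ]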
def spin(mult):
    """Calculate the spin (2Ms) from the spin multiplicity, which is
    equivalent to the number of unpaired electrons.

    :param mult: multiplicity
    :type mult: int
    :rtype: int
    """
    return mult - 1
5b9436f06a98c48ca4463c46c41af03633477075
91,989
def get_processable_layers(layers):
    """
    Returns computable layers from a list of layers, along with their types.
    We check that by searching for specific keywords in a layer's name, since
    we can't be sure about the layer's type. If you wish to extend the project
    and add compatibility for more layers, you should update this function as
    well.
    """
    processable_layers = []
    for layer in layers:
        layer_name = layer.name.lower()
        if "conv" in layer_name:
            processable_layers.append([layer_name, "CONVOLUTIONAL"])
        elif "primary" in layer_name and "caps" in layer_name:
            processable_layers.append([layer_name, "PRIMARY_CAPS"])
        elif "caps" in layer_name:
            processable_layers.append([layer_name, "DENSE_CAPS"])
        elif "mask" in layer_name:
            processable_layers.append([layer_name, "MASK"])
    return processable_layers
ac77e821afc58b0d3bdbb0a23035b2ca48eb3fb0
91,990
def any_child(node, name_list):
    """
    Return the first child of "node" with a name matching any in "name_list"
    """
    for child in node.children:
        if child.name in name_list:
            return child
53e5fa1c4bedb5de12bd811731889b81e294765d
91,992
def format_location(course):
    """Format the course location displayed"""
    if "Distanciel" in course['LOCATION']:
        return "Distanciel"
    return course['LOCATION']
a53dc6321b57358ac6368ec846e9583e5f86953c
91,996
def remove_keys_from_dict(keys, adict):
    """Return a dictionary without some keys in it.

    Will remove nested keys.
    """
    for key in keys:
        try:
            del adict[key]
        except KeyError:  # narrowed from a bare except
            pass
    for val in adict.values():
        if isinstance(val, dict):
            remove_keys_from_dict(keys, val)
    return adict
7b38c3897721eace82d79fcd3326dcb786f5ba1e
92,000
def orderPath(nodesDict, start, stop, path):
    """
    Internal function used by shortestWay to put into order nodes from the
    routing table. Return the shortest path from start to stop.
    """
    if start == stop:
        return path + [start]
    return orderPath(nodesDict, start, nodesDict[stop][0], path + [stop])
4db275a74eb563dee422bb8fbd961f0ce3553e35
92,007
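A hedged sketch of orderPath with a hypothetical routing table where nodesDict[node][0] is the previous hop on the shortest path; note the result lists nodes from stop back to start:

nodesDict = {'A': (None, 0), 'B': ('A', 1), 'C': ('B', 2), 'D': ('C', 3)}
print(orderPath(nodesDict, 'A', 'D', []))  # ['D', 'C', 'B', 'A']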
def split_remote(ctx, param, value):
    """Split remote/branch into a tuple."""
    # pylint: disable=unused-argument
    if value:
        try:
            remote, branch = value.split('/')
        except ValueError:
            remote, branch = value, None
        return remote, branch
    return value, value
f7cb88d30ae6b3b05e57dacd29b546f22731427d
92,011
def _create_var_metric_filename(input_dir, model_forcing, var, metric, startdate):
    """Create path to S2S metric file."""
    name = f"{input_dir}/"
    name += f"{model_forcing}_"
    name += f"{var}_"
    name += f"{metric}_"
    name += f"init_monthly_{startdate.month:02d}_{startdate.year:04d}"
    name += ".nc"
    return name
327c2200ae3cc36ccc262ebad2adf8b3eb2e89e2
92,014
def image_repositories_callback(ctx, param, provided_value):
    """
    Create a dictionary of function logical ids to ECR URIs.

    :param ctx: Click Context
    :param param: Param name
    :param provided_value: Value provided by Click, after being processed by
        ImageRepositoriesType.
    :return: dictionary of function logical ids to ECR URIs.
    """
    image_repositories = {}
    for value in provided_value:
        image_repositories.update(value)
    return image_repositories if image_repositories else None
f7e3596b8000795fa8cde69a61d7cd92e63c5056
92,016
import shutil
import tempfile
from pathlib import Path

def create_family_from_archive(cls, label, filepath_archive: Path, fmt=None, pseudo_type=None):
    """Construct a new pseudo family instance from a tar.gz archive.

    .. warning:: the archive should not contain any subdirectories, but just
        the pseudo potential files.

    :param cls: the pseudopotential family class to use, e.g. ``SsspFamily``
    :param label: the label for the new family
    :param filepath_archive: absolute filepath to the .tar.gz archive
        containing the pseudo potentials
    :param fmt: the format of the archive, if not specified will attempt to
        guess based on extension of ``filepath``
    :param pseudo_type: subclass of ``PseudoPotentialData`` to be used for the
        parsed pseudos. If not specified and the family only defines a single
        supported pseudo type in ``_pseudo_types`` then that will be used,
        otherwise a ``ValueError`` is raised.
    :return: newly created family
    :raises OSError: if the archive could not be unpacked or pseudos in it
        could not be parsed into a family
    """
    with tempfile.TemporaryDirectory() as dirpath:
        try:
            # In Python 3.6 the ``unpack_archive`` method does not yet
            # support ``pathlib.Path`` objects.
            shutil.unpack_archive(str(filepath_archive), dirpath, format=fmt)
        except shutil.ReadError as exception:
            raise OSError(f'failed to unpack the archive `{filepath_archive}`: {exception}') from exception

        try:
            family = cls.create_from_folder(dirpath, label, pseudo_type=pseudo_type)
        except ValueError as exception:
            raise OSError(f'failed to parse pseudos from `{dirpath}`: {exception}') from exception

    return family
134bc787a22fb9f6bb3a34ec2453e7bc94576097
92,018
def is_int(val):
    """Check if the input is an integer by running
    `int() <https://docs.python.org/3/library/functions.html#int>`_.
    If it is successful we return **True**, otherwise we return **False**.
    """
    try:
        int(val)
    except ValueError:
        return False
    return True
211d3bb640150f286b33494475d4505022db6003
92,019
def set_to_dm_limits(ptt_list, limit=5.):
    """
    Check that the values for piston, tip, and tilt do not exceed the hardware
    limit, and reset to the limit if it is exceeded. These limits are the same
    as what the IrisAO GUI has set.

    :param ptt_list: list of tuples of (piston, tip, tilt) values for each
        segment in a pupil, in DM units
    :param limit: float, in DM units. Default = 5.
    :return: list of tuples of the piston, tip, tilt values in DM units for
        each segment, such that none of the values exceed the limit
    """
    updated = [tuple(min(i, limit) for i in ptt) for ptt in ptt_list]
    return updated
7255e9b1d4d7af1646983741a8f8bb6c24d0e3df
92,024
def _run_autocommit_on(cnx, db_parameters):
    """Run autocommit on test.

    Args:
        cnx: The database connection context.
        db_parameters: Database parameters.
    """

    def exe(cnx, sql):
        return cnx.cursor().execute(sql.format(name=db_parameters["name"]))

    exe(
        cnx,
        """
INSERT INTO {name} VALUES(True), (False), (False)
""",
    )
    cnx.rollback()
    res = exe(
        cnx,
        """
SELECT COUNT(*) FROM {name} WHERE NOT c1
""",
    ).fetchone()
    assert res[0] == 4
636741c934bae0c7791ed7946c87e42600c2b0d2
92,027
def nearest_power_of_two(i):
    """
    nearest_power_of_two(i)

    Returns the nearest power of two that is at least i.
    """
    ii = 1
    while ii < i:
        ii *= 2
    return ii
0adbdc2e833ff6925013eeaa8a3b85883aada9e5
92,029
def sorted_for_ner(crf_classes):
    """
    Return labels sorted in a default order suitable for NER tasks:

    >>> sorted_for_ner(['B-ORG', 'B-PER', 'O', 'I-PER'])
    ['O', 'B-ORG', 'B-PER', 'I-PER']
    """
    def key(cls):
        if len(cls) > 2 and cls[1] == '-':
            # group names like B-ORG and I-ORG together
            return cls.split('-', 1)[1], cls
        return '', cls
    return sorted(crf_classes, key=key)
e25316cc9ef9e9fd9ef2056468b5e19d32f0dc38
92,032
def isPalindrome(s):
    """This function checks if a string is a palindrome or not, using
    recursion.

    Input: String to be checked
    Output: True if it is a palindrome, False if it is not
    """
    if len(s) == 0:
        return True
    if s[0] != s[len(s) - 1]:
        return False
    return isPalindrome(s[1:-1])
9a28032f95166be6f5874442133a111fca8cc9a3
92,034
def select_max(actions, rewards, priorites):
    """
    Return the action with max reward; if there are many optimal actions,
    choose the one with the highest priority.

    Args:
        actions(list): actions list
        rewards(list): rewards list
        priorites(dict): map(action -> priority)

    Returns:
        best: best action with max reward and highest priority
        reward: max reward
    """
    # Bug fix: look up each action's priority in the dict; zipping rewards
    # directly with the dict would pair them with its keys, not priorities.
    act_reward_prio = zip(actions, zip(rewards, (priorites[a] for a in actions)))
    best_choice = max(act_reward_prio, key=lambda x: x[1])
    return best_choice[0], best_choice[1][0]
2ca47c60f24b09466f8af7c6b208b72f51debbf4
92,038
def is_lazy_loaded(self, attribute_name):
    """
    Indicates if the specified attribute is lazy loaded.

    :type attribute_name: String
    :param attribute_name: The attribute name.
    :rtype: bool
    :return: The lazy loaded flag.
    """
    # sets the lazy loaded flag in case
    # the attribute value is not present
    lazy_loaded = not self.has_value(attribute_name)

    # returns the lazy loaded flag
    return lazy_loaded
e6e6e8d17cdcd787697800e268ee89eb0cff4b48
92,043
def render_facet(facet):
    """Renders the facet `facet`.

    `facet` is an elasticsearch-dsl facet, consisting of four items: the
    string display value, the integer count, the string link to apply/unapply
    the facet, and a Boolean indicating whether the facet is selected.
    """
    context = {
        'count': facet[1],
        'label': facet[0],
        'link': facet[2],
        'selected': facet[3],
    }
    return context
bd943b04e9b6841563ce06d9db190598849770aa
92,047
def clamp(number: float, min_val: float, max_val: float) -> float:
    """Return the value of number clamped between min_val and max_val.

    Args:
        - number: The number to be clamped.
        - min_val: The minimum value of the returned number.
        - max_val: The maximum value of the returned number.

    Preconditions:
        - min_val <= max_val

    >>> clamp(0.0, 1.0, 2.0)
    1.0
    >>> clamp(5.0, 1.0, 2.0)
    2.0
    >>> clamp(1.5, 1.0, 2.0)
    1.5
    """
    return max(min_val, min(max_val, number))
02b207f0dacfd2335501134467e0ab4ee8c7d6ec
92,048
def get_node_presence_from_link(node_presence, n, b, e):
    """
    Return the maximal temporal node corresponding to (b, e, n).

    :param node_presence:
    :param n: node
    :param b: beginning of the interval (time)
    :param e: ending of the interval (time)
    :return: Maximal temporal node presence corresponding to (b, e, n): (t0, t1)
    """
    for t0, t1 in zip(node_presence[n][::2], node_presence[n][1::2]):
        if t0 <= b and e <= t1:
            return t0, t1
    return None
fe7f560cb41ac9d8f994391df744eb718abce253
92,049
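An example of get_node_presence_from_link; node_presence[n] is assumed to be a flat list of alternating interval bounds [b0, e0, b1, e1, ...]:

node_presence = {0: [0.0, 4.0, 6.0, 9.0]}
print(get_node_presence_from_link(node_presence, 0, 1.0, 3.0))  # (0.0, 4.0)
print(get_node_presence_from_link(node_presence, 0, 5.0, 7.0))  # None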
def _partition_name(dev):
    """
    Derive the first partition name for a block device.

    :param: dev: Full path to block device.
    :returns: str: Full path to first partition on block device.
    """
    if dev[-1].isdigit():
        return '{}p1'.format(dev)
    else:
        return '{}1'.format(dev)
f841e8a9ed566c6317cb392ff5ca4ab76a311237
92,050
def get_input(input_line, dataset):
    """Turn a line of embedding indexes into human readable words."""
    sentence = []
    for i in range(input_line.size()[0]):
        idx = input_line[i]
        if idx == dataset.pad_idx:
            break
        word = dataset.vocab.itos[idx]
        sentence.append(word)
    return ' '.join(sentence)
4ad431438395166f5aedbafa9cf63571663f1a54
92,052
import pickle

import dill

def unpickle_custom_operators(filename):
    """Load the custom operators from a pickled file.

    Args:
        filename (str): string indicating the path to the custom operator
            pickled file

    Return:
        Lists of the nodal, pool, and activation sets
    """
    nodal_set = []
    pool_set = []
    activation_set = []

    with open(filename, 'rb') as fid:
        operators = pickle.load(fid)

    pickled_func = operators['pickled_func']
    for nodal in operators['nodal_set']:
        if isinstance(nodal, str):
            nodal_set.append(nodal)
        else:
            nodal_set.append(dill.loads(pickled_func[nodal]))
    for pool in operators['pool_set']:
        if isinstance(pool, str):
            pool_set.append(pool)
        else:
            pool_set.append(dill.loads(pickled_func[pool]))
    for activation in operators['activation_set']:
        if isinstance(activation, str):
            activation_set.append(activation)
        else:
            activation_set.append(dill.loads(pickled_func[activation]))
    return nodal_set, pool_set, activation_set
6e112ae7be889d73bad5a1c0ec7489c964fec040
92,058
from typing import Dict
from typing import Any

def CountList(L: list) -> Dict[Any, int]:
    """
    Count the number of occurrences of each element.

    Args:
        L (list): The input List

    Returns:
        Dict[Any, int]: A dictionary of elements and how often they occur.
    """
    counted = {}
    for x in L:
        if x in counted.keys():
            counted[x] += 1
        else:
            counted[x] = 1
    return counted
22a0d294660a9ccfd77eb672ad78c2048283bf3f
92,066
def get_table_number(table_lookup, table_name):
    """
    Auxiliary function to retrieve the internal pit number for a given
    pandapipes net table name from the table lookup.

    :param table_lookup: The lookup dictionary from table names to internal
        number (n2t) and vice versa (t2n)
    :type table_lookup: dict
    :param table_name: Name of the table for which the internal number shall
        be retrieved
    :type table_name: str
    :return: table_number - Internal number of the given table name within the pit
    :rtype: int
    """
    if table_name not in table_lookup["t2n"]:
        return None
    return table_lookup["t2n"][table_name]
6072fc81a7872c7d663261c1c2fe127a0459e375
92,067
def create_req_xml(layer, date):
    """Create XML string representing the WMS-TMS gibs service"""
    xml = f"""<GDAL_WMS>
    <Service name="TMS">
        <ServerUrl>http://gibs.earthdata.nasa.gov/wmts/epsg4326/best/{layer}/default/{date}/250m/${{z}}/${{y}}/${{x}}.jpg</ServerUrl>
    </Service>
    <DataWindow>
        <UpperLeftX>-180.0</UpperLeftX>
        <UpperLeftY>90</UpperLeftY>
        <LowerRightX>396.0</LowerRightX>
        <LowerRightY>-198</LowerRightY>
        <TileLevel>8</TileLevel>
        <TileCountX>2</TileCountX>
        <TileCountY>1</TileCountY>
        <YOrigin>top</YOrigin>
    </DataWindow>
    <Projection>EPSG:4326</Projection>
    <BlockSizeX>512</BlockSizeX>
    <BlockSizeY>512</BlockSizeY>
    <BandsCount>3</BandsCount>
</GDAL_WMS>"""
    return xml.encode()
d466b420a2a9550fd830fa0cdff42727edb03627
92,071
def get_sequence_item(self, index):
    """
    operator[] for sequence, supports both positive and negative indexes
    """
    if len(self.value) > index >= -len(self.value):
        return self.value[index]
    raise IndexError
c2cc7eabeeaa7cec89be21d2162262dd5df55b6a
92,072
import struct

def U64(v):
    """Unpack an 8-byte string as a 64-bit long"""
    h, l = struct.unpack(">II", v)
    if h:
        return (h << 32) + l
    else:
        return l
a1498d305fbfe1d3bcd4df474407c5593f2b5adf
92,075
def CI_compare(CI1, CI2):
    """Return +1 if CI1 > CI2, -1 if CI1 < CI2, 0 if overlapping"""
    if CI1[1] < CI2[0]:
        return -1
    elif CI2[1] < CI1[0]:
        return +1
    else:
        return 0
f07fe7c77a219d074d0ee46536078bbacb37b03f
92,082
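A quick check of CI_compare on made-up confidence intervals, given as (lower, upper) pairs:

print(CI_compare((0.1, 0.2), (0.3, 0.4)))   # -1: CI1 lies entirely below CI2
print(CI_compare((0.5, 0.9), (0.3, 0.4)))   # 1: CI1 lies entirely above CI2
print(CI_compare((0.1, 0.35), (0.3, 0.4)))  # 0: the intervals overlap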
def get_file_extension(filepath):
    """Return full file extension from filepath"""
    filename = filepath.split('/')[-1]
    return filename[filename.index('.'):]
33bf9cfb797612e84d130750d97c1ced0233b55c
92,083
import glob

def get_image_paths(dataset='cars'):
    """
    Loads image paths for the selected dataset.

    :param dataset: Dataset that should be loaded. ['cars', 'notcars']
    :return: Path list
    """
    path_list = []
    if dataset == 'cars':
        print('Loading data set for \'cars\'...')
        # GTI
        path = 'data/vehicles/GTI*/*.png'
        paths = glob.glob(path)
        print(path)
        print('\t{} elements'.format(len(paths)))
        path_list += paths
        # KITTI
        path = 'data/vehicles/KITTI*/*.png'
        paths = glob.glob(path)
        print(path)
        print('\t{} elements'.format(len(paths)))
        path_list += paths
    elif dataset == 'notcars':
        print('Loading data set for \'notcars\'...')
        # GTI
        path = 'data/non-vehicles/GTI*/*.png'
        paths = glob.glob(path)
        print(path)
        print('\t{} elements'.format(len(paths)))
        path_list += paths
        # Udacity data
        path = 'data/non-vehicles/Extras/*.png'
        paths = glob.glob(path)
        print(path)
        print('\t{} elements'.format(len(paths)))
        path_list += paths
        # Manually extracted data
        path = 'data/non-vehicles/Extracted/*.png'
        paths = glob.glob(path)
        print(path)
        print('\t{} elements'.format(len(paths)))
        path_list += paths
    else:
        raise Exception('There are only two possible choices for dataset: \'cars\' and \'notcars\'')
    return path_list
e644fa7a8d83b5bf27375b9558f317ff0ed9e2c8
92,089
def findgcd(x, y):
    """This function returns the greatest common factor/divisor."""
    if x % y == 0:
        return y
    else:
        return findgcd(y, x % y)
87c7d2913aec40e8b386643f59806b1fb0502cf9
92,091
import inspect

def signature_matches(func, args=(), kwargs={}):
    """
    Work out if a function is callable with some args or not.
    """
    try:
        sig = inspect.signature(func)
        sig.bind(*args, **kwargs)
    except TypeError:
        return False
    else:
        return True
5ae01cbbb3a016f8e0ea52b3c9b124e5b5d5ae13
92,093
from bs4 import BeautifulSoup

def extract_text(body: BeautifulSoup) -> str:
    """Extract all text from the page."""
    return body.get_text()
12bf108b1efe6b5ed69d77999524dbbd4613931e
92,095
def rt_match(rt1, rt2, parameters):
    """Assess peak match based on rt values and rt error"""
    corrected_rt1 = (rt1 - parameters.standards_intercept) / parameters.standards_slope
    if corrected_rt1 - parameters.rt_error <= rt2 <= corrected_rt1 + parameters.rt_error:
        return True
    else:
        return False
965c4c4a1d8aafa99ebae3079f1e836d828b0a4a
92,097
def known_missing_split_1d(label_data, feature_data, split_axis=1,
                           missing_val_filled=False, fill_val=None):
    """Returns index of the dataset corresponding to known and missing ratings
    in the label data (row or column to be predicted)

    Parameters:
        label_df (DataFrame)      : contains the column/row to be predicted
        feature_df (DataFrame)    : contains the features
        split_axis (int)          : The axis along which the utility matrix is
                                    split, {0/'index', 1/'columns'}, default 1
        missing_val_filled (bool) : Indicates whether missing/null values in
                                    the label/feature data were filled
        fill_val (None/float)     : Value used to fill the null values when
                                    missing_val_filled==True, default None

    Returns:
        X_known.index   : index corresponding to known ratings
        X_missing.index : index corresponding to missing/unknown ratings
    """
    if missing_val_filled:
        if fill_val is None:
            missing_vals = (label_data == 0).values.flatten()
        else:
            missing_vals = (label_data == fill_val).values.flatten()
    else:
        missing_vals = label_data.isnull().values.flatten()

    if split_axis == 1:
        X_missing = feature_data.loc[missing_vals, :]
        X_known = feature_data.loc[~missing_vals, :]
    elif split_axis == 0:
        X_missing = feature_data.loc[:, missing_vals]
        X_known = feature_data.loc[:, ~missing_vals]
    else:
        X_missing = feature_data.loc[missing_vals, :]
        X_known = feature_data.loc[~missing_vals, :]
    return X_known.index, X_missing.index
cf7cf508f66f1c8d20694c1dd51e0b90d402f3fc
92,098