content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
import json


def get_users(cwd, file_name):
    """Load registered users and their passwords from a JSON file.

    :param str cwd: current working directory
    :param str file_name: name of the file containing the credentials
    :return: dictionary of users and passwords, or None if the file is missing
    """
    path = cwd + '/' + file_name + '.json'
    try:
        with open(path) as handle:
            return json.load(handle)
    except FileNotFoundError:
        return None
3009f8336f13bdce11f0faacc288489fee81a3c2
84,682
def rotate_layer(layer: tuple, rotation_diff: int) -> tuple:
    """Return a copy of ``layer`` with its rotation shifted by ``rotation_diff``.

    The rotation wraps at 180 degrees for a "Normal" distribution and at
    360 degrees otherwise. Layer layout is (?, ?, rotation, distribution).
    """
    _, _, rotation, distribution = layer
    modulus = 180 if distribution == "Normal" else 360
    updated = list(layer)
    updated[2] = (rotation + rotation_diff) % modulus
    return tuple(updated)
41355468101de138d49700b5d595115b74201d93
84,684
import unicodedata def _preprocess_line(line: str, do_lower_case: bool = False) -> str: """Preprocesses an individual raw text line. This function will: - Remove extraneous spaces. - Replace `` with ", and '' with ". - Replaces accents. - Applies lower casing. Args: line: The input line to preprocess. do_lower_case: Whether or not to lower case the text. Returns: The preprocessed line. """ line = " ".join(line.split()) line = line.replace("``", "\"").replace("''", "\"") # Replace accents. line = unicodedata.normalize("NFKD", line) line = "".join([c for c in line if not unicodedata.combining(c)]) if do_lower_case: line = line.lower() return line
937928f14269c4d4a3b43c0c0fc128cef6cf6e94
84,685
def get_data(sheet):
    """Return data (excluding headers) from a sheet.

    Each returned row is a list whose first element is the spreadsheet
    row number (starting at 2, i.e. the first row after the header),
    followed by the string value of every cell in that row.
    """
    body_rows = sheet["data"][0]["rowData"][1:]
    extracted = []
    for row_number, row in enumerate(body_rows, start=2):
        values = [cell["userEnteredValue"]["stringValue"]
                  for cell in row["values"]]
        extracted.append([row_number] + values)
    return extracted
5fd4154fefbd4018925060fc4753c69876e39d33
84,686
def get_class_name(name):
    """Build a CamelCased class name from an underscore-separated name.

    Splits on "_" and capitalizes each piece:
        this_is_Test => ThisIsTest
        test => Test
        testone => Testone
    """
    return "".join(part.capitalize() for part in name.split("_"))
4dc1f95feeef148538e6a7aee135a7228e2238eb
84,689
def bdev_null_resize(client, name, new_size):
    """Resize null bdev in the system.

    Args:
        name: name of null bdev to resize
        new_size: new bdev size of resize operation. The unit is MiB

    Returns:
        whatever the RPC client returns for 'bdev_null_resize'.
    """
    return client.call('bdev_null_resize', {
        'name': name,
        'new_size': new_size,
    })
ddcb3b59d9390afbedd0cb05e2f2b3fe4a39ec86
84,692
def find_seatings(names, table):
    """Recursively enumerate every seating order for ``names``.

    ``table`` holds the seats filled so far; a complete arrangement is
    returned once no names remain.
    """
    if not names:
        return [table]
    arrangements = []
    for candidate in names:
        remaining = names - {candidate}
        arrangements.extend(find_seatings(remaining, table + [candidate]))
    return arrangements
f9d93e69903749b35cc113d3ff64cc7801c1a248
84,695
def average_to_hit(accuracy):
    """Return the average chance to hit.

    Scales ``accuracy`` (a percentage) by the average soldier accuracy
    factor of 0.55.
    """
    fraction = accuracy / 100
    return fraction * 0.55
8c9595c2b1e976cfdaa2e835476bf75d4320dad3
84,696
import torch


def get_reward_mab(a_t, rewards):
    """Return the immediate reward for pulling arm ``a_t``.

    Parameters
    ----------
    a_t : int
        action
    rewards : list
        rewards across arms

    Returns
    -------
    torch.FloatTensor, scalar
        immediate reward at time t
    """
    chosen = rewards[a_t]
    return chosen.type(torch.FloatTensor).data
76a1ea8a093630da6ba367a8e31f7c31213bdf15
84,697
def combine_labels(left, right):
    """
    For use with the join operator &: combine left input/output labels
    with right input/output labels.

    If none of the labels conflict this is simply ``left + right``. If
    *any* label appears on both sides, '0' is appended to every left-hand
    label and '1' to every right-hand label so there is no ambiguity.
    """
    has_conflict = bool(set(left) & set(right))
    if has_conflict:
        left = tuple(label + '0' for label in left)
        right = tuple(label + '1' for label in right)
    return left + right
23d6019832a1c89d18c5c2149df5dc57d70f2907
84,701
def parse_sample_ids(sampleidfile):
    """
    Read the sample ID file and return a hash of SRS to list of SRR
    (SRS->[SRR]).

    Each line is expected to be tab-separated with the SRR in the first
    column and the SRS in the second.
    """
    srs = {}
    with open(sampleidfile, 'r') as handle:
        for line in handle:
            fields = line.strip().split("\t")
            srs.setdefault(fields[1], []).append(fields[0])
    return srs
401e44c6bda2273ab8692f68570972ea06116223
84,702
import random


def randomize(values):
    """Return a zero-argument function that, when called, yields a
    random element of ``values`` each time."""
    return lambda: random.choice(values)
81515328e29c05beaaf955d719522df93a3a5a12
84,708
import ntpath


def leafname(path):
    """Return the last name segment of a path.

    Gives the local filename for a file path, or the folder name when
    the path ends with a separator.

    :param path: the filepath from which to extract the leaf name segment
    :return: the last name segment in the filepath
    """
    head, tail = ntpath.split(path)
    if tail:
        return tail
    # Path ended in a separator: fall back to the name just before it.
    return ntpath.basename(head)
1d46ef10781072069cb8ed1a0461d144a0f58040
84,710
def _deduplicate_and_remove_empty_options(zipped): """Remove option-bitmap pairs where option is duplicate or empty. Args: zipped: list of tuples [(option, bitmap), ...]. Returns: `zipped` without such elements where `option` is duplicate in the list or is empty. """ stored_options = set() zipped_unique = [] for (option, bitmap) in zipped: # remove empty options and duplicate options at once if option and option not in stored_options: zipped_unique.append((option, bitmap)) stored_options.add(option) return zipped_unique
277a791c574ab5512aec24323480d514297689cf
84,713
def eq(line):
    """ Turns a line into a nice string representation. """
    rho, theta = line
    rho_str = '{:6.2f}'.format(float(rho))
    theta_str = '{:4.4f}'.format(float(theta))
    return f'{rho_str} = x * sin({theta_str}) + y * cos({theta_str})'
babc697e37989258d578792068bb450f3b7f3e45
84,715
def read_file(file_name):
    """Read the input file into a list of lines.

    Exits the program with status 1 (after printing a message) when the
    file is missing or unreadable.
    """
    try:
        with open(file_name, 'r') as handle:
            return handle.readlines()
    except FileNotFoundError:
        print("File could not be found. Please try again.")
        raise SystemExit(1)
    except PermissionError:
        print("Could not open file (insufficient privileges).")
        raise SystemExit(1)
8934f171f6709e3641f3f237c71dcecde6e564b9
84,718
def _get_sp_txt(p): """ Produces a comma separated string of the defined subproject names, if avaialble :param looper.Project p: project to search the subprojects in :return str | NoneType: subprojects names """ try: sp_names = p.subprojects.keys() except AttributeError: sp_names = None return ",".join(sp_names) if sp_names is not None else sp_names
2f47d5f38df1aa903a57401eb2aecc0c86c05da0
84,720
import hashlib
from pathlib import Path


def md5(fname):
    """
    Calculates the md5sum of the passed file and returns the calculated
    value.

    Parameters
    ----------
    fname: str
        filename of file to calculate the md5sum for

    Returns
    --------
    str
        md5sum of passed file
    """
    path = Path(fname) if isinstance(fname, str) else fname
    assert isinstance(path, Path)
    assert path.exists()
    digest = hashlib.md5()
    with open(str(path), "rb") as handle:
        # Stream in 4 KiB chunks so arbitrarily large files stay cheap.
        for block in iter(lambda: handle.read(4096), b""):
            digest.update(block)
    return digest.hexdigest()
272aa1eae6d82a9ec0996e93d4c6e3b4baa5a5b8
84,721
def process_features(features):
    """
    Use to implement custom feature engineering logic.

    Default behaviour is to return the original feature tensors
    dictionary as-is (identity transform); override this hook to add
    real feature engineering.

    Args:
        features: {string:tensors} - dictionary of feature tensors

    Returns:
        {string:tensors}: extended feature tensors dictionary
    """
    return features
61d10ec05b7c0c823983d21467935e97755b3ecb
84,722
def interpret_colour(s, context=None):
    """Convert a raw Color value to a gomill colour.

    Returns 'b' or 'w'; raises ValueError for anything else.
    """
    colour = s.decode('ascii').lower()
    if colour in ('b', 'w'):
        return colour
    raise ValueError
83f6e1977ef62f622c34814bc713420ffeb2c0fd
84,724
def extract_sentences(lines):
    """
    Extracts the non-empty sentences and their numbers of the "lines"
    field in a JSON object from a FEVER wiki-pages JSONL file.

    Returns a list of (sentence_text, sentence_number_str) tuples.
    """
    sentences = []
    expected_index = 0
    for raw_line in lines.split('\n'):
        fields = raw_line.split('\t')
        # Real sentence lines start with the next consecutive sentence
        # number; anything else is a fragment from an unexpected \n.
        if fields[0].isnumeric() and int(fields[0]) == expected_index:
            sentences.append((fields[1], fields[0]))
            expected_index += 1
    return sentences
94b78de6423604d6e03850ccf4d501823aeeee8f
84,726
def triangular_numbers(n):
    """[Triangular Numbers - A000217](https://oeis.org/A000217)

    Arguments:
        n (Integer): Index of the sequence

    Returns:
        Integer: Value of this sequence at the specified index
    """
    # Use floor division: n*(n+1) is always even, and true division (`/`)
    # returned a float even though the result is documented as an Integer.
    return n * (n + 1) // 2
1acafdb7cd043d25ab4fdbcefe3c8960f2e2bcd8
84,730
def vm_to_mm(verbal_model):
    """Convert VM to MM. Time information will be lost.

    Each individual becomes a list of its proposition names, prefixed
    with "-" when the proposition is negated; every row is sorted by the
    last character of the (possibly prefixed) name.
    """
    mental_model = []
    for individual in verbal_model:
        row = [("-" + prop.name) if prop.neg else prop.name
               for prop in individual.props]
        mental_model.append(sorted(row, key=lambda entry: entry[-1]))
    return mental_model
8bfdde6f0c497f3daa0367ee5a01f161ad02cbf6
84,733
def parse_params_int(params, p):
    """
    Get and parse an int value from request params.

    The parameter is removed from ``params``. Returns None when it is
    absent or is not a valid base-10 integer string.
    """
    val = params.pop(p, None)
    try:
        return int(val, 10)
    except (TypeError, ValueError):
        # TypeError: parameter absent (val is None) — the original only
        # caught ValueError, so a missing parameter crashed the caller.
        # ValueError: present but not an integer string.
        return None
64d2688db2ea3c3a98489425d4b389d85a776c19
84,735
def readAnchorsFile(fileName):
    """
    Reads anchor/seeds from fileName and returns list-of-lists anchorList.

    Blank lines are skipped; every other line is split on whitespace.
    """
    anchorList = []
    # Use a context manager so the file is closed even if reading raises
    # (the original's explicit close() was skipped on an exception).
    with open(fileName, 'r') as anchorFile:
        for line in anchorFile:
            wordList = line.strip().split()
            if wordList:
                anchorList.append(wordList)
    return anchorList
2bd443a83eaf3db8f6fbcc7c9476cbbfdff42387
84,737
def valid_role(role):
    """
    Args:
        role (str): name of a role

    Returns:
        Bool: True if the role is not administrative
    """
    # BUGFIX: the original list was missing a comma after
    # 'dbAdminAnyDatabase', which implicitly concatenated it with
    # 'dbAdmin' into one string — so BOTH of those administrative roles
    # incorrectly passed as valid.
    return role not in [
        'userAdminAnyDatabase',
        'dbAdminAnyDatabase',
        'dbAdmin',
        'dbOwner',
        'userAdmin',
        'clusterAdmin',
        'root']
fc2d0ac714e734d477a87f5e6c037c4c60af2e36
84,746
def get_assignment_details(app_detail_result):
    """
    Fetch smart group ids, names and push mode where the app has been
    deployed.

    :param app_detail_result: json returned from the app details api
    :return: list of smart group ids, list of smart group names and push mode
    """
    ids, names, modes = [], [], []
    for group in app_detail_result['Assignments']:
        ids.append(group['SmartGroupId'])
        names.append(group['SmartGroupName'])
        modes.append(group['PushMode'])
    return ids, names, modes
ff344d09826e0d5de27a9ed573ceb763ee330eea
84,747
def ticket_name(branch_name):
    """
    Assume the naming convention <ticket_no><underscore><description>
    and return <ticket_no>.

    The delimiter is an underscore ("_"). If the convention is not
    respected this returns the token up to the first underscore, or the
    whole name when none is found.

    :param str branch_name: name of the branch we are currently in
    :return: ticket number from the branch
    """
    return branch_name.split("_", 1)[0]
c47db578355b052a7d95d6e58084e80f4532f288
84,750
import requests


def get_options(server):
    """Retrieve all allowed HTTP verbs/methods.

    Sends an OPTIONS request to ``server`` and returns the value of the
    "Allow" response header, or None when the request fails or the
    header is absent.
    """
    try:
        response = requests.options(
            server, allow_redirects=False, verify=False, timeout=5
        )
    except (
        requests.exceptions.ConnectionError,
        requests.exceptions.MissingSchema,
    ):
        return None
    # Headers are case-insensitive; .get covers the missing-header case.
    return response.headers.get("Allow")
ee3b068aaf500c4484ee2cf017bfda8cdb0c9ee7
84,752
from typing import List


def order_and_station(order_list: List[dict], order_name: str, station_name: str) -> tuple:
    """
    Return an index-tuple containing the index of an order, and the
    index of a specific station used in it.

    If the order does not exist, or the order does not contain the
    specific station, (-1, -1) is returned.
    """
    for order_index, order in enumerate(order_list):
        if order['name'] != order_name:
            continue
        for station_index, station in enumerate(order['station']):
            if station == station_name:
                return order_index, station_index
    return -1, -1
4dc5048302dbd64db9b3d96470668b917f9ec5d2
84,753
def get_speaking_characters(raw_play_lines, character_matcher):
    """
    Return a set of all character names (upper-cased).

    Parameters
    ----------
    raw_play_lines : list of str
        lines of the play.
    character_matcher : compiled regex
        expression used to extract character names from raw_play_lines;
        the regex must include a group called 'name'.
    """
    names = set()
    for line in raw_play_lines:
        match = character_matcher.search(line)
        if match:
            names.add(match.group('name').upper())
    return names
42e874101fb8f465bd5fa570f7c466b0fdd8a2d8
84,755
def wants_plain_hotdog(ketchup, mustard, onion):
    """Return whether the customer wants a plain hot dog with no toppings."""
    # De Morgan: "no topping at all" <=> "not (any topping)".
    return not (ketchup or mustard or onion)
78f7308cd63f7133e1f96202164246e0d0cd9a8a
84,758
def _has_child_providers(ctx, rp_uuid): """Returns True if the supplied resource provider has any child providers, False otherwise """ query = """ MATCH (rp:RESOURCE_PROVIDER {uuid: '%s'})-[:CONTAINS]->(child) RETURN count(child) AS num """ % rp_uuid result = ctx.tx.run(query).data() return bool(result[0]["num"])
0c9b17967ea28759974c2485cc928320a160c734
84,761
def make_permission_config_key(view):
    """
    Generates the key to be stored in configuration for a given view.

    :type view: rest_framework.views.APIView
    """
    view_name = type(view).__name__
    return f"api_permission_{view_name}"
cbb84f80250b72a6e433c606633bcd53cb19821a
84,762
def _remove_empty_parts(tagged_parts_list): """ Remove all the empty parts in the list of tagged parts """ tagged_parts_list = [part for part in tagged_parts_list if len(part[0]) > 0] return tagged_parts_list
cc8c9abf31357af63e45cd47d2cfb4f987ffdd7d
84,764
import struct def _unpack_uuid(uuid): """Unpack a PXE UUID to its long form (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx).""" fields = ['%02x' % x for x in struct.unpack('!16B', uuid)] return '%s-%s-%s-%s-%s' % (''.join(fields[:4]), ''.join(fields[4:6]), ''.join(fields[6:8]), ''.join(fields[8:10]), ''.join(fields[10:16]))
59c4086196989e83154da1f1bce430d028dd1357
84,771
import torch


def create_sparse_input(dictionary, K=1, num_samples=1):
    """
    Create sparse data given a dictionary and sparsity value.

    :param dictionary: Tensor. Dictionary to base the data on.
    :param K: Int. Sparsity value, use this number of atoms to create the data.
    :param num_samples: Number of samples to create.
    :return: Tensor. Created data (plus the sampled atoms and coefficients)
    """
    atoms = torch.randint(dictionary.size()[1], (num_samples, K))
    coefs = torch.randn((K, num_samples, 1))
    # Each sample is a K-sparse combination of dictionary columns.
    samples = [dictionary[:, atoms[idx, :]].mm(coefs[:, idx])
               for idx in range(num_samples)]
    X = torch.stack(samples, 1).squeeze(-1)
    return X, atoms, coefs
b769dbd1ceb2a7ea7e0a55e91bc7466b0ff82047
84,772
def getRightChild(root):
    """Get the right child.

    Assumes ``root`` is a sequence laid out as
    (value, left_child, right_child) — TODO confirm layout with callers.
    """
    right_child = root[2]
    return right_child
4d6a261f6a532c985508ecdc3155a50b2390cec8
84,775
def get_key_for_grouping(item):
    """
    Returns the key for grouping params during update of SqE.

    Parameters
    ----------
    item : tuple
        (key, value) as given by dict.items()
    """
    parts = item[0].split("_")
    if len(parts) > 1:
        return parts[1]  # This should be the name of the line
    # No underscore in the key: group under the generic bucket.
    return "model_params"
9ac067d36aa87c170046bc534f2a9b267e6bf62d
84,784
def _create_train_test_split(training_images_list, label_images_list, train_split_percent): """ Divides the training images folder into training images and test images, with train_split percent being the percent of images in training set and the rest in test set. Note that, the Nerve Segmentation Challenge gives a set of images in the 'test' folder. That folder is not touched at all during the training process. It is referred to as the validation data throughout the documentation of this code. Test images in our case is a fraction of the images in the 'train' folder :param training_images_list: list of images from the training folder :param label_images_list: list of label images from the training folder :param train_split_percent: percentage of images used for training :return: """ assert len(training_images_list) == len(label_images_list), ('Number of training images and label ' 'images must be same. Please make sure ' 'equal number of training images and ' 'label images') split_index = int(len(training_images_list) * train_split_percent) train_split_images_list = training_images_list[0:split_index] train_split_labels_list = label_images_list[0:split_index] test_split_images_list = training_images_list[split_index:len(training_images_list)] test_split_labels_list = label_images_list[split_index:len(training_images_list)] print('Finished splitting data into %s training images and %s ' 'test images' % (len(train_split_images_list), len(test_split_images_list))) return train_split_images_list, train_split_labels_list, test_split_images_list, test_split_labels_list
a1e149d1ab04b62b29d378ccf6cb52375eacf3e6
84,803
def parent(node_index):
    """Computes the parent node index.

    Integer-halves the index (1-indexed binary-heap layout —
    TODO confirm indexing convention with callers).
    """
    return node_index // 2
3634c710cceb40c697049de14b60a4eeb88ec4bf
84,805
def construct_email(cfg, query, location, posts):
    """Construct an email message.

    Args:
        cfg: email configuration.
        query: search term used.
        location: location to search for jobs.
        posts: dictionary containing new job listings.

    Returns:
        message: string containing the email message.
    """
    nposts = len(posts)
    # unpack required variables
    user, send_to = cfg['email_from'], cfg['email_to']
    # unpack optional variables
    try:
        name = cfg['name']
    except KeyError:
        # if the `name` key isn't present, then use the first part of the
        # email address to address recipient.
        name = cfg['email_to'].split('@')[0]
    try:
        sender_name = cfg['sender_name']
    except KeyError:
        sender_name = cfg['email_from']
    try:
        signature = cfg['signature']
    except KeyError:
        signature = ''
    # some temporary variables to correct grammar in the email message
    was_were = 'was' if nposts == 1 else 'were'
    is_are = 'is' if nposts == 1 else 'are'
    job_s = 'job' if nposts == 1 else 'jobs'
    listing_s = 'listing' if nposts == 1 else 'listings'
    subject = f'Job opportunities: {nposts} new {job_s} posted'
    # One numbered entry per post; each post dict must provide the
    # jobtitle/company/url/location/desc keys used by the template.
    description = '{}. {jobtitle} @ {company}\nLink: {url}\nLocation: {location}\nSnippet: {desc}\n'
    posts_content = '\n'.join(description.format(i+1, **p) for i, p in enumerate(posts.values()))
    # NOTE(review): there is no blank line between the "Subject:" header
    # and the "Hello" body line; confirm the downstream mailer inserts
    # RFC 5322 header/body framing if this string is sent as-is.
    s = (
        f'From: {sender_name} <{user}>\n'
        f'To: {send_to}\n'
        f'Subject: {subject}\n'
        f'Hello {name},\n\n'
        f'There {is_are} {nposts} new job {listing_s} to review.\n'
        f'The following job {listing_s} {was_were} found for {repr(query)} in {repr(location)}:\n\n'
        f'{posts_content}\n'
        f'{signature}'
    )
    return s
a0648e39118518317f2b252c9ac8a9c0f25284d2
84,808
def get_gpu_memory(gpus):
    """
    Small heuristic to calculate the amount of memory needed for each
    GPU in case of multi-gpu training.
    """
    count = len(gpus)
    if count >= 5:
        return 0.4
    if count >= 3:
        return 0.5
    # Two GPUs or fewer get the largest per-GPU share.
    return 0.6
ef9c5e9e54ca514a473f4d03f9fabea600db3bb6
84,818
def next_greater_digit_index(digits: list, i: int) -> int:
    """
    Find the next greater digit in the right portion of number[i] - that
    is from digit at index i+1 to last digit. Let that digit be number[j]
    at index 'j'.

    :param digits: list of digits
    :param i: index of number[i]
    :return: next greater digit in the right portion of number[i]
    """
    # -1 signals "no greater digit found".
    j: int = -1
    # Smallest qualifying digit seen so far; '' compares below every
    # digit *string*, so the first candidate always replaces it.
    # NOTE(review): this relies on the digits being str — comparing ''
    # against an int would raise TypeError; confirm with callers.
    current = ''
    if len(digits[i:]) == 1 and digits[1] > digits[0]:
        # NOTE(review): this special case indexes digits[0]/digits[1]
        # rather than positions relative to i — looks suspicious for
        # i > 1; verify against the intended next-permutation algorithm.
        return i
    else:
        for index, digit in enumerate(digits[i:]):
            # NOTE(review): the pivot compared here is digits[i - 1],
            # not digits[i] as the docstring suggests — confirm which
            # index is intended before relying on this.
            if digits[i - 1] < digit:
                if current == '':
                    current = digit
                    j = i + index
                elif current > digit:
                    current = digit
                    j = i + index
        return j
a22ff2ea28ee04c0279ed1f0ed52e5e22a82c2c5
84,819
def linear_anneal(base_lr, global_step, warmup_steps, min_lr):
    """
    Linearly annealed learning rate from 0 in the first warming up epochs.

    :param base_lr: base learning rate
    :type base_lr: float
    :param global_step: global training steps
    :type global_step: int
    :param warmup_steps: number of steps for warming up
    :type warmup_steps: int
    :param min_lr: minimum learning rate
    :type min_lr: float
    :return: scheduled learning rate
    :rtype: float
    """
    progress = global_step / warmup_steps
    annealed = min_lr + (base_lr - min_lr) * (1.0 - progress)
    # Never drop below the floor once the warmup window is exceeded.
    return max(annealed, min_lr)
d958371494139d742622bea62df06993553a6561
84,822
def validate_distance(distance: float) -> float:
    """
    Validates the distance of an object.

    :param distance: The distance of the object.
    :return: The validated distance.
    :raises ValueError: if the distance is negative.
    """
    if distance >= 0:
        return distance
    raise ValueError("The distance must be zero or positive.")
f14ba86c16ea5b79049173095ee24bfd652ca57e
84,823
def comp_lengths_winding(self): """Compute the lengths of the Lamination's Winding. - Lwtot : total length of lamination winding incl. end-windings and radial ventilation ducts [m]. - Lwact : active length of lamination winding excl. end-windings and radial ventilation ducts [m]. - Lewt : total end-winding length [m]. - Lew : end-winding length on one side for a half-turn - Lwvent : length of lamination winding in the radial ventilation ducts [m] Parameters ---------- self: LamSlotWind a LamSlotWind object Returns ------- L_dict: dict Dictionnary of the length (Lwtot, Lwact, Lew, Lwvent) """ # length of the stack including ventilation ducts L1vd = self.comp_length() # end-winding length on one side for a half-turn Lew = self.winding.comp_length_endwinding() # total end-winding length Ntspc = self.winding.comp_Ntsp(self.slot.Zs) qb = self.comp_number_phase_eq() Lewt = qb * Ntspc * self.winding.Npcp * 4 * Lew # average length of a lamination winding half-turn (one "go" conductor # without "return" conductor) Lwht = L1vd + 2 * Lew # total length of lamination winding incl. end windings [m] Lwtot = qb * Ntspc * self.winding.Npcp * 2 * Lwht # Active length of lamination winding excl. end windings and radial # ventilation duct [m] Lwact = qb * Ntspc * self.winding.Npcp * 2 * self.L1 # length of lamination winding in the radial ventilation duct [m] if self.Nrvd is None or self.Wrvd is None: Lwvent = 0 else: Lwvent = qb * Ntspc * self.winding.Npcp * 2 * self.Nrvd * self.Wrvd return {"Lwtot": Lwtot, "Lwact": Lwact, "Lewt": Lewt, "Lwvent": Lwvent, "Lew": Lew}
d2a2c48f71a55fc48f497c2de3b83cc47ee94194
84,824
def _normalize_profiles(profiles, c_mean): """ Rescale concentration profiles to fluctuations around the average value. """ normalized_profiles = [] for profile, concs in zip(profiles, c_mean): normalized_profiles.append(profile - concs[:, None]) return normalized_profiles
050b74a371ff7325687a59efb76ce20cb5833c53
84,825
def get_number(input):
    """Return value of hex, octal, binary or decimal numeric input string.

    A trailing h/H, q/Q or b/B suffix selects base 16, 8 or 2
    respectively; otherwise the string is parsed as decimal.
    """
    suffix_bases = {'h': 16, 'q': 8, 'b': 2}
    base = suffix_bases.get(input[-1:].lower())
    if base is not None:
        return int(input[:-1], base)
    return int(input)
4a631a0bf54fc3ceadbc47f948f5145ab69c48a4
84,831
def addcss(field, css):
    """Add css class to formfield"""
    attrs = {"class": css}
    return field.as_widget(attrs=attrs)
9fbefb2c472d3b0d4750b5bd0fa5e226902695c3
84,838
def prune_satisfied_clauses(clauses, variables):
    """Remove any clause that is already satisfied (i.e. is True) from
    the given clause set.

    Parameters
    ----------
    clauses: seq
        Sequence of clauses
    variables: object
        assignment object exposing ``satisfies_or_none(clause)``, which
        returns True/False/None for satisfied/violated/undecided

    Returns
    -------
    clauses: seq or None
        Sequence of clauses that are not yet satisfied. If None, it
        means at least one clause could not be satisfied
    """
    remaining = []
    for clause in clauses:
        status = variables.satisfies_or_none(clause)
        if status is False:
            # One violated clause makes the whole set unsatisfiable.
            return None
        if status is None:
            remaining.append(clause)
    return remaining
c11688df7a1d664c504a2602e5321c8aa6525f29
84,840
def delete_relationship(manager, relationship_id):
    """
    Deletes the relationship.

    :param manager: Neo4jDBSessionManager
    :param relationship_id: Internal Neo4j relationship id
    :return: bool
    """
    query = """
        MATCH ()-[r]->()
        WHERE ID(r) = {relationship_id}
        DELETE r
        """
    params = {'relationship_id': int(relationship_id)}
    with manager.session as session:
        session.run(query, params)
    return True
8413a261e1780ee221ecf4923b039d34450dbde9
84,841
def equals_auto_tol(x: float, y: float, precision: float = 1e-6) -> bool:
    """
    Return True if two numbers are equal within a relative tolerance of
    ``precision`` (default 1e-6) scaled by the smaller magnitude.

    Fixes over the original implementation:
    - compare against ``min(abs(x), abs(y))`` instead of ``min(x, y)``,
      whose product with the precision was *negative* for negative
      inputs, making even equal negative numbers compare unequal;
    - use ``<=`` so exactly-equal values (including 0 == 0, where the
      tolerance is 0) are reported equal.
    """
    tolerance = min(abs(x), abs(y)) * precision
    return abs(x - y) <= tolerance
7bfabdb43ebd6e662e82b9590e8bf0975b2897d4
84,846
import json


def scrub_profile_info(text):
    """Gigya auth and the account pages contain personal info. Scrub it.

    Returns ``text`` unchanged when it is not valid JSON; otherwise any
    truthy 'profile'/'billing' entries are replaced with 'REDACTED' and
    the object is re-serialized.
    """
    try:
        parsed = json.loads(text)
    except ValueError:
        return text
    for field in ('profile', 'billing'):
        try:
            if parsed[field]:
                parsed[field] = 'REDACTED'
        except (KeyError, TypeError):
            # Key absent, or parsed JSON is not a mapping — leave as-is.
            pass
    return json.dumps(parsed)
4313fe10b51c4f3c942edcfb3d5d80ddae31bd1d
84,851
def get_train_steps(dataset, train_steps, train_epochs, train_batch_size):
    """Determine the number of training steps.

    An explicit truthy ``train_steps`` wins; otherwise the step count is
    derived from the dataset size, epoch count and batch size (plus one
    so a partial final batch is not dropped).
    """
    if train_steps:
        return train_steps
    examples = dataset.num_train_examples
    return examples * train_epochs // train_batch_size + 1
a78a375583aa82ad69f390c59f5e0ef9c654eb2b
84,854
def open_files(txtfile: str):
    """Open files with scheduler data input

    Args:
        txtfile => File containing scheduler data

    Returns:
        mylines => Scheduler data in list format for analysis
    """
    with open(txtfile, 'rt') as handle:
        # Iterating a text file yields one line at a time.
        return list(handle)
6345985e802abd4a804abc1f8041da2c4f06bf3f
84,858
def get_match_all(*args):
    """
    :param args: each argument is a pattern to be found in the error text
    :return: regular expression pattern that matches the string only if
        each argument pattern is found

    Example:
    >>> get_match_all("a", "b")
    '(?is)(?=.*a)(?=.*b)'
    """
    # (?is) means "ignore case" and "dot matches newline character";
    # each pattern becomes a lookahead so order does not matter.
    lookaheads = [f"(?=.*{pattern})" for pattern in args]
    return "(?is)" + "".join(lookaheads)
4c302d447f75ccd32ef2071c8aeab2b489e8ec59
84,860
def lookup_module_function( module_reference, function_name ):
    """
    Acquires a function reference from a module given a function name.

    Takes 2 arguments:

      module_reference - Module whose interface is searched for
                         function_name.
      function_name    - String specifying the function whose handle is
                         sought.

    Returns 1 value:

      function_reference - Reference to the requested function. None if
                           function_name is not part of
                           module_reference's interface.
    """
    # getattr with a default covers the "not present" case directly.
    return getattr( module_reference, function_name, None )
5ce046b604b32b0442814d340bf8ac2a6be4c241
84,863
def get_attempt_like_users_for_group(attempt, group_pk):
    """
    Delegate to the attempt's ``get_like_users_for_group`` method.

    :param attempt: The attempt to get the like users for.
    :param group_pk: The group pk to pass to the method call.
    :return: The result of get_like_users_for_group (queryset of Users).
    """
    like_users = attempt.get_like_users_for_group(group_pk)
    return like_users
c7f3372c1bba84b89cd769eca9e4db0f5324b4ca
84,867
import torch


def to_mini_mask(rois, boxes):
    """
    Transform ROI coordinates from normalized image space to normalized
    mini-mask space, i.e. relative to the enclosing ground-truth box.
    """
    y1, x1, y2, x2 = rois.chunk(4, dim=1)
    gt_y1, gt_x1, gt_y2, gt_x2 = boxes.chunk(4, dim=1)
    height = gt_y2 - gt_y1
    width = gt_x2 - gt_x1
    rescaled = [(y1 - gt_y1) / height,
                (x1 - gt_x1) / width,
                (y2 - gt_y1) / height,
                (x2 - gt_x1) / width]
    return torch.cat(rescaled, dim=1)
40c1681fd66db65fd8eb923f7e05a3909a0978b6
84,875
def set_bypass_csp(enabled: bool) -> dict:
    """Enable page Content Security Policy by-passing.

    Parameters
    ----------
    enabled: bool
        Whether to bypass page CSP.
            **Experimental**
    """
    params = {"enabled": enabled}
    return {"method": "Page.setBypassCSP", "params": params}
8111060b1cd4454180796538167ba8179851006c
84,882
import pickle


def loads_content(content):
    """Deserialize a pickled response object.

    SECURITY NOTE: ``pickle.loads`` can execute arbitrary code from the
    payload — only use this on trusted, internally-produced content.

    :param content: serialized bytes
    :return: the deserialized response object
    """
    return pickle.loads(content)
1b494198f778235f4661d17540df3e929a1ac082
84,884
def shell_context_processor(self, f):
    """Registers a shell context processor function.

    Returns ``f`` unchanged so this can be used as a decorator.

    .. versionadded:: 1.0
    """
    processors = self.shell_context_processors
    processors.append(f)
    return f
366c99a931784ec3a09bb0e593eb456898efffe8
84,888
def recv_msg(socket):
    """Receive a message (up to 4096 bytes) from a socket, decoded as
    UTF-8."""
    payload = socket.recv(4096)
    return payload.decode('utf8')
0e1872c522b0894405363f2ef8c6d109f72a0a55
84,890
def get_int_input(message: str):
    """
    A simple function to get int inputs from the user.

    Re-prompts until a valid integer is entered.

    :param message: str | Message that goes into the prompt
    :return int | int input
    """
    while True:
        raw = input(message)
        try:
            return int(raw)
        except ValueError:
            print("Wrong Input! Please enter a number. ")
77274fe1012d06dc4381f46a1a7f6c26728d6745
84,891
import requests


def _post_request(url, token, body=None):
    """
    Send a requests.post request

    :param url: URL
    :param token: authorization token
    :param body: body to be sent with request
    :return: json of response
    """
    headers = {'Authorization': 'bearer ' + token}
    if body is None:
        response = requests.post(url, headers=headers)
    else:
        response = requests.post(url, headers=headers, data=body)
    # The HTTP status code is intentionally not acted upon; callers
    # inspect the JSON payload regardless of status.
    return response.json()
adb1923bb18f21b98356bc4bc7fd7e79b59e2dfb
84,893
def IpDecimalToBinary(decimal_ip, binary_size=32):
    """
    :param decimal_ip: IPv4 in decimal notation, e.g. 167772161
    :param binary_size: IP size in binary, default is 32 for IPv4
    :return: IPv4 in binary notation, e.g. 00001010000000000000000000000001
    """
    bits = bin(decimal_ip)[2:].zfill(binary_size)
    # Keep only the lowest ``binary_size`` bits (fixed-width output).
    return bits[-binary_size:]
0080c445055834d197ff3fc38096edcd96b4f16b
84,900
def get_xoutput_ref(self):
    """Return the reference XOutput (or Output) either from xoutput_ref
    or output_list.

    Parameters
    ----------
    self : XOutput
        A XOutput object

    Returns
    -------
    xoutput_ref: XOutput
        reference XOutput (or Output) (if defined)
    """
    index = self.xoutput_ref_index
    if index is None:
        return self.xoutput_ref
    return self.output_list[index]
5c541c98e3e90752c47b22d9a11fe972d5d4c60b
84,901
def hurdle_race(k, height):
    """Hackerrank Problem: https://www.hackerrank.com/challenges/the-hurdle-race/problem

    Number of doses needed so a runner with natural jump height ``k``
    can clear the tallest hurdle in ``height``; 0 when the runner can
    already clear it.

    Args:
        k (int): the natural jump height of the runner
        height (list): list of integer hurdle heights the runner must clear

    Returns:
        int: number of doses needed to clear highest hurdle
    """
    shortfall = max(height) - k
    return shortfall if shortfall > 0 else 0
2c12ab7d24fbeb7ad203b93f60efe3ee824c6aef
84,902
def hass_to_myhomeserver_brightness(value: int):
    """Convert Home Assistant brightness (0..255) to MyHomeSERVER format (0..100).

    NOTE: the original docstring had the two ranges swapped; the code
    divides by 255 and scales to 100, i.e. HA's 0..255 scale maps onto
    the server's 0..100 scale.
    """
    return int((value / 255.0) * 100)
40b5af3aa4717ae867e2a2e658476a33427fa68f
84,906
import torch


def ohe(input_vector, dim, device="cpu"):
    """One-hot encode ``input_vector`` into a (batch, dim) float tensor.

    Each entry of ``input_vector`` selects the column set to 1 in the
    corresponding row.
    """
    indices = input_vector.reshape(-1, 1)
    encoded = torch.FloatTensor(len(input_vector), dim).to(device)
    encoded.zero_()
    encoded.scatter_(1, indices, 1)
    return encoded
1b0406471d3755cf2f99d23f6873727b0986036a
84,907
import re


def to_snake_case(s: str) -> str:
    """
    Convert camel-cased ``s`` to snake case.

    :param s: string to be converted
    :returns: `s` in snake case
    """
    # Insert "_" before every non-leading run of capitals, then lower.
    with_separators = re.sub(r"(?!^)([A-Z]+)", r"_\1", s)
    return with_separators.lower()
23ac30adabac416ce18a47605ab5962ee914ae1e
84,908
import re


def pid_valid(passport):
    """
    Check that pid is valid.

    pid (Passport ID) - a nine-digit number, including leading zeroes.

    :param passport: passport
    :return: boolean
    """
    match = re.match(r'[\d]{9}$', passport['pid'])
    return match is not None
8a63dc25fdc56d88d5e7c2233d148610d949e285
84,909
def round_robin_strategy(num_tasks, last_task=None):
    """Sample task ids in round-robin order.

    Args:
        num_tasks (int): Total number of tasks.
        last_task (int): Previously sampled task, or None on the first call.

    Returns:
        int: task id (0 on the first call, otherwise the next id modulo
        num_tasks).
    """
    return 0 if last_task is None else (last_task + 1) % num_tasks
ce34a5662e9987e89af83b28126e78b3ba137493
84,910
def _add_poobah(poobah, extended=True): """ used by beta_MDS_plot to categorize poobah failure ranges for colormap. - Default returns 7 groups (0-30%). But 5 groups (0-20%) also an option with 'extended=False'. - Returns: a df with sample names in index and failure % in a column. """ #if poobah.isna().sum().sum() > 0: # LOGGER.warning("Your poobah_values.pkl file contains missing values; color coding may be inaccurate.") # this happens normally with qualityMask True percent_failures = round(100*( poobah[poobah > 0.05].count() / poobah.count() ),1) percent_failures = percent_failures.rename('probe_failure_(%)') # Series.where will replace the stuff that is False, so you have to negate it. percent_failures_hues = percent_failures.where(~percent_failures.between(0,5), 0) percent_failures_hues.where(~percent_failures_hues.between(5,10), 1, inplace=True) percent_failures_hues.where(~percent_failures_hues.between(10,15), 2, inplace=True) percent_failures_hues.where(~percent_failures_hues.between(15,20), 3, inplace=True) if extended: percent_failures_hues.where(~percent_failures_hues.between(20,25), 4, inplace=True) percent_failures_hues.where(~percent_failures_hues.between(25,30), 5, inplace=True) percent_failures_hues.where(~(percent_failures_hues > 30), 6, inplace=True) percent_failures_hues = percent_failures_hues.astype(int) percent_failures_hues = percent_failures_hues.replace({0:'0 to 5', 1:'5 to 10', 2:'10 to 15', 3:'15 to 20', 4:'20 to 25', 5:'25 to 30', 6:'>30'}) legend_order = ['0 to 5','5 to 10','10 to 15','15 to 20','20 to 25','25 to 30','>30'] else: percent_failures_hues.where(~(percent_failures_hues > 20), 4, inplace=True) percent_failures_hues = percent_failures_hues.astype(int) percent_failures_hues = percent_failures_hues.replace({0:'0 to 5', 1:'5 to 10', 2:'10 to 15', 3:'15 to 20', 4:'>20'}) legend_order = ['0 to 5','5 to 10','10 to 15','15 to 20','>20'] return percent_failures_hues, legend_order
d2becb8034dd6d009ad6ead1d9e889fe03b4c840
84,913
def linearlyInterpolate(v0, v1, blend=.5):
    """Get the vector interpolated between 2 vectors.

    Arguments:
        v0 (vector): vector A.
        v1 (vector): vector B.
        blend (float): Blending value (0.0 returns v0, 1.0 returns v1).

    Returns:
        vector: The interpolated vector.
    """
    delta = (v1 - v0) * blend
    return delta + v0
d7d37cca171772364798ed0966f929a5c8fcc875
84,917
from typing import List from typing import Tuple def _update_lumps_headers(lumps: List[Tuple[int, int, int, bytes]], lumps_after_update: List[int], sizediff: int): """Return a lumps headers list with new file offsets after performing update. Also updates lump length in the header which is being updated. """ updated_lumps = list(lumps) before_lump = lumps[lumps_after_update[0]] updated_lumps[lumps_after_update[0]] = before_lump[0] + sizediff, before_lump[1] + sizediff, *before_lump[2:] for lump_idx in lumps_after_update[1:]: before_lump = lumps[lump_idx] updated_lumps[lump_idx] = before_lump[0] + sizediff, *before_lump[1:] return updated_lumps
a9b3cd4a84e2ff08352d55a795d2bfff6bc2a1c2
84,924
import functools


def memoize(func, cache, num_args):
    """
    Wrap ``func`` so that results are stored in ``cache`` keyed by the first
    ``num_args`` positional arguments. Those arguments must be usable as
    dictionary keys (hashable).
    """
    @functools.wraps(func)
    def wrapper(*args):
        key = args[:num_args]
        try:
            return cache[key]
        except KeyError:
            cache[key] = func(*args)
            return cache[key]
    return wrapper
b12ff23b822d18a3a0e6a5f6db497d8e67114369
84,932
def collection_id(product: str, version: str) -> str:
    """Creates a collection id from a product and a version:

    Args:
        product (str): The MODIS product
        version (str): The MODIS version

    Returns:
        str: The collection id, e.g. "modis-MCD12Q1-006"
    """
    return "-".join(("modis", product, version))
d6a8df291210a2644904447a1687b75b2b614fc3
84,935
def get_simple_field(value):
    """
    Return the value unchanged, substituting the empty string for None
    (many destination fields reject null values).
    """
    if value is None:
        return ''
    return value
607d98e9c8e021109ead62ed4dee9ea3e04f8436
84,937
def conv_C2F(c):
    """Convert a temperature from degrees Celsius to degrees Fahrenheit."""
    scaled = c * 1.8
    return scaled + 32.0
416c25e23c5b944a7cdc1d762140ed2f34737ee9
84,942
import copy import six def _remove_empty(entry): """Remove any empty values and empty lists from entry.""" new_entry = copy.deepcopy(entry) for key, value in six.iteritems(new_entry): if isinstance(value, dict): new_entry[key] = _remove_empty(value) emptykeys = [ key for key, value in six.iteritems(new_entry) if not value ] for key in emptykeys: del new_entry[key] return new_entry
d8a87bf58d5d776e34e8d64d7d1dfe2655a1aed6
84,947
def check_overlap(samp1, samp2):
    """
    Compute the overlap between two samples: the number of entries of
    samp2 (duplicates counted) that also appear in samp1.
    """
    return sum(1 for cname in samp2 if cname in samp1)
e5b17fda0d020abbee7f0ca044136e4c4934b8eb
84,950
def snake_to_camel(txt):
    """Return camel case from snake case."""
    first, *rest = txt.split('_')
    return first + ''.join(part.capitalize() for part in rest)
a544e5f4ff612b44f16e44d88333f3ee109a441f
84,953
import random


def TestModels(model1, model2, temp, numTests):
    """ Base function for playing a BlackBird instance against another model.

        Args:
            `model1`: The Blackbird model to test.
            `model2`: The model to play against.
            `temp`: A float between 0 and 1 determining the exploitation temp
                for MCTS. Usually this should be close to 0.1 to ensure
                optimal move selection.
            `numTests`: An int determining the number of games to play.

        Returns:
            An integer representing a win (1), draw (0), or loss (-1)

        NOTE(review): the unconditional return at the end of the first game
        means the for-loop below never completes more than one iteration,
        so `numTests` has no effect. If multiple games are intended, the
        results should be accumulated and returned after the loop.
    """
    for _ in range(numTests):
        # Randomly assign which side model1 plays, to remove first-move bias.
        model1ToMove = random.choice([True, False])
        model1Player = 1 if model1ToMove else 2
        winner = None
        # Reset both models' search trees before the game starts.
        model1.DropRoot()
        model2.DropRoot()
        state = model1.Game()

        while winner is None:
            # Alternate moves between the two models.
            if model1ToMove:
                (nextState, *_) = model1.FindMove(state, temp)
            else:
                (nextState, *_) = model2.FindMove(state, temp)
            state = nextState
            # Keep both models' trees rooted at the current game state.
            model1.MoveRoot(state)
            model2.MoveRoot(state)

            model1ToMove = not model1ToMove
            winner = state.Winner()

        # Translate the winner id into model1's perspective:
        # 1 = model1 won, 0 = draw, -1 = model1 lost.
        if winner == model1Player:
            return 1
        elif winner == 0:
            return 0
        else:
            return -1
64c1b64a670fc982b2ffdb4dbb4a6e2273d01cfd
84,954
def get_cov(pileup, raise_error_if_0x=False):
    """Return the (match + mismatch) coverage for a position.

    The coverage is the sum of the counts in pileup[0]. When
    raise_error_if_0x is True, a ValueError is raised if that coverage is
    zero or (through some twist of fate) negative.
    """
    cov = sum(pileup[0])
    if raise_error_if_0x and cov <= 0:
        raise ValueError(f"pileup {pileup} has coverage of {cov}x.")
    return cov
b35459d7f42873f696aa1f4cdb98f4dbecbf3633
84,955
def bool_helper(x):
    """
    Returns True if the value is something that can be mapped to a boolean
    value.

    :param x: the value to check
    :return: the mapped boolean value or False if not mappable
    """
    truthy_values = (1, '1', 'true', 'True')
    return x in truthy_values
13479d57d1ce3e24aa6455d37c4031e1ebaa592e
84,956
from typing import Coroutine


def coro_ident(coro: Coroutine):
    """Extract an identifier for a coroutine.

    Falsy inputs are returned unchanged; otherwise the coroutine's
    __qualname__ is used when present and non-empty, falling back to repr().
    """
    if not coro:
        return coro
    if hasattr(coro, "__qualname__") and coro.__qualname__:
        return coro.__qualname__
    return repr(coro)
6a28f6df8a446d9ad82277b53ed9833a5833963f
84,959
def green(text):
    """ Return this text formatted green (mIRC color code 03). """
    colored = '%s' % text
    return '\x0303' + colored + '\x03'
65473761d5f41052b4b164ace2496162e7f90dc5
84,962
import re


def parse_show_access_list_hitcounts_ip_interface(raw_result):
    """
    Parse the 'show access-list hitcounts ip test12 interface 1' command
    raw output. If no count on ACE, hit_cnt is '-'; for traffic not
    matching an ACE, hit_count is 0

    :param str raw_result: vtysh raw result string.
    :rtype: dict
    :return: The parsed result of the show access-list command \
        in a dictionary of the form. Returns None if empty dictionary:

     ::

        {
            '50 permit any 10.0.10.1 10.0.10.2': '10'
        }
    """
    rule_re = re.compile(r'\s+(?P<hit_count>[-\d]+)\s+(?P<rule>.*$)')
    result = {}
    # The first two lines are headers and are always skipped.
    for line in raw_result.splitlines()[2:]:
        match = rule_re.search(line)
        if match:
            result[match.group('rule')] = match.group('hit_count')
    return result or None
dc5bedba021ac77be4eb976ed8b25721b15e17ee
84,963
def get_params(line):
    """
    Gets the parameters from a line.
    @ In, line, string, The line to parse (must contain '=';
      raises ValueError otherwise)
    @ Out, (name,params), (string,list), The name of the parameter and a
      list of parameters.
    """
    eq = line.index("=")
    name = line[:eq].strip()
    value = line[eq + 1:].strip()
    params = value.strip('\"\'').split()
    return name, params
4220284130907b845faa0831d13dc3cc5fb3f992
84,968
from typing import Dict


def find_security(content: Dict, scheme: str) -> bool:
    """Check if security scheme is used in the provided content.

    Arguments
    ---------
    content
        OpenAPI document (dict or list) to be searched recursively.
    scheme
        Security scheme to be searched.

    Returns
    -------
    Flag determining presence of the security scheme in the content.
    """
    if isinstance(content, list):
        return any(find_security(item, scheme) for item in content)
    if isinstance(content, dict):
        for key, value in content.items():
            if key == 'security' and any(
                    isinstance(entry, dict) and scheme in entry
                    for entry in value):
                return True
            if find_security(value, scheme):
                return True
    return False
0fe2485893fbf520cce10c7002346792bdd541e1
84,972
def parse_int(v, fallback_to=0):
    """Parse ``v`` as an integer, returning ``fallback_to`` on failure.

    Falls back both for invalid literals (ValueError) and for values of an
    unsupported type such as None (TypeError) -- the original only caught
    ValueError, so parse_int(None) raised instead of falling back as the
    contract promises.

    :param v: value to parse
    :param fallback_to: value returned when parsing fails (default 0)
    :return: the parsed integer, or ``fallback_to``
    """
    try:
        return int(v)
    except (ValueError, TypeError):
        return fallback_to
821f61a70f5b5db239e08736483d96ca3c425297
84,973
import re def _find_page_sizes(text): """Finds page sizes by the MediaBox attribute. Returns a list of width,height-tuples. Hack - this assumes that only pages have a MediaBox attribute. """ re_mediabox = re.compile(r"/MediaBox \[(\d+ \d+ \d+ \d+)\]") sizes = [] for m in re_mediabox.finditer(text): x,y,w,h = [int(item) for item in m.group(1).split(" ")] sizes.append((w,h)) return sizes
03e3d3d22ef672d0b24f986ea4056fa2db2cf808
84,976
def get_buckets(self):
    """
    Documentation:

        ---
        Description:
            Associate each S3 bucket name with its S3 bucket object.

        ---
        Returns:
            raw_buckets : dict
                Dictionary where the keys are S3 bucket names and the
                values are the associated S3 bucket objects.
    """
    return {instance.name: instance for instance in self.s3_resource.buckets.all()}
20e3b484de24a247cb9998ed4292c7971ca91335
84,982
def metric_delta_g(entity, schedule):
    """
    Compute the factor ∆g for the current schedule and the reference
    schedule.

    Parameters
    ----------
    entity : ElectricalEntity
        The Entity to calculate the delta g metric for.
    schedule : str
        Referenced Schedule
        - 'default' : Normal schedule
        - 'ref' : Reference schedule

    Returns
    -------
    float :
        Factor ∆g.

    Notes
    -----
     - Implementation as given in the lecture "Elektrizitaetswirtschaft"
       by Prof. Dr.-Ing. Christian Rehtanz from TU Dortmund, Germany.
    """
    dsm_profile = entity.p_el_schedule
    ref_profile = entity.schedules[schedule]["p_el"]
    dsm_span = abs(max(dsm_profile) - min(dsm_profile))
    ref_span = abs(max(ref_profile) - min(ref_profile))
    return 1.0 - (dsm_span / ref_span)
a8ba59085e062d745f457d768b3fa504cf1f01bf
84,984
def safety_check_first_line(first_line: str) -> None:
    """Inspects first line of lineage_notes.txt to perform safety check.

    We pull all of our Pango lineages from a human-edited .txt file whose
    format has been stable so far. If the first line ever differs from the
    expected header, the file format has probably changed and the loading
    script likely needs rewriting -- so print diagnostics and abort.

    Raises:
        RuntimeError -- If first line of file not what was expected.
    """
    expected = "Lineage\tDescription\n"
    if first_line == expected:
        return None
    print("First line of imported lineages file has changed!")
    print("Loading script was originally written for previous version.")
    print(f"Expected: '{expected}'")
    print(f"Actually got first line: '{first_line}'")
    print("Very likely you need to rewrite loading script. Aborting.")
    raise RuntimeError("Format of lineage file has likely changed")
63246beb9e3eea515297ed30f83cd5462517e475
84,985
import io
import pickle


def identity_serialize_deserialize(thing, the_class):
    """
    Take the thing, serialize it to a dict of basic types, pickle it.
    Then unpickle the result and deserialize it to the original object.
    Return the result.

    This whole operation should be equivalent to identity. It "shouldn't
    change" the thing.

    Improvement over the original: the pickle round-trip now goes through an
    in-memory buffer instead of a hard-coded 'env.pkl' file in the current
    directory, which was left behind after every call and could collide
    between concurrent runs.

    :param thing Serializable: the thing to test
    :param the_class type: final type of serializable
    :return the_class: the deserialized object
    """
    buffer = io.BytesIO()
    pickle.dump(thing.serialize(), buffer, protocol=pickle.HIGHEST_PROTOCOL)
    buffer.seek(0)
    serialized = pickle.load(buffer)
    return the_class.deserialize(serialized)
a0be6d2a6f6de4e60079721b1a447cd29e1a0c42
84,986
import re


def wrap_slice(rule_slice, status):
    """Wrap the changed rules text in tags for JSON to parse.

    This allows us to highlight changes programmatically. Javascript can
    replace the old_start, new_start, etc tags with their proper <span> tags
    in the generated HTML. Easier to handle there than here.

    Note this does NOT differentiate between inserts, replaces, deletions
    like a general diff tool.

    Keyword arguments:
    rule_slice -- the chunk of the rule that indicates a change
    status -- whether the slice belongs to an 'old' rule or a 'new' rule.

    Returns '' for an empty slice, the slice unchanged when it is only a
    rule citation, and otherwise the slice wrapped in start/end markers.
    """
    if not rule_slice:
        return ''
    # Raw-string prefixes added: the original used plain strings, so '\d'
    # and '\)' were invalid escape sequences (SyntaxWarning on modern
    # Python, slated to become an error). The pattern bytes are unchanged.
    if re.match(r'^^(?:rules? )?'
                r'\d{3}(?:\.\d+[a-z]*)*'
                r'(?:–\d{3}(?:\.\d+[a-z]?)?)?\)?\.?',
                ' '.join(rule_slice)):
        return rule_slice
    if status == 'old':
        return ['old_start', *rule_slice, 'old_end']
    else:
        return ['new_start', *rule_slice, 'new_end']
42db80c6ef0d5c21c10daec4a38625012cd8e448
84,987
import yaml


def load_TS_class(filename, print_info=True):
    """
    Load parameters and rules from a yaml file to create time series.

    Optionally echoes the class name, description and dimensions to stdout.
    Returns the parsed definition as a dict.
    """
    with open(filename, 'r') as ymlfile:
        ts_def = yaml.load(ymlfile, Loader=yaml.SafeLoader)

    if print_info:
        print(ts_def['class_name'])
        print(ts_def['description'])
        print('n_channels:', ts_def['n_channels'])
        print('n_timepoints:', ts_def['n_timepoints'])

    return ts_def
f316ae454fd652d0afd5ca24f055c646617a7879
84,988