content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
from typing import Union
import types
import importlib


def import_if_installed(name: str) -> Union[types.ModuleType, ModuleNotFoundError]:
    """Attempt to import *name*, returning the error instead of raising.

    Args:
        name (str): Module name.

    Returns:
        module or ModuleNotFoundError: the imported module when found,
        otherwise the :class:`ModuleNotFoundError` instance.
    """
    try:
        return importlib.import_module(name)
    except ModuleNotFoundError as err:
        return err
2c3f365375f512da88e3db50a4f994205571d9b8
85,302
def circleBox(x, y, r):
    """Return the coordinates of the box enclosing a circle."""
    left, top = x - r, y - r
    right, bottom = x + r, y + r
    return left, top, right, bottom
a6efc46f20bcdcce373abbc3bc9a7c7207bdc7c7
85,306
import torch


def get_device(args):
    """
    Determine the torch device implied by ``args.gpu_id``.

    A negative gpu_id selects the CPU; otherwise the matching CUDA device.
    """
    if args.gpu_id < 0:
        return torch.device('cpu')
    return torch.device('cuda:{}'.format(args.gpu_id))
b5fcd2e192eee7d3b7d1512352a14f36054d1079
85,307
def identity(elem):
    """Identity function: give back exactly what was passed in."""
    return elem
c8ed88d9ccf4408ed2e91a9206001fc09c0fe7a1
85,310
def get_pokemon_ids(cur):
    """Return a sorted list of pokemon (species) ids (as ints) from the db.

    Args:
        cur: an open sqlite3 cursor created from a connection to a pokemon db
    """
    cur.execute('SELECT species_id FROM pokemon')
    ids = [row[0] for row in cur.fetchall()]
    ids.sort()
    return ids
462825c871c3537e29a48b0d41f2960b5093d791
85,313
def comp_points(point1):
    """Build a point-equality comparator bound to ``point1``.

    Two points are considered the same when their "row" values and "col"
    values are equal.  (The original docstring described two positional
    parameters, but the function is curried: it takes the first point and
    returns a closure taking the second.)

    @param {point} point1 First point
    @return {function} ``f(point2) -> Boolean``: true if row and col match
    """
    def _inner1(point2):
        # Same cell iff both coordinates agree.
        return (point1["row"] == point2["row"]) and (
            point1["col"] == point2["col"])
    return _inner1
12b4f79f8f2f83d4f55aaecf48c8cf7cef86e0ae
85,314
def get_music_volume(data: dict) -> float:
    """Look up the user's stored music volume, defaulting to 0.5 when unset."""
    if 'music_vol' in data:
        return data['music_vol']
    return 0.5
5324543335cfc8c46d08bf8b9e97b2f71d3bcea1
85,318
def convert_mac_colon_to_dot_format(mac_addr):
    """
    Convert a MAC address from colon format to dot format,
    e.g. aa:bb:cc:dd:ee:ff -> aabb.ccdd.eeff

    Args(str): mac address in colon format
    Returns(str): mac address in dot format
    """
    octets = mac_addr.split(":")
    groups = ["".join(octets[i:i + 2]) for i in (0, 2, 4)]
    return ".".join(groups)
3732768799b2259962ea4063cffedd407a16094e
85,326
def states_hash(states):
    """Generate a hash of a list of states."""
    ordered = sorted(states)
    return "|".join(ordered)
50308bc2199aa9a295af9416d6fcd6142fa8e334
85,332
def startup_fuel_burn_rule(mod, g, tmp):
    """
    Startup fuel burn is applied in each timepoint based on the amount of
    capacity (in MW) that is started up in that timepoint and the startup
    fuel parameter.
    """
    started_mw = mod.GenCommitCap_Startup_MW[g, tmp]
    return started_mw * mod.startup_fuel_mmbtu_per_mw[g]
94f13e07b2727ec41a55b5761ac9af5f95ff3866
85,335
def OR(*args) -> str:
    """
    Creates an OR Statement

    >>> OR(1, 2, 3)
    'OR(1, 2, 3)'
    """
    # BUG FIX: the original ",".join(args) raised TypeError for non-string
    # arguments and used no space, contradicting the doctest above.
    return "OR({})".format(", ".join(str(a) for a in args))
95814aa2e6d99d33e79964a16ec0cb9a0294a838
85,336
from typing import Tuple def _precision_recall_f1_from_tp_tpfp_tpfn( tp: int, tpfp: int, tpfn: int ) -> Tuple[float, float, float]: """ Computing precision, recall and f1 score Args: tp: number of true positives tpfp: number of true positives + false positives tpfn: number of true positives + false negatives Returns: Precision, Recall and F1 score """ precision, recall, f1_score = 0.0, 0.0, 0.0 if tpfp: precision = tp / tpfp if tpfn: recall = tp / tpfn if precision and recall: f1_score = 2 * precision * recall / (precision + recall) return precision, recall, f1_score
4d4d96c69469b21b32e23dedeb883e50608945e6
85,340
import binascii


def esp8266_crc32(data):
    """
    CRC32 algorithm used by 8266 SDK bootloader (and gen_appbin.py).
    """
    crc = binascii.crc32(data, 0) & 0xFFFFFFFF
    # Mirror the SDK's quirk: invert when the sign bit is set, else add one.
    return (crc ^ 0xFFFFFFFF) if crc & 0x80000000 else (crc + 1)
f747109c42cb663d8ce646f4029ddfd3f37819f3
85,341
def create_dicts_w_info(df, table_visit_diff_string, bad_records_string='num_bad_records'):
    """Create per-HPO summary dictionaries suitable for graphing.

    Parameters
    ----------
    df (dataframe): dataframe that contains the information to be converted;
        must include a 'src_hpo_id' column plus the two columns named below.
    table_visit_diff_string (string): column used to calculate the 'average'
        difference between a date of interest and the visit start date (e.g.
        observation date vs. visit start date).
    bad_records_string (string): column whose rows are summed per site; e.g.
        'num_bad_records' gives the total number of 'bad' (discrepant)
        records for a particular site.

    Returns
    -------
    num_bad_records (dictionary): HPO -> total number of 'bad' (discrepant)
        records for the column of interest.
    table_visit_diff_dict (dictionary): HPO -> 'average' difference between
        the two types of dates per table_visit_diff_string.
        NOTE: this 'average' distance is ONLY for the erroneous records (and
        is not swayed by records where the two dates are concordant).
    """
    hpos = df['src_hpo_id'].unique().tolist()
    # Per-site dict of column sums, taken from a synthetic "Total" row
    # appended to each site's slice.
    site_dictionaries = {}
    for hpo in hpos:
        sample_df = df.loc[df['src_hpo_id'] == hpo]
        # NOTE(review): assigning into a slice may emit a pandas
        # SettingWithCopyWarning — confirm intended.
        sample_df.loc["Total"] = sample_df.sum(numeric_only=True)
        hpo_dict = sample_df.loc["Total"].to_dict()
        site_dictionaries[hpo] = hpo_dict
    tot = 0  # NOTE(review): unused accumulator kept from the original
    num_bad_records = {}
    for hpo, info in site_dictionaries.items():
        num_bad_records[hpo] = info[bad_records_string]
    table_visit_diff_dict = {}
    tot_rec, tot_diff = 0, 0
    for hpo, info in site_dictionaries.items():
        bad_records = info[bad_records_string]
        difference = info[table_visit_diff_string]
        tot_rec += bad_records
        tot_diff += difference
        # Average date difference over this site's bad records only.
        avg = round(difference / bad_records, 2)
        table_visit_diff_dict[hpo] = avg
    # Overall average across all sites.
    table_visit_diff_dict['Total'] = round(tot_diff / tot_rec, 2)
    return num_bad_records, table_visit_diff_dict
c5a6e8d4733b9433167c394a29f1ccf38f4fd9d2
85,342
def get_payment_request_for_wills(will_alias_quantity: int = 1):
    """Return a payment request object for wills."""
    filing_types = [
        {'filingTypeCode': 'WILLNOTICE'},
        {'filingTypeCode': 'WILLALIAS', 'quantity': will_alias_quantity},
    ]
    return {
        'businessInfo': {'corpType': 'VS'},
        'filingInfo': {'filingTypes': filing_types},
    }
30d9944adedff7a4f3aef287b3b9f3d544899977
85,350
import string
import random


def random_string(length=10):
    """
    Create a random string drawn from lowercase, uppercase, and digits.

    Args:
        length (int): length of string to generate, default 10
    returns:
        string
    raises:
        None
    """
    alphabet = string.ascii_lowercase + string.ascii_uppercase + string.digits
    return "".join(random.choice(alphabet) for _ in range(length))
2044cd33fec8c05f34d076d85619d8c2af5bafa0
85,356
def get_number_available(product_information_table: list) -> str:
    """Return number of books available (digits only, as a string)."""
    raw = product_information_table[5].text
    return "".join(ch for ch in raw if ch.isdigit())
ab39cd19b36d53997e22485fbebfab63cd8202eb
85,358
def return_txt(fn: str) -> list:
    """
    Open a file and return its contents as a list of lines.

    Args:
        fn (str): File name to open

    Returns:
        list: whole file as a list of lines; empty list if the file is missing
    """
    try:
        with open(fn, "r") as fh:
            return fh.readlines()
    except FileNotFoundError as e:
        print(f"File not Found :: {e}")
        return []
9d275ebe772c00ee6ea0f579f653763b26168bce
85,361
def upload_receive(request):
    """ Returns the file(s) uploaded by the user. """
    if not request.FILES:
        return None
    return request.FILES['files[]']
3faab02b038baee2fd65fdd82ec615a8539f78ce
85,362
def s(j, k):
    """ Subtract two values """
    return j - k
c1204af6012a0493e27136ddfdeec50420eeb5e0
85,365
def get_all_positives(df_merged):
    """Counts all positive examples in the groundtruth dataset."""
    speaking = df_merged["label_groundtruth"] == "SPEAKING_AUDIBLE"
    return df_merged.loc[speaking, "uid"].count()
d2b47368d71e3d9ca2315b514f610fac99b97213
85,366
def parse_rawdata(rawdata):
    """Make a dictionary from the RawData attribute."""
    parsed = {}
    for chunk in rawdata.split(";"):
        key, value = chunk.split("=", maxsplit=1)
        parsed[key] = value
    return parsed
bf5232eb909e6bb22d204cffa5878f16d9618e12
85,369
def make_info_string(sep=',', **kwargs):
    """
    Construct an information string in the following view:
        key1: value1[sep]key2: value2[sep][(keyN: valueN)]

    params:
        sep : a separator between instances. Possible values: ',', '\n'
        **kwargs : params

    raises:
        ValueError: if ``sep`` is not ',' or '\n'
    """
    if sep not in [',', '\n']:
        # BUG FIX: the original constructed this ValueError but never
        # raised it, silently accepting a bad separator.
        raise ValueError("Wrong separator: {}. 'sep' must be: ',' or '\n'".format(sep))
    info_str = ""
    for key, value in kwargs.items():
        info_str += "{0}: {1}{2} ".format(key, value, sep)
    # Drop the trailing separator (and space) appended by the loop.
    info_str = info_str[:-2] if info_str[-2] == ',' else info_str[:-1]
    return info_str
9afab0de5ac8ea4b27e7945234308e1952fc174d
85,370
def get_item_count_dictionary_from_list(generic_list):
    """
    Given a list of items returns a dictionary containing its counts

    :param generic_list: List containing the items
    :return: Dictionary containing the item counts
    """
    counts = {}
    for item in generic_list:
        counts[item] = counts.get(item, 0) + 1
    return counts
020ce7203aa9b19ca1f6c9c20a5f68319ebb87ad
85,373
def palindromic_age_check(diff, show=False):
    """
    Count, over a century, the ages where a child's (zero-padded) age is
    the mother's age reversed, the ages differing by a fixed number of years.

    Parameters:
        diff :int -> number of the years between ages
        show :bool -> indicate if the ages will be printed
    """
    count = 0
    for child in range(1, 100):
        child_age = str(child).zfill(2)
        mon_age = str(child + diff).zfill(2)
        if mon_age == child_age[::-1]:
            if show:
                print('child {}, mother {}'.format(child_age, mon_age))
            count += 1
    return count
75e6816ec4bcec54269b99175c9fa1692ab2f03b
85,376
def is_consecutive_rows(lst):
    """Check if a list of integers is consecutive.

    Args:
        lst (list): The list of integers.

    Returns:
        True/False: If the list contains consecutive integers.

    Originally taken from and modified:
    http://stackoverflow.com/
    questions/40091617/test-for-consecutive-numbers-in-list
    """
    assert 0 not in lst, '0th index is invalid!'
    unique = set(lst)
    if not unique:
        return True
    return unique == set(range(min(unique), max(unique) + 1))
eb4b1d1e04fa84a1fb1addc42d5029f459045ace
85,382
def make_vmap(ru, rd):
    """return a dict mapping ids in the notation to their rust variable names"""
    vmap = {name: 'v' + name[0] for name in "z0 z1 z2 z3 a0 a1 a2 a3".split()}
    for up_id in ru:
        vmap[up_id] = 'vu'
    for down_id in rd:
        vmap[down_id] = 'vd'
    return vmap
33f086cbc5f1884fb1c0091bf8122d5faa8716ff
85,384
def finding_to_string(finding):
    """Create a string representation of a finding."""
    # pylint: disable=W0142
    template = "{0} '{1}'\n {2}: {3}\n {4}"
    return template.format(*finding)
84e2fd5500295f72a94d0764008240ccaea75998
85,385
def get_all_items(list_widget, as_string=True):
    """
    Gets all the items in a listWidget as a list

    :param list_widget: your QListWidget
    :param as_string: <bool> if set to true, will return the text of the
        item. If set to false will return the actual QListWidgetItem
    :return: items of your QListWidget
    """
    count = list_widget.count()
    if as_string is True:
        texts = (list_widget.item(i).text() for i in range(count))
        return [t for t in texts if t is not None]
    return [list_widget.item(i) for i in range(count)]
1b3fe7c8660075d65c28ef8e235359e56d3b5e7d
85,386
def parse_args(parser):
    """
    Parse commandline arguments.
    """
    trtis_help = "exports to appropriate directory for TRTIS"
    parser.add_argument("--trtis_model_name", type=str, default='waveglow',
                        help=trtis_help)
    parser.add_argument("--trtis_model_version", type=int, default=1,
                        help=trtis_help)
    parser.add_argument('--fp16', action='store_true',
                        help='inference with mixed precision')
    return parser
e1530a7f86305e6a76f76176e0e2e26137779722
85,393
def _file_content_to_str(file_path): """ Converts a file content to a string. Args: file_path (str): the path of the file. Return: str: file content as string """ with open(file_path) as file: return file.read()
00406994f60336b42b1a1690c41d28553bb6a17c
85,397
import re


def get_hostname(url: str):
    """Extract single hostname from the nested url

    Parameters
    ----------
    url : str

    Returns
    -------
    str
        The site contains protocol and hostname
    """
    # Group 1 keeps the (optional) protocol, group 2 the hostname;
    # everything after the first '/' or '?' is discarded.
    pattern = re.compile(r"(.*://)?([^/?]+).*")
    return pattern.sub(r"\g<1>\g<2>", url)
31482ba8d29c745754f28e910e12493915ece9f7
85,401
def atom_idx_from_bonds(topology, idx):
    """Finds indices of atoms bonded to a given atom.

    Loops over all bonds in the topology; whenever the atom with the given
    index sits on one end of a bond, the index of the atom on the other end
    is collected. Returns the list of collected indices.
    """
    idx_bonded = []
    for bond in topology.bonds():
        first, second = bond.atom1.index, bond.atom2.index
        if first == idx:
            idx_bonded.append(second)
        if second == idx:
            idx_bonded.append(first)
    return idx_bonded
4c77455b4ecede60f06b4cd45e1b84b84e25e32e
85,407
def _assemble_triangles(animal_obj): """ Computes a basic triangulation on the regular coordinates of an animal. Parameters ---------- animal_obj : Animal() object Initialized with regular coordinates set/updated Returns ------- list of triples of ints Specifying the indices of the vertices for each triangle in the triangulation of a surface. """ #store relevant parameters num_x_grid, num_y_grid = animal_obj.get_num_grids() #initialize triangle list triangles = [] #iterate through lower left corners of grid and append canonical triangles for i in range(num_x_grid-1): for j in range(num_y_grid-1): triangles.append([i*num_y_grid+j, (i+1)*num_y_grid+j, (i+1)*num_y_grid+(j+1)]) triangles.append([i*num_y_grid+j, (i+1)*num_y_grid+(j+1), i*num_y_grid+(j+1)]) return triangles
fc6481a4322c4375b9348d1dbe9f3a690dc7b602
85,410
def length_func(list_or_tensor):
    """Return len() for a plain list, otherwise the leading dim of .shape."""
    is_plain_list = type(list_or_tensor) == list
    return len(list_or_tensor) if is_plain_list else list_or_tensor.shape[0]
18781bb0f957e9e2b88a2b2596e44ab91f8583a1
85,414
def GetResourceName(uri):
    """Returns the name of a GCP resource from its URI."""
    parts = uri.split('/')
    return parts[3]
a97647b65158ff2bede6cc3f0baf468714cce429
85,417
def data_encode_TF(anonymized_dataset, target, paths_to_leaves, is_cat):
    """
    Implement TF encoding, that is:
    - For continuous attributes: if the target value is inside an interval,
      then set the related entry to 1. Otherwise, to 0.
    - For categorical attributes: if the target value is in the path from a
      category to an leaf, then set the related entry to 1. Otherwise, to 0.
    """
    # Number of quasi-identifier attributes; the last column is the label.
    qi_num = len(anonymized_dataset[0]) - 1
    encoded_data = []
    for i in range(len(anonymized_dataset)):
        record = []
        index = 0  # counts categorical attributes seen so far in this row
        for j in range(qi_num):
            if is_cat[j]:
                # Categorical: 1 iff the generalized value lies on the path
                # to the target leaf for this categorical attribute.
                if anonymized_dataset[i][j] in paths_to_leaves[index][target[j]]:
                    record.append(1)
                else:
                    record.append(0)
                index += 1
            else:
                # Continuous: the cell is either "low,high" or a single value.
                temp = anonymized_dataset[i][j].split(',')
                if len(temp) == 1:
                    low = high = temp[0]
                else:
                    low = temp[0]
                    high = temp[1]
                # NOTE(review): this compares strings, so it assumes values
                # are formatted such that lexicographic order matches numeric
                # order — confirm with the caller.
                if low <= target[j] <= high:
                    record.append(1)
                else:
                    record.append(0)
        # Carry the class label through unchanged.
        record.append(anonymized_dataset[i][-1])
        encoded_data.append(record)
    return encoded_data
90fb13047547a9d4f7f849d4e1c33406510ffaa9
85,420
def reinchenbag_implication(x, y):
    """Pairwise Reichenbach fuzzy implication: 1 - x + x*y."""
    product = x * y
    return 1 - x + product
aff3f88a7b14ce90d8afbb25a9b3bf8f9113c754
85,421
def expo_search(ternary_search_function):
    """
    Find how many iterations it takes for `ternary_search` to find the answer.

    Exponential phase: double `max_iterations` until the ternary search
    returns a valid answer; binary phase: search the smallest sufficient
    value within the final doubling range.
    """
    hi = 1
    # Double until the search stops returning the -1 sentinel.
    while ternary_search_function(max_iterations=hi) == -1:
        hi *= 2
    lo = hi // 2
    # Binary-search the smallest max_iterations that yields a valid answer.
    while hi >= lo:
        mid = (lo + hi) // 2
        if ternary_search_function(max_iterations=mid) == -1:
            lo = mid + 1
        else:
            hi = mid - 1
    return lo
8857d68024e1c42c01334b2f7e35fdd1aa369301
85,423
def extract_bc_matrices(mats):
    """Extract boundary matrices from list of ``mats``

    Parameters
    ----------
    mats : list of list of instances of :class:`.TPMatrix` or
        :class:`.SparseMatrix`

    Returns
    -------
    list
        list of boundary matrices.

    Note
    ----
    The ``mats`` list is modified in place since boundary matrices are
    extracted.
    """
    bc_mats = []
    for sub in mats:
        for mat in list(sub):
            if mat.is_bc_matrix():
                bc_mats.append(mat)
                sub.remove(mat)
    return bc_mats
1eb35eebe29cf7dba47d176714eb49915d29917b
85,424
def transverseWidgets(layout):
    """Get all widgets inside of the given layout and the sub-layouts"""
    widgets = []
    for idx in range(layout.count()):
        entry = layout.itemAt(idx)
        sub_layout = entry.layout()
        if sub_layout:
            # Recurse into nested layouts.
            widgets.extend(transverseWidgets(sub_layout))
        widget = entry.widget()
        if widget:
            widgets.append(widget)
            # A widget may itself host a layout with more widgets.
            if widget.layout():
                widgets.extend(transverseWidgets(widget.layout()))
    return widgets
57c6d12e901f5248eb3e08b3d6f8e131d4e16a64
85,428
import re


def decode_addresses(addrs):
    """
    Find all IP address-port pairs in the given string.

    The convention follows the definitions in RFC3986. For IPv4 it's:
    `xxx.xxx.xxx.xxx:xxxx`, and for IPv6: `[xxxx::xxxx]:xxxx`.
    """
    exp = re.compile(r"""
        [\s]*                                            # whitespaces
        (
            ((?P<IPv4>[\d.]+):(?P<portv4>\d+))|          # IPv4
            (\[(?P<IPv6>[A-Fa-f0-9:\.]+)\]:(?P<portv6>\d+))  # IPv6
        )
        """, re.VERBOSE)
    addresses = []
    pos = 0
    while True:
        found = exp.match(addrs, pos)
        if found is None:
            return addresses
        pos = found.end()
        if found.group('IPv4'):
            addresses.append((found.group('IPv4'), int(found.group('portv4'))))
        else:
            addresses.append((found.group('IPv6'), int(found.group('portv6'))))
0570ea1dad6177d12dea3d9c6b1d9c86dd44b638
85,433
import torch


def combined_masks(action_mask, betsize_mask):
    """Combines action and betsize masks into flat mask for 1d network outputs"""
    ndim = action_mask.dim()
    if ndim > 2:
        return torch.cat([action_mask[:, :, :-2], betsize_mask], dim=-1)
    if ndim > 1:
        return torch.cat([action_mask[:, :-2], betsize_mask], dim=-1)
    return torch.cat([action_mask[:-2], betsize_mask])
7b4676ff4701689403721e0d6cee8a6b97dfa4e2
85,448
def force_bool(in_bool):
    """Force string value into a Python boolean value

    Everything is True with the exception of; false, off, and 0"""
    return str(in_bool).lower() not in ('false', 'off', '0')
8281b94c9b8e2eae30b7d234d21c33e63e75db63
85,449
def get_dependencies(doc, n):
    """Get dependencies as lists of (token, deprel, dependent_token) tuples
    for the first 'n' sentences in doc."""
    all_deps = []
    for i in range(n):
        sent_deps = [(head.text, rel, dep.text)
                     for head, rel, dep in doc.sentences[i].dependencies]
        all_deps.append(sent_deps)
    return all_deps
cf60c17d64e8d45ce1cc9449db7fed4763bafbae
85,455
def map_generate_tuple(*args):
    """Generate a tuple with the results from the func.

    Used to assist dict(), map() to generate a dictionary.

    Args:
        *args (list): [0]:( key (immutable): key of the generated dict,
                            func (function): function to be called,
                            arg (tuple): arguments for func)

    Returns:
        tuple: (key, func(*arg))
    """
    spec = args[0]
    key, func, arg = spec[0], spec[1], spec[2]
    return (key, func(*arg))
ba2986a41aa28098aa2c7ecb8781429eaf0419f9
85,460
def get_page_range_from_response(response, start_from=1):
    """Returns a range of pages from Slack API response

    :param response: Slack API JSON response
    :return: Range of pages
    """
    last_page = response['paging']['pages']
    return range(start_from, last_page + 1)
b061803ef7be9b6432defd1f77b49c40d1dcee7d
85,468
def transform(tr, x, y):
    """Try transforming with X and y. Else, transform with only X."""
    try:
        return tr.transform(x), y
    except TypeError:
        # Transformer wants both X and y; it returns the pair itself.
        return tr.transform(x, y)
a186076bd065a3ad90b40be9202e22e47646c744
85,469
def json_to_form(form_dict, sep=u'_', pre=u'', _ls=u'-'):
    """Convert a json dictionary to match the flat form fieldnames.

    Nested dicts are flattened with ``sep``; lists of dicts are flattened
    with ``_ls`` plus the row index.  ``id`` keys and ``None`` values are
    skipped.
    """
    flat = []
    # BUG FIX: dict.iteritems() is Python 2 only; items() works on Python 3.
    for key, val in form_dict.items():
        key_name = pre + key if pre else key
        if isinstance(val, dict):
            leaf = json_to_form(val, sep=sep, pre=key_name + sep)
            flat += leaf
        elif isinstance(val, list) and (len(val) and isinstance(val[0], dict)):
            for i, row in enumerate(val):
                row_key = _ls.join([key_name, str(i)])
                leaf = json_to_form(row, sep=sep, pre=row_key + _ls)
                flat += leaf
        else:
            node = (key_name, val)
            if key_name != u'id' and val is not None:
                flat.append(node)
    return flat
8e74cbda40fe68c08dcbf173bf56e87f59d1cc6b
85,470
def train_predictors_epoch(model, data_loader, loss_func, device, trial, epoch, silent=True):
    """ Train predictors networks for single epoch.

    Args:
        model (LDL): object of model to train
        data_loader (iterable): data loader of (x, y) samples
        loss_func (func): torch loss function
        device (torch.Device): device to move data to
        trial (int): number of trial (for logging)
        epoch (int): number of current epoch (for logging)
        silent (bool): if true outputs nothing

    Returns (list):
        list of lists of [trial_n, 'train', epoch_n, sample_n, predictor_n, loss]
    """
    n = len(data_loader)
    results_data = []  # trial | split | epoch | sample | predictor | value
    for batch_i, (x, y) in enumerate(data_loader):
        # One sample per step: reshape to (1, features) and move to device.
        x = x.view(1, x.shape[0]).to(device)
        y = y.to(device)
        # Activate corresponding predictor and get corresponding optimizer
        # (one predictor/optimizer per class label).
        model.activate_predictor(class_=y.item())
        optimizer = model.get_optimizer(y.item())
        # Forward sample and calculate loss
        predictor_z, target_z = model(x)
        loss = loss_func(predictor_z, target_z).mean()
        # Backpropagate gradients
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # Logging info
        results_data.append(
            [trial, "train", epoch, batch_i, y.item(), loss.item()])
        if not silent and batch_i % 100 == 0:
            msg = 'Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'
            print(msg.format(epoch + 1, batch_i, n, batch_i / n * 100, loss.item()))
    return results_data
28d42f77e4e861b0b1f7978800b6c7c2c5a71593
85,471
def get_output_nodes(node, cook=True):
    """
    Get output nodes of node.

    Args:
        node (NodeGraphQt.BaseNode).
        cook (bool): call this function for cook node.

    Returns:
        list[NodeGraphQt.BaseNode].
    """
    seen = {}
    for port in node.output_ports():
        for connected in port.connected_ports():
            downstream = connected.node()
            if cook and downstream.has_property('graph_rect'):
                downstream.mark_node_to_be_cooked(connected)
            # Dedupe by node id while preserving first-seen order.
            seen[downstream.id] = downstream
    return list(seen.values())
6dab0ae91bed6f7ee7fe2d1682f31b380be9efd9
85,477
import math


def apply(a, pt):
    """Applies a rotation of the angle a about the origin to the point pt"""
    cos_a, sin_a = math.cos(a), math.sin(a)
    px, py = pt[0], pt[1]
    return [cos_a * px - sin_a * py, sin_a * px + cos_a * py]
5e49b757848e0989779a42942a918dea636c90a4
85,479
def split_list(input_list):
    """
    Splits a list in half (assumes even length)

    :param input_list:
    :return: Tuple of the first and second halves of the list
    :raises NotImplementedError: if the list has odd length
    """
    if len(input_list) % 2:
        raise NotImplementedError("split_list requires a list of even length")
    mid = len(input_list) // 2
    return input_list[:mid], input_list[mid:]
298c2c8ed3606d613f94a7a48b5a8ab14c847b4d
85,481
def get_f_H_i(region, direction, y1, y2, z):
    """Heating-season solar-gain correction factor for opening i.

    Args:
        region(int): energy-efficiency region classification (1-8)
        direction(str): facade orientation of the envelope part
        y1(float): vertical distance from shade bottom edge to window top (mm)
        y2(float): window opening height (mm)
        z(float): shade overhang projection from the wall surface (mm)

    Returns:
        float: heating-season solar-gain correction factor for opening i

    Raises:
        ValueError: for unsupported region/direction combinations
    """
    # Heating season, regions 1-7, SE/S/SW facades --- eq. (1a)
    if (region in [1, 2, 3, 4, 5, 6, 7]
            and direction in ['南東', '南', '南西']):
        return min(0.01 * (5 + 20 * ((3 * y1 + y2) / z)), 0.72)
    # Heating season, regions 1-7, other facades --- eq. (1b)
    elif (region in [1, 2, 3, 4, 5, 6, 7]
            and not(direction in ['南東', '南', '南西'])):
        return min(0.01 * (10 + 15 * ((2 * y1 + y2) / z)), 0.72)
    else:
        # BUG FIX: the original constructed the ValueError without raising
        # it, silently returning None for invalid inputs.
        raise ValueError("invalid value in region or direction")
63a6966667ea810ada0c147927ffb3433184c613
85,482
def calc_dv_dqdv(cycle_df):
    """This function calculates the dv and the dq/dv for a dataframe.

    Expects columns 'Voltage(V)', 'Discharge_Capacity(Ah)' and
    'Charge_Capacity(Ah)'; adds row-to-row delta columns and the resulting
    dQ/dV columns, returning the augmented dataframe.
    """
    # Reindex 0..n-1 so positional .loc arithmetic below lines up.
    cycle_df = cycle_df.reset_index(drop = True)
    # Pre-create columns (object dtype) so .loc row assignment succeeds.
    cycle_df['dV'] = None
    cycle_df['Discharge_dQ'] = None
    cycle_df['Charge_dQ'] = None
    cycle_df['Discharge_dQ/dV'] = None
    cycle_df['Charge_dQ/dV'] = None
    # Row-wise first differences; row 0 has no predecessor and stays None.
    for i in range(1,len(cycle_df)):
        cycle_df.loc[i, ('dV')] = cycle_df.loc[i, ('Voltage(V)')] - cycle_df.loc[i-1, ('Voltage(V)')]
        cycle_df.loc[i, ('Discharge_dQ')] = cycle_df.loc[i, ('Discharge_Capacity(Ah)')] - cycle_df.loc[i-1, ('Discharge_Capacity(Ah)')]
        cycle_df.loc[i, ('Charge_dQ')] = cycle_df.loc[i, ('Charge_Capacity(Ah)')] - cycle_df.loc[i-1, ('Charge_Capacity(Ah)')]
    #calculate dq/dv based off of discharge capacity - might change this later so user can choose to use charge or discharge cap.
    cycle_df['Discharge_dQ/dV'] = cycle_df['Discharge_dQ']/cycle_df['dV']
    cycle_df['Charge_dQ/dV'] = cycle_df['Charge_dQ']/cycle_df['dV']
    return cycle_df
2eec267c85a38765c9e383a287e5f04dffa3acbb
85,484
def filter_git_files(files):
    """Remove any git/datalad files from a list of files."""
    kept = []
    for name in files:
        if name.startswith('.datalad/') or name == '.gitattributes':
            continue
        kept.append(name)
    return kept
289658e78a4cbd718b2c5c6209231501d41dade5
85,485
def max_adjacent(filename, n):
    """Find the maximum product of n adjacent digits in the input number.

    Args:
        filename (str): path to a file of digit characters (newlines ignored)
        n (int): window size

    Returns:
        int: the largest product over all windows of n adjacent digits
    """
    digits = []
    with open(filename) as f:
        for line in f.readlines():
            digits += [int(d) for d in line if d != "\n"]
    highest = 0
    # BUG FIX: the original used range(len(digits) - n), which skipped the
    # final window of n digits; the upper bound must be len(digits) - n + 1.
    for i in range(len(digits) - n + 1):
        product = 1
        for j in range(n):
            product *= digits[i + j]
        highest = max(product, highest)
    return highest
572ba9c75ce4fd0aea450326ab144bed8c447321
85,494
def readFASTA(inputfile):
    """Reads a sequence file and returns as string"""
    with open(inputfile, "r") as seqfile:
        # skip the name line
        seqfile.readline()
        seq = seqfile.read()
    return seq.replace("\n", "").replace("\t", "")
36bb41f8de17b253c9274c7c3e1cb74c3307f15d
85,496
def process_pane_capture_lines(data, nlines=None):
    """Given the string blob of data from `tmux capture-pane`, returns an
    array of line strings.

    Arguments:
        data   -- String blob of data from `tmux capture-pane`
        nlines -- Maximum number of lines to return

    Returns:
        An array of line strings; nonprintable characters are removed and
        tabs are converted to spaces.
    """
    def _clean(c):
        if c == '\t':
            return ' '
        return c if c.isprintable() else ''

    lines = [''.join(map(_clean, line)) for line in data.split('\n')]
    if nlines is not None:
        lines = lines[:nlines]
    return lines
c01bca736164c6ab71a994157325ae206f9c9315
85,497
import torch


def add_dim(tensor: torch.Tensor, index: int, *args, **kwargs) -> torch.Tensor:
    """
    A wrapper for torch unsqueeze.

    Args:
        tensor (): the tensor to unsqueeze
        index (): the dimension index at which to insert a size-1 axis

    Returns:
        The unsqueezed tensor
    """
    return torch.unsqueeze(tensor, index)
65ea0dc5347d30b2de0f4797a314626d0564da42
85,503
def voltage(card, hours=None, offset=None, vmax=None, vmin=None):
    """Retrieve current and historical voltage info from the Notecard.

    Args:
        card (Notecard): The current Notecard object.
        hours (int): Number of hours to analyze.
        offset (int): Number of hours to offset.
        vmax (decimal): max voltage level to report.
        vmin (decimal): min voltage level to report.

    Returns:
        string: The result of the Notecard request.
    """
    req = {"req": "card.voltage"}
    optional = {"hours": hours, "offset": offset, "vmax": vmax, "vmin": vmin}
    for field, value in optional.items():
        if value:
            req[field] = value
    return card.Transaction(req)
4086a7c88962c5ebe629f00e03f26562e717713c
85,505
def parse_filters(filters=None):
    """Function to parse `filters` CLI argument.

    Args:
        filters: A list of "key=value" strings.

    Returns:
        dict: Parsed filters, mapping each key to its value.
    """
    # FIX: use None instead of a mutable [] default argument.
    ret = {}
    for f in filters or []:
        # Split on the first '=' only, so values may themselves contain '='.
        k, v = f.split("=", 1)
        ret[k] = v
    return ret
e25a649b3a1cee6e5694a6b3e6bb53d048302fb5
85,507
def commonMember(a, b):
    """
    Return True if two list have at least an element in common,
    False otherwise
    """
    return bool(set(a) & set(b))
048215cf69caa934886fdf7585db69d963e8cc18
85,510
import random


def get_random_available_wd(apic):
    """Select random workspace deployment from those that are available.
    """
    candidates = apic.get_wdeployments()
    if not candidates:
        print('no deployments are available')
        return None
    if len(candidates) == 1:
        return candidates[0]
    return candidates[random.randint(0, len(candidates) - 1)]
2d5261667d3c48bde790d47fc2173f558be76010
85,511
def execute(dry_run, fct, *args, **kwargs):
    """Only execute function on non dry run mode"""
    if dry_run:
        return None
    return fct(*args, **kwargs)
292863c09a9fbd958d9e30ee1d88968ea09ff1b0
85,512
import requests
import time


def get_data(url, max_retries=5, delay_between_retries=1):
    """
    Fetch the data from http://www.mocky.io/v2/5e539b332e00007c002dacbe
    and return it as a JSON object.

    Args:
        url (str): The url to be fetched.
        max_retries (int): Number of retries.
        delay_between_retries (int): Delay between retries in seconds.
    Returns:
        data (dict)
    Raises:
        ConnectionError: when every retry failed.
    """
    for attempt in range(max_retries):
        try:
            response = requests.get(url=url)
            return response.json()
        # BUG FIX: the original bare `except:` also swallowed
        # KeyboardInterrupt and SystemExit; retry only on request/JSON
        # failures (requests' JSONDecodeError subclasses ValueError).
        except (requests.RequestException, ValueError):
            print("Exception occured on " + str(attempt + 1) + " attempt to fetch data")
            time.sleep(delay_between_retries)
    raise ConnectionError
b278ee956ceb558740e7c467097613fd65080a1f
85,513
import math


def fractionalPart(floater):
    """Returns the Fractional part (_fractionalPart(1.5) == .5) of a number"""
    # float() guards against ints (or other numerics) being passed in.
    frac, _whole = math.modf(float(floater))
    return frac
85cd53af31f33052b612a3d6196bcd9f6deb58a8
85,514
def cleanup(services):
    """Decorates scenario methods requiring a cleanup of resources.

    If a scenario method is not decorated by @cleanup all the resources
    (nova, glance and cinder) will be cleaned.

    :param services: list of services which will be cleaned.
    """
    def _decorator(func):
        # Tag the function so the runner knows which services to clean.
        setattr(func, 'cleanup_services', services)
        return func
    return _decorator
771fd28d871d9f33160182818acb7a76dca49bf2
85,515
def clean_domain(listing: dict) -> dict:
    """Extracts key listing details for API return.

    Args:
        listing (dict): Domain listing object

    Returns:
        dict: Address, property details, price & some ID info
    """
    details = listing.get("property_details", {})
    address = {
        "displayable_address": details.get("displayable_address", ""),
        "postcode": details.get("postcode", ""),
        "state": details.get("state", ""),
    }
    property_details = {
        "property_type": details.get("property_type", ""),
        "features": details.get("features", []),
        "bathrooms": details.get("bathrooms", None),
        "bedrooms": details.get("bedrooms", None),
        "carspaces": details.get("carspaces", None),
    }
    return {
        "id": listing.get("id", None),
        "listing_slug": listing.get("listing_slug", ""),
        "price": listing.get("price_details", {}).get("display_price", ""),
        "address": address,
        "property_details": property_details,
    }
7855b0a51e2820689d06e748f6bbff2e3de8df2f
85,518
def get_email_user(user):
    """
    Return a string representation of a user object for emails.

    :param user: SODARUser object
    :return: string
    """
    name = user.get_full_name()
    if not user.email:
        return name
    return name + ' ({})'.format(user.email)
87d91ef56c14bf870198ddc6d06a45fa8df62673
85,519
def _match(seq1, seq2, mismatches): """ Test if sequence seq1 and seq2 are sufficiently similar. Parameters ---------- seq1 : str First sequence. seq2 : str Second sequence. mismatches : int Number of allowed mismatches between given sequences. Returns ------- bool Do sequence `seq1` and `seq2` have less or equal than ``mismatches`` """ seq1, seq2 = seq1.upper(), seq2.upper() matches = sum([(nuc1 == 'N' or nuc2 == 'N' or nuc1 == nuc2) for nuc1, nuc2 in zip(seq1, seq2)]) return max(len(seq1), len(seq2)) - matches <= mismatches
52a2a396e776fc73a80efbc0790d032e4c3a2731
85,520
def bit_flip(bit):
    """Do a bit flip"""
    # Anything other than '1' flips to '1', exactly like the original branch.
    return '0' if bit == '1' else '1'
4787d25983aed4b0aeee15e1fadc26ffa0cafb8e
85,527
import json


def parse_sns_event(event):
    """Parse SNS event"""
    message = json.loads(event["Records"][0]["Sns"]["Message"])
    alarm_state = message["NewStateValue"]
    # Pick the InstanceId dimension out of the alarm trigger, if present.
    instance_id = next(
        (dim["value"] for dim in message["Trigger"]["Dimensions"]
         if dim["name"] == "InstanceId"),
        None,
    )
    if instance_id is None:
        raise AttributeError("ERROR: Could not find Instance ID")
    return instance_id, alarm_state
28bb63c9157eb32c9ed8d2848b5f23ecf4ee184b
85,533
from datetime import datetime


def voto(ano):
    """
    -> Tell whether voting is denied, optional or mandatory given a birth year.

    :param ano: the person's year of birth
    :return: a literal value (NEGADO, OPCIONAL or OBRIGATÓRIO)
    """
    # Age is computed against the current calendar year.
    idade = datetime.now().year - ano
    if idade < 16:
        return 'NEGADO'
    if idade < 18 or idade >= 65:
        # 16-17 year olds and seniors may vote but are not required to.
        return 'OPCIONAL'
    return 'OBRIGATÓRIO'
ead9c2e61eb8f0c24eebd07afbfef5fba96a26c8
85,535
def chunk_string(instr):
    """Convert a single multi-line string into a list of chunks (= list of strings.)

    The beginning of each chunk is denoted by a keyword beginning with '%'.
    Empty lines are ignored.
    Continuation lines (backslash at end of a line) are honoured.
    Comment lines (first non-space character is '#') are ignored.
    """
    # Splice continuation lines (trailing backslash) together first.
    joined = instr.replace('\\\n', ' ')
    kept = []
    for raw in joined.splitlines():
        # NOTE: the blank-line test runs BEFORE stripping, so whitespace-only
        # lines survive as '' entries — kept identical to the original.
        if not raw:
            continue
        stripped = raw.strip()
        if stripped.startswith('#'):
            continue
        kept.append(stripped)
    # A chunk runs from each '%'-keyword line up to (not including) the next.
    starts = [i for i, ln in enumerate(kept) if ln.startswith('%')]
    bounds = starts + [len(kept)]
    chunks = []
    for begin, end in zip(bounds, bounds[1:]):
        chunks.append(kept[begin:end])
    return chunks
09cdf9f38b888b76517ca170bb44bb70562683f9
85,539
def module_has_weights(module):
    """
    Check if the module has a parameter called "weight"

    :param module: Module
    :return: True, if module has a parameter called "weight", False otherwise
    """
    return any(name == "weight" for name, _param in module.named_parameters())
62d477bb6249870560ed0e504cb8a36ee7771c37
85,549
def _find_stream_by_id(streams, stream_id): """Find the stream that matches the given ID.""" for stream in streams: if stream["info"]["stream_id"] == stream_id: return stream
9f7785f439b022b30d720b63f8bb406db42e2728
85,554
def _reform_inputs(param_dict, out_species): """Copies param_dict so as not to modify user's dictionary. Then reformats out_species from pythonic list to a string of space separated names for Fortran. """ if param_dict is None: param_dict = {} else: # lower case (and conveniently copy so we don't edit) the user's dictionary # this is key to UCLCHEM's "case insensitivity" param_dict = {k.lower(): v for k, v in param_dict.items()} if out_species is not None: n_out = len(out_species) param_dict["outspecies"] = n_out out_species = " ".join(out_species) else: out_species = "" n_out = 0 return n_out, param_dict, out_species
4267f20f54db2fef120f5bdb44749670f323adaf
85,556
import configparser


def load_config_file(settings_file):
    """Load three sets of OBD command names."""
    parser = configparser.ConfigParser()
    parser.read(settings_file)
    # Each option holds a whitespace-separated list of command names.
    wanted = (
        ('STARTUP NAMES', 'startup'),
        ('HOUSEKEEPING NAMES', 'housekeeping'),
        ('CYCLE NAMES', 'cycle'),
    )
    startup, housekeeping, cycle = (
        parser[section][option].split() for section, option in wanted
    )
    return startup, housekeeping, cycle
4432981d5d57b63d67730cec4a78f407b37b2af1
85,558
def convert_table_to_clipboard_text(table_data):
    """
    Converts 2-D tabular data to clipboard text.

    :param table_data: 2D tabular data
    :return: clipboard text
    """
    # Tab-separated columns, newline-separated rows (spreadsheet paste format).
    lines = []
    for row in table_data:
        lines.append('\t'.join(row))
    return '\n'.join(lines)
63ed17fe26be3c9f518b0d70b490021e4cd9fa5d
85,561
import json


def write_json(data, filepath):
    """Write a JSON file

    Args:
        data: the dictionary to write
        filepath: the path to the JSON file

    Returns:
        returns a tuple of (filepath, data)
    """
    with open(filepath, 'w') as handle:
        # Sorted keys + indent=2 keeps the on-disk file diff-friendly.
        json.dump(data, handle, indent=2, sort_keys=True)
    return (filepath, data)
af48c935673ab0726b6f9eb6f79fe47a9f4e6d50
85,566
from csv import DictReader


def read_csv(fname, handler, sheet, *args, **kwds):
    """Reads the given csv file *fname* as DictReader and calls handler
    with the first argument as the reader. Optional and named parameters
    are passed to the provided handler"""
    # NOTE: `sheet` is accepted but unused — kept for interface compatibility.
    with open(fname) as stream:
        reader = DictReader(stream)
        return handler(reader, *args, **kwds)
17a4b3b2200c226e4cd55b30e3a4d99f7b72c611
85,568
def indent(s, n=4, notfirstline=False):
    """Indent string

    >>> indent("apa\\nrapa\\nbapa", 4)
    '    apa\\n    rapa\\n    bapa'
    >>> indent("apa\\nrapa\\nbapa", 4, notfirstline=True)
    'apa\\n    rapa\\n    bapa'
    """
    pad = n * ' '
    lines = s.split('\n')
    if notfirstline:
        # Joining with newline+pad leaves the very first line at its column.
        return ('\n' + pad).join(lines)
    return '\n'.join(pad + line for line in lines)
bf6f4cdb68cb2160579fc69de1025337e58ee712
85,569
from typing import Any, Dict, List

import requests


def find_employees_by_work_history(
    company_url: str, auth_dict: Dict[str, Any]
) -> List[int]:
    """
    Finds a list of employee coresignal id numbers based on where the
    employees worked.

    Args
    ------
    company_url: HttpUrl
        the linkedin_url of the company you want to find past employees of.
    auth_dict: auth_dict
        the authorization header.

    Returns
    --------
    person_ids: List[int]
        list of ints where every item is an id number of someone who
        worked at the target company
    """
    endpoint = "https://api.coresignal.com/dbapi/v1/search/member"
    payload = {"experience_company_linkedin_url": company_url}
    response = requests.post(endpoint, headers=auth_dict, json=payload)
    # Response body is a bracketed comma-separated id list, e.g. "[1,2,3]";
    # strip the brackets and parse each token as an int.
    body = response.text[1:-1]
    return [int(token) for token in body.split(",")]
8a4461be06e2bb3b1f256137f337b7eb36d4cf89
85,572
import hashlib


def md5(filename: str) -> str:
    """Get MD5 digest for a file.

    Args:
        filename (str): The file to get an MD5 digest of.

    Returns:
        str: An MD5 digest.
    """
    digest = hashlib.md5()
    with open(filename, "rb") as f:
        # Stream in 8 KiB chunks so arbitrarily large files stay memory-cheap.
        for chunk in iter(lambda: f.read(8192), b""):
            digest.update(chunk)
    return digest.hexdigest()
75d5b4c05322b366b82559479348651e3f8c4f68
85,575
from typing import Optional def _gen_resource_type_range(account_id: str, region: Optional[str]) -> str: """Generate a range key for the resource type index. Arguments: account_id (str): The AWS Account ID region (str): The AWS region (e.g. ``'eu-west-1'`` """ return f"{account_id}#{region or ''}"
5d3f889946d91503ad85592b58e0d604feb26df0
85,586
def find_all_unit_property_names(properties_dict: dict, features_dict: dict):
    """
    Finds all existing units properties and units spikes features in the
    sorting dictionaries.

    Args:
        properties_dict: mapping of unit -> {property_name: value}
        features_dict: mapping of unit -> {feature_name: value}

    Returns:
        tuple(set, set): all property names, all feature names
    """
    # Only the inner keys matter, so iterate .values() instead of unpacking
    # .items() and discarding the keys (the original did the latter).
    properties_set = set()
    for unit_properties in properties_dict.values():
        properties_set.update(unit_properties.keys())
    features_set = set()
    for unit_features in features_dict.values():
        features_set.update(unit_features.keys())
    return properties_set, features_set
6276d69d6be6d430807b5b8a5c5f7566f2bf9a5b
85,591
def buildDpaActivationMessage(dpa_config):
    """Constructs a dpa activation message."""
    # Only the id and frequency range are forwarded; other config keys drop.
    message = {}
    message['dpaId'] = dpa_config['dpaId']
    message['frequencyRange'] = dpa_config['frequencyRange']
    return message
21124acedb5b46d38c2e140131c1bb7600d5f734
85,592
def int_if_possible(val):
    """
    Return the integer value of ``val`` if conversion succeeds, else ``val``.

    Args:
        val: a scalar, or a list/tuple whose first element is used.

    Returns:
        int when the value converts cleanly, otherwise the (possibly
        unwrapped) original value.
    """
    # A list/tuple is reduced to its first element before conversion.
    if type(val) is list or type(val) is tuple:
        val = val[0]
    try:
        i = int(val)
    except ValueError:
        i = val
    return i


# NOTE(review): the SVTYPE/SVLEN block at the bottom of this dead function was
# left UNcommented in the original, which is a SyntaxError at module level
# (`return` outside a function). It belongs to make_info_dict and is now
# commented out together with the rest of it.
# def make_info_dict(callers, records, pos1, pos2):
#     """ generate 'median' results for info fields from all records """
#     callermap = {
#         "delly": {"fields": ["DV", "RV"], "tumor": 0},
#         "svaba": {"fields": ["SR", "DR"], "tumor": 1},
#         "gridss": {"fields": ["RP", "SR"], "tumor": 1},
#         "brass": {"fields": ["PS", "RC"], "tumor": 1},
#         "smoove": {"fields": ["SR", "PE"], "tumor": 0},
#     }
#     # fields = ['CHR2', 'END', 'SVTYPE', 'SVLEN'] + ["SR", "DR"] + ["DV", "RV"] + ["RP"]
#     fields = [x["fields"] for x in callermap.values()]
#     fields = [item for sublist in fields for item in sublist]
#     info = {}
#     for field in fields:
#         answers = []
#         for caller, record in zip(callers, records):
#             if caller in callermap.keys() and field in callermap[caller]["fields"]:
#                 if field in record.format:
#                     answers.append([caller, int_if_possible(record.samples[callermap[caller]["tumor"]][field])])
#                 elif field in record.info:
#                     answers.append([caller, int_if_possible(record.info[field])])
#                 # elif field in record.format:
#                 #     answers.append([caller,int_if_possible(record.samples[callermap[caller]["tumor"]][field])])
#         nanswers = len(answers)
#         if nanswers > 0:
#             # sorted_answers = sorted(answers)
#             # # doesn't quite deal with even #s correctly - can't average strings
#             # median_pos = int(nanswers / 2)
#             # median_answer = sorted_answers[median_pos]
#             # if not median_answer == 0:
#             #     info[field] = median_answer
#             for a in answers:
#                 info[a[0] + "_" + field] = a[1]
#     if "SVTYPE" in info and info["SVTYPE"] in ["DUP", "DUP:TANDEM", "DEL", "INV"]:
#         if not "SVLEN" in info:
#             info["SVLEN"] = pos2 - pos1
#     return info
566408fb0d03bd6b476c2cdcdb5c34a4fe36600f
85,595
def construct_object_strings(job_id, num_parties):
    """
    Given <job_id> and <num_parties>, return names of all pods involved
    in that computation.
    """
    # One JIFF server pod plus one compute pod per party (1-indexed).
    server = "jiff-server-{}".format(job_id)
    workers = [
        "conclave-{0}-{1}".format(job_id, party)
        for party in range(1, num_parties + 1)
    ]
    return [server] + workers
c38f2575b7ee83c6b0751f23e3a44694d1ce4b96
85,602
def example_locus(example):
    """Gets the locus field from example as a string."""
    # First (and presumably only) value of the 'locus' bytes feature.
    locus_feature = example.features.feature['locus']
    return locus_feature.bytes_list.value[0]
cfba2a67725535a9d1f2ce00995574615e8ed5e0
85,606
import re


def get_languages(title, REGEX_LANGUAGES):
    """
    Returns the languages from the input title.

    Args:
        title: string to search.
        REGEX_LANGUAGES: pattern (string or compiled regex) locating the
            language tag; the first two and last one characters of the
            match are assumed to be delimiters and are stripped off.

    Returns:
        The extracted language string, or '' when nothing matches.
    """
    match = re.search(REGEX_LANGUAGES, title)
    # `is not None` is the correct identity test for a missed match
    # (the original compared with `!= None`).
    if match is None:
        return ''
    # Drop the two leading delimiter characters and the trailing one.
    return match[0][2:-1]
54bd73bd1a1fd274f301808dc6a0f1f612b19013
85,609
def bitand(a, b):
    """ bitwise AND: 110 & 101 eq 100 """
    # Coerce both operands to int so numeric strings work too.
    left = int(a)
    right = int(b)
    return left & right
d520633291e901a29f1f68f565a44f8b87cbe610
85,611
from typing import Any, List


def can_do(obj: Any) -> List[str]:
    """
    List methods and attributes of a Python object.

    It is essentially the builtin `dir` function without the private
    methods and attributes

    @param obj: Any
    @return: list of attr names sorted by name
    """
    # Everything `dir` reports, minus names starting with an underscore.
    return sorted(name for name in dir(obj) if not name.startswith('_'))
ba41b9a9de680fa65139bfb2fa04383ffbcbb637
85,612
from bs4 import BeautifulSoup


def find_hidden_inputs(html):
    """Returns input elements whose type are hidden.
    """
    soup = BeautifulSoup(html)
    hidden_filter = {'type': 'hidden'}
    return soup.find_all('input', hidden_filter)
119f791c8dee38f4f345cb7680e37c328ebe28e2
85,614
def es_new_query(tags=None):
    """
    Query from Elasticsearch

    Args:
        tags: list of tags (defaults to no tags)

    Returns:
        Elasticsearch query body matching job postings containing the
        tags, ranking by relevance
    """
    # None sentinel instead of a mutable default argument (`tags=[]`);
    # calling es_new_query() with no arguments behaves exactly as before.
    if tags is None:
        tags = []
    lowered = [tag.lower() for tag in tags]
    return {
        "size": 25,
        "query": {
            "match": {
                "tags": " ".join(lowered),
            }
        },
    }
7afb299f215d26f34399eb67aaf8fbc798f94f0b
85,617
def vapour_pressure(temperature_C):
    """Tetens' formula for the saturation pressure of water vapour;
    the return value is in pascals.
    https://wahiduddin.net/calc/density_altitude.htm

    NOTE: result / 100 gives millibars

    >>> vapour_pressure(22) / 100  # in mb
    26.43707387256724
    """
    # Tetens exponent; constants 7.5 and 237.3 are for temperatures over water.
    exponent = (7.5 * temperature_C) / (237.3 + temperature_C)
    return 100 * 6.1078 * 10**exponent
721fa968b882d6c49bb571ca882dd461eccd5e91
85,618
from datetime import datetime


def to_time(s):
    """ Captures time from image name and convert to time variable """
    # The timestamp is whatever sits between the fixed capture-path prefix
    # and the '.jpg' suffix, e.g. center_2016_12_01_13_30_48_287.jpg.
    prefix = '/Users/lucasosouza/Documents/CarND/P3-final/IMG/center_'
    stamp = s.replace(prefix, '').replace('.jpg', '')
    return datetime.strptime(stamp, '%Y_%m_%d_%H_%M_%S_%f')
0225aff50916537e5c68607cc37c93d20dd76e1f
85,620
def is_same_shape(shape1, shape2):
    """Checks if two structures[could be list or single value for example] have the same shape"""
    # Different lengths can never be the same shape.
    if len(shape1) != len(shape2):
        return False
    return all(a == b for a, b in zip(shape1, shape2))
7b9972f232f5dc786b8da2a0dfb181e0e4847493
85,622
import socket


def read_until_prompt(socket: socket.SocketType):
    """Given a socket, read bytes from the socket until `=> `, then
    return the read bytes."""
    buf = bytearray()
    # Pull one byte at a time so we never consume anything past the prompt.
    while True:
        if buf[-3:] == bytearray(b"=> "):
            return buf
        buf.extend(socket.recv(1))
4bccc80472adcbbd2849f81c7c2d7ff3965e261c
85,623