Dataset columns: content (string, lengths 35 to 416k) · sha1 (string, length 40) · id (int64, 0 to 710k)
def dice_coefficient(outputs, targets, eps=1.0):
    """Calculates the Dice coefficient between the predicted and target masks.

    More information:
    - https://en.wikipedia.org/wiki/Sørensen–Dice_coefficient

    Args:
        outputs (torch.tensor): Outputs of the model (N x 1 x H x W).
        targets (torch.tensor): Manual segmentations (N x H x W).
        eps (float, optional): Smooth parameter (avoid zero divisions). Defaults to 1.

    Returns:
        (float): Dice coefficient.
    """
    outputs = outputs.contiguous().view(-1)
    targets = targets.contiguous().view(-1)
    intersection = (outputs * targets).sum()
    union = outputs.sum() + targets.sum()
    return (2 * intersection + eps) / (union + eps)
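A quick sanity check (hypothetical usage; assumes PyTorch is installed, since the function is framework-specific):

import torch

pred = torch.ones(1, 1, 4, 4)    # predicted mask, all ones
target = torch.ones(1, 4, 4)     # ground truth, all ones
# Perfect overlap: (2*16 + 1) / (32 + 1) = 1.0 -- the eps terms cancel here
print(dice_coefficient(pred, target))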
b74d2ae00e30bc1127c098a17b427e9fcc3ac30b
36,064
def guid2string(val):
    """
    convert an active directory binary objectGUID value as returned by
    python-ldap into a string that can be used as an LDAP query value
    """
    # bytearray() yields ints on both Python 2 and 3, so ord() is not needed
    s = ['\\%02X' % b for b in bytearray(val)]
    return ''.join(s)
b9c7db839b150278406c44bcaa10f1472b5b0598
36,065
def intersection(a, b=None):
    """
    A : bedtool
    B : bedtool

    Intersects A with B and returns two bedtools: everything in A that does
    not overlap B, and everything in A that does overlap B.
    """
    overlapping = a.intersect(b, wa=True, u=True, stream=True).saveas()
    remaining = a.intersect(b, wa=True, v=True, stream=True).saveas()
    return remaining, overlapping
d01786484431fcec20deffa057c4218af03ed03b
36,067
def calculate_center(shape: tuple):
    """
    Calculate and return the center point of ``shape``.

    :param shape: A tuple (width, height) of odd numbers
    :return: A ``tuple`` (x, y) containing the center point's coordinates
    """
    if any(d % 2 == 0 for d in shape):
        raise ValueError("width and height of shape must be odd numbers")
    x, y = [int((d - 1) / 2) for d in shape[-2:]]
    return (x, y)
dbe2485b39c7670adfb832d3626e7e9106e5b2c0
36,070
def format_datetime(datetime_):
    """Convert datetime object to something JSON-serializable."""
    if datetime_ is None:
        return None
    return datetime_.strftime('%Y-%m-%dT%H:%M:%SZ')
16fbcff4737ec985405c8267b74e9085f56d06df
36,071
def _set_key2sl(key):
    """Convert a key part to a slice part."""
    if isinstance(key, int) or key is Ellipsis:
        return key
    elif isinstance(key, slice):
        # Forbid slice steps
        if key.step is not None:
            raise ValueError("farray slice step is not supported")
        return key
    else:
        raise TypeError("expected int, slice, or ...")
c735f009ef5576f4eaf293152b715aed8d20f965
36,073
def calc_clean_summary_stats(df_time, category):
    """Calculate summary statistics (ALSFRS-R subscores, vital capacity max/average)"""
    # Add subject-project identifier, drop incomplete rows
    df_time['subj_proj_id'] = df_time['SubjectUID'].astype('str') + '_' + df_time['dataset']
    if category == 'alsfrs':
        df_time['alsfrst_bulb'] = df_time[['alsfrs1', 'alsfrs2', 'alsfrs3']].sum(axis=1, skipna=False)
        df_time['alsfrst_fine'] = df_time[['alsfrs4', 'alsfrs5a', 'alsfrs5b', 'alsfrs6']].sum(axis=1, min_count=3)
        df_time['alsfrst_gross'] = df_time[['alsfrs7', 'alsfrs8', 'alsfrs9']].sum(axis=1, skipna=False)
        df_time['alsfrst_resp'] = df_time[['alsfrsr1', 'alsfrsr2', 'alsfrsr3']].sum(axis=1, skipna=False)
        df_time = df_time[['subj_proj_id', 'SubjectUID', 'dataset', 'Visit_Date',
                           'alsfrst_bulb', 'alsfrst_fine', 'alsfrst_gross', 'alsfrst_resp',
                           'alsfrst']].drop_duplicates().dropna(how='any')
        # Drop ALSFRS-R rows if subscores do not sum correctly to total
        df_time = df_time[df_time['alsfrst'] == df_time[
            ['alsfrst_bulb', 'alsfrst_fine', 'alsfrst_gross', 'alsfrst_resp']].sum(axis=1)].copy()
    elif category == 'fvc':
        df_time['fvcp_avg'] = df_time[['fvcp1', 'fvcp2', 'fvcp3']].mean(axis=1)
        df_time['fvcp_max'] = df_time[['fvcp1', 'fvcp2', 'fvcp3']].max(axis=1)
        df_time = df_time[['subj_proj_id', 'SubjectUID', 'dataset', 'Visit_Date',
                           'fvcp_avg', 'fvcp_max']].drop_duplicates().dropna(how='any')
    elif category == 'svc':
        df_time['svcp_avg'] = df_time[['svcp1', 'svcp2', 'svcp3']].mean(axis=1)
        df_time['svcp_max'] = df_time[['svcp1', 'svcp2', 'svcp3']].max(axis=1)
        df_time = df_time[['subj_proj_id', 'SubjectUID', 'dataset', 'Visit_Date',
                           'svcp_avg', 'svcp_max']].drop_duplicates().dropna(how='any')
    # Remove any patients with Visit Dates less than 0
    # Affects 5 participants in ALSFRS-R: Emory may be data entry error
    df_time = df_time[~df_time['SubjectUID'].isin(df_time[df_time['Visit_Date'] < 0]['SubjectUID'])].copy()
    # If participant has multiple score measurements on a single day, take average
    df_time = df_time.groupby(['subj_proj_id', 'SubjectUID', 'dataset', 'Visit_Date']).mean().reset_index()
    df_time = df_time.sort_values(by=['dataset', 'SubjectUID', 'Visit_Date'])
    return df_time
318e12ee7bb86ca99e942e748907567a589aff0a
36,075
def contains(arr, s):
    """Inverted logic: only returns True when no item in arr contains s."""
    return not any(s in item for item in arr)
470ce16553db4140c0d912537dc9ea068978e367
36,077
def transform_vec(t, tf):
    """
    transforming a vector
    :param t: a list of vectors
    :param tf: a transformation matrix
    :return: the list of transformed vectors
    """
    result = []
    for item in t:
        result.append(tf * item)
    return result
238ec13ccd4d4cac088ea741239789341daa8341
36,078
def sb_xs_compare(sb, sheet):
    """Compute the absolute and percentage difference between the maximum
    fields of one CrossSection in a SectionBook and all the other CrossSections

    args:
        sb - SectionBook object
        sheet - sheet string of the CrossSection in sb that should be
                compared with all the other CrossSections
    returns:
        abs_dif - DataFrame with the absolute difference of the maximum
                  fields at ROW edges
        rel_dif - DataFrame with the relative difference of the maximum
                  fields at ROW edges (multiply by 100 to get percentage dif)"""
    rem = sb.ROW_edge_max
    abs_dif = rem - rem.loc[sheet]
    rel_dif = (rem - rem.loc[sheet]) / rem
    return (abs_dif, rel_dif)
491dd1ada0b23882d2f9de474b2ccb56c36eb705
36,079
def peak_hr(df_pk):
    """Performs Peak Hour calculations by combining directional aadt values
    with vehicle hourly volume factors determined by Metro.

    Args:
        df_pk, a pandas dataframe.

    Returns:
        df_pk, a pandas dataframe containing new PK_HR column with completed calculations.
    """
    df_pk['PK_HR'] = df_pk['dir_aadt'] * df_pk['2015_15-min_Combined']
    return df_pk
ec5dfafd2a9ee53af8c7aff59ee8a38096be91a4
36,080
def percent_change(a, b):
    """
    Returns the fractional change from a to b (multiply by 100 for a percentage).
    """
    # Divide by float(a) so the result is correct under Python 2 integer division too
    return (b - a) / float(a)
34bf312683f72107404919bb01b9f01464e23c47
36,081
import torch

def make_cuda(tensor):
    """Turn the tensor into cuda if possible."""
    if torch.cuda.is_available():
        return tensor.cuda()
    return tensor
14f05f1f27a0f846448ac89a2e8371b4acac9711
36,083
def parseConfigParser(config_data=dict({}), theConfig=None, overwrite=True):
    """
    Merges the Configuration Dictionary into a configparser.

    param config_data - dict the configuration to merge.
    param theConfig - configparser.ConfigParser the ConfigParser.
    param overwrite - boolean determining if the dict is the record of truth
                      or if theConfig is.
    """
    # NB: the mutable default for config_data is shared across calls; pass an
    # explicit dict if that is not what you want.
    try:
        if config_data is None:
            config_data = dict({})
        if theConfig is not None:
            for someSection in theConfig.sections():
                if str(someSection) not in config_data.keys():
                    config_data[someSection] = dict({})
                for someOpt in theConfig.options(someSection):
                    if (str(someOpt) not in config_data[someSection].keys()) or (overwrite is True):
                        config_data[someSection][someOpt] = theConfig.get(someSection, someOpt)
    except Exception as err:
        print(str("""Error in baseconfig.parseConfigParser"""))
        print(str(err))
        print(str(type(err)))
        print(str((err.args)))
    return config_data
de0f895c612f8fa7177c1fb23f6b864eeba0fbe5
36,085
def check_rent_history(rent_list, username):
    """ return farm ids that the given username has rented before """
    farm_rent_before = []
    for rent in rent_list:
        if rent.get('username') == username:
            farm_rent_before.append(str(rent.get('farm_id')))
    return farm_rent_before
b480651ab7e17673df91c7e34dc5d70f8f360ac7
36,086
def GetMavenVersion(api, language_version):
    """Returns the maven version."""
    if api.get('ownerDomain') == 'google.com':
        return '%s-rev%s-%s' % (api['version'], api['revision'], language_version)
    return '%s-%s-SNAPSHOT' % (api['version'], language_version)
74d60f3309b596087932406e33717c246d9e3905
36,087
def db2lin(x):
    """From decibel to linear"""
    return 10.0 ** (x / 10.0)
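Two quick spot checks (hypothetical usage), including the familiar rule of thumb that 3 dB is roughly a factor of two in power:

print(db2lin(3.0))   # ~1.995, i.e. "3 dB ≈ double power"
print(db2lin(-10))   # 0.1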
653fb36943baeb393d4cec07544df95a230a5880
36,088
def remove_nan_columns(dataframe):
    """Removes columns of data frame where any value is NaN.

    Args:
        dataframe (pandas.DataFrame): Input dataframe.

    Returns:
        tuple: tuple containing:
            pandas.DataFrame: Dataframe with columns containing NaN values removed.
            numpy.array: Array of bools indicating which columns were kept.
    """
    df_copy = dataframe.set_index("timestamp")
    selected_columns_mask = df_copy.notnull().all().values
    return dataframe.dropna(axis=1, how="any"), selected_columns_mask
058c0469dd93a71866e1a09d8dd16b80379c7d15
36,090
def ztf_magnitude_zero_point(bands=''):
    """ Return the nominal ZTF magnitude zero point for each requested band """
    dist = {'g': 26.325, 'r': 26.275, 'i': 25.660}
    return [dist[b] for b in bands.split(',')]
3aecf6b0c1ca3162924cca3494e91d2f792c0338
36,091
from typing import Dict
from typing import Any
from typing import List

def extractFields(data: Dict[str, Any], fields: List[str], returnEmpty: bool = True) -> Dict[str, Any]:
    """ Extracts the Listed Params from the dict """
    cleanedData = {}
    for field in fields:
        if field in data:
            if returnEmpty is False and data[field] in ["", None, {}, []]:
                continue
            cleanedData[field] = data[field]
    return cleanedData
f82f6f97cb585fc4ce4a40d43e24b7c0d4640e3b
36,092
import re

def to_methods(function_str):
    """
    to_methods is a helper function that aims to replace all the "toX"
    methods from PyQt4-apiV1.0.

    :param function_str: String that represents something that may have the toX methods in it.
    :type function_str: str
    :return: A string that, if a method was found, has been cleaned.
    :rtype: str
    """
    match = re.match(
        r"""
        (?P<object>.*?)   # Whatever was before it.
        \.to(?:           # Get all the options of.
            String|       # toString
            Int|          # toInt
            Float|        # toFloat
            Bool|         # toBool
            PyObject|     # toPyObject
            Ascii         # toAscii
        )\(.*?\)(?P<end>.*)""",
        function_str,
        re.VERBOSE | re.MULTILINE
    )
    if match:
        replacement = match.groupdict()["object"]
        replacement += match.groupdict()["end"]
        return replacement
    return function_str
af7a7dc5f332e10b5ea7e8fda52d1dbad016db61
36,094
from bs4 import BeautifulSoup
import re

def get_trademark_url(downloaded_htmls) -> list:
    """Parse the data, extract trademark application ID and url and save all in a dictionary."""
    tm_name_url_list = []
    for clean_tm_file in downloaded_htmls:
        with open(clean_tm_file, 'r', encoding='utf-8') as html:
            soup = BeautifulSoup(html, 'lxml')
        results_ids = soup.find_all('div', id=re.compile("flag_rowId_"))
        all_clean_ids = list(re.compile('(?=flag_rowId_).*?\"', flags=re.MULTILINE | re.IGNORECASE)
                             .findall(str(results_ids)))
        # (A bare map() call used to sit here; its lazy result was discarded,
        # so it had no effect -- the loop below does the actual cleanup.)
        for i, value in enumerate(all_clean_ids):
            no_quotation = value.replace('\"', '')
            no_id = no_quotation.replace('flag_rowId_', '')
            new_value = 'for code ' + no_id + ' in file ' + clean_tm_file + ': '
            all_clean_ids[i] = new_value
            created_url = 'https://www.tmdn.org/tmview/get-detail?st13=%s' % no_id
            tm_name_url_list.append({new_value: created_url})
    return tm_name_url_list
fc7736e8d294cefb8ea6622e8c9cf156a6c6a417
36,095
import torch

def kl_divergence(q, p):
    """Calculates the KL divergence between q and p.

    Tries to compute the KL divergence in closed form; if that is not
    possible, returns the Monte Carlo approximation using a single sample.

    Args:
        q : torch.distribution
            Input distribution (posterior approximation).
        p : torch.distribution
            Target distribution (prior).

    Returns:
        The KL divergence between the two distributions.
    """
    if isinstance(q, torch.distributions.Normal) \
            and isinstance(p, torch.distributions.Normal):
        var_ratio = (q.scale / p.scale.to(q.scale.device)).pow(2)
        t1 = ((q.loc - p.loc.to(q.loc.device)) / p.scale.to(q.loc.device)).pow(2)
        return 0.5 * (var_ratio + t1 - 1 - var_ratio.log())
    else:
        s = q.rsample()
        return q.log_prob(s) - p.log_prob(s)
3d0be3599332840057f07d9bee71681f4a710557
36,096
import re

def ipv4_address_validator(addr):
    """
    Regex to validate an ipv4 address.
    Checks if each octet is in range 0-255.
    Returns True/False
    """
    pattern = re.compile(
        r"^([1]?\d?\d|2[0-4]\d|25[0-5])\.([1]?\d?\d|2[0-4]\d|25[0-5])\.([1]?\d?\d|2[0-4]\d|25[0-5])\.([1]?\d?\d|2[0-4]\d|25[0-5])$"
    )
    return bool(pattern.fullmatch(str(addr).strip().strip("\n")))
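A few spot checks of the accept/reject behavior (hypothetical usage):

print(ipv4_address_validator("192.168.0.1"))   # True
print(ipv4_address_validator("256.1.1.1"))     # False -- 256 is out of range
print(ipv4_address_validator(" 10.0.0.8 "))    # True -- surrounding whitespace is stripped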
6204d9d54536e510b5556d13ee259ec3d7b3fc95
36,098
import base64

def load_signature(filepath: str) -> bytes:
    """Load the signature"""
    with open(filepath, "rb") as f:
        signature: bytes = base64.b64decode(f.read())
    return signature
10765be49229f3f061649cab7b6ece2bc5eb5284
36,100
def plot_hist(self, parameters=None, mean_line=False, **kwds):
    """
    Make a histogram of the WaterFrame's data.

    A histogram is a representation of the distribution of data.
    This function calls pandas.DataFrame.hist() on each parameter of the
    WaterFrame, resulting in one histogram per parameter.

    Parameters
    ----------
        parameters: str, list of str, optional (parameters=None)
            keys of self.data to plot. If parameters=None, it will plot all parameters.
        mean_line: bool, optional (mean_line=False)
            It draws a line representing the average of the values.
        **kwds:
            All other plotting keyword arguments to be passed to DataFrame.hist().
            https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.hist.html

    Returns
    -------
        ax: matplotlib.AxesSubplot
            Axes of the plot.
    """
    if parameters is None:
        parameters = list(self.parameters)
    if isinstance(parameters, str):
        parameters = [parameters]

    axes = self.data.hist(column=parameters, **kwds)
    parameter_counter = 0
    try:
        for ax in axes:
            ax.set_xlabel("Values")
            ax.set_ylabel("Frequency")
            if mean_line is True:
                if parameter_counter < len(parameters):
                    x_mean = self.mean(parameters[parameter_counter])
                    ax.axvline(x_mean, color='k', linestyle='dashed', linewidth=1)
                    parameter_counter += 1
    except AttributeError:
        # Creation of the mean line when hist() returns a 2-D grid of axes
        parameter_counter = 0
        for irow in range(len(axes)):
            for icol in range(len(axes[irow])):
                if parameter_counter < len(parameters):
                    axes[irow, icol].set_xlabel("Values")
                    axes[irow, icol].set_ylabel("Frequency")
                    if mean_line is True:
                        x_mean = self.data[axes[irow, icol].get_title()].mean()
                        axes[irow, icol].axvline(x_mean, color='k', linestyle='dashed', linewidth=1)
                    parameter_counter += 1
    return axes
5c927a170fff37691135dd367f0b42247d4aa3e1
36,101
def isDelimited(value):
    """
    This method simply checks to see if the user supplied value has delimiters.
    That is, if it starts and ends with double-quotes, then it is delimited.
    """
    if len(value) < 2:
        return False
    if value[0] == '"' and value[-1] == '"':
        return True
    else:
        return False
4a89211c27f6cf826b3b29cc718b0e00a4184a0f
36,103
def createArrays(trainDf, testDf):
    """createArrays

    Builds numpy arrays from the training and testing DataFrames.

    Input:
    trainDf -- the training DataFrame (pandas)
    testDf -- the testing DataFrame (pandas)

    Output:
    train_X -- training set (sample)
    train_y -- target feature
    test_X -- testing set
    """
    train_X = trainDf.drop(['Cover_Type'], axis=1).values  # training set (sample)
    train_y = trainDf.Cover_Type.values  # target feature (to predict)
    test_X = testDf.drop(['Cover_Type'], axis=1).values
    return train_X, train_y, test_X
6abdcfcfb33d53f90f2dffd04f624048f9738436
36,104
import types

def asmodule(module):
    """
    Return the module referenced by `module` name. If `module` is
    already an imported module instance, return it as is.
    """
    if isinstance(module, types.ModuleType):
        return module
    elif isinstance(module, str):
        return __import__(module, fromlist=[""])
    else:
        raise TypeError(type(module))
1e2def23f770f9bc84aa8af3273f830fedea3282
36,105
def flatten(list_of_lists):
    """Takes a list of lists and turns it into a list of the sub-elements"""
    return [item for sublist in list_of_lists for item in sublist]
11df76be33e96295e4e5230873368be707ae032f
36,106
def leftmost(n):
    """ Return the leftmost item in a binary tree. """
    while n.left is not None:
        n = n.left
    return n
f22dbb611a088e52c37002c1dad3a704033145e0
36,107
def binary_search(list_: list, item: int = 0):
    """Returns the index of the number to find in the SORTED list

    :param list_: SORTED list to find the item
    :type list_: list
    :param item: The number to find the index
    :type item: int
    :rtype: int, else: None
    :return: The index of the number found
    """
    lowest = 0
    highest = len(list_) - 1
    while lowest <= highest:
        middle = int((lowest + highest) / 2)
        guess = list_[middle]
        if guess == item:
            return middle
        if guess > item:
            highest = middle - 1
        else:
            lowest = middle + 1
    return None
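A quick check of both the hit and the miss path (hypothetical usage):

data = [1, 3, 5, 7, 9, 11]
print(binary_search(data, 7))   # 3 -- found at index 3
print(binary_search(data, 4))   # None -- absent values return None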
5f5256a0d26ed89bb038e0b06432686eb01e12c7
36,109
def clean_symbol_code(code):
    """Cleans Weather Symbol code"""
    sentence = code.split('_')
    return (' '.join(sentence)).capitalize()
3dd7061da7dd40c80991e94f20feeb7539ff7db8
36,111
def sdfGetPropList(mol):
    """
    sdfGetPropList() returns the list of all property names in molecule mol
    """
    sdfkeyvals = mol["keyvals"]
    return [pair[0] for pair in sdfkeyvals] if sdfkeyvals else []
da897a54a4e0bfc59ca2bd78a2d39d0b64b32db1
36,112
import os

def parserSingle(in_file):
    """
    single-file placeholder for an eventual "do preproc step";
    currently a stub that returns its input unchanged
    """
    out_file = os.path.abspath(in_file)  # computed but unused for now
    return in_file
7091526e6809fc97f565666387829b84bb86f8bf
36,116
from typing import Dict

def str_dict_to_bytes(str_dict: Dict[str, str]) -> Dict[bytes, bytes]:
    """ Converts the key and the value of a dict from str to bytes. """
    out = {}
    for key, value in str_dict.items():
        out[key.encode()] = value.encode()
    return out
43b62237b9c80c6e613c363aace17324647086b9
36,117
import subprocess
import logging

def update_docker(canary, version='18.06.3~ce~3-0~ubuntu'):  # pragma: no cover
    """Update the docker package prior to reboot.

    This will automatically keep the docker package up to date, and running it
    prior to reboot ensures that no containers are running, so no disruptions.
    This will also remove older docker packages (docker-engine) automatically.

    If the bot is a docker_canary, then the latest version of docker-ce will
    be installed; otherwise the pinned version= version will be installed.

    Args:
        canary: (bool) If this is a canary host or not.
        version: (str) The version of docker-ce to ensure is installed.
    """
    # Not doing a lot of dpkg/apt-cache checking here as the runtime to just
    # try an install is only 1.1 seconds.
    try:
        subprocess.check_call(['/usr/bin/apt-get', 'update'])
    except subprocess.CalledProcessError:
        # We don't care enough to abort reboot here, only if install fails.
        logging.exception('Unable to apt-get update.')

    if canary:
        package_with_version = 'docker-ce'
    else:
        package_with_version = 'docker-ce=%s' % version
    try:
        subprocess.check_call(['/usr/bin/apt-get', 'install', '-y', package_with_version])
    except subprocess.CalledProcessError:
        logging.exception('Unable to install/upgrade docker-ce to %s.', version)
        return False
    return True
81616509688ffac67d03eb8cf40db8b82c25d01b
36,118
def gcd(x, y):
    """
    Euclid's algorithm - If n divides m evenly, then n is the GCD.
    Otherwise the GCD is the GCD of n and the remainder of m divided by n.
    """
    if x % y == 0:
        return y
    else:
        return gcd(y, x % y)
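Tracing the recursion makes the reasoning concrete (hypothetical usage):

# gcd(48, 18): 48 % 18 = 12 -> gcd(18, 12): 18 % 12 = 6 -> gcd(12, 6): 12 % 6 = 0 -> 6
print(gcd(48, 18))   # 6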
db31a4b36929c9b7e83508beee724b94eea27a12
36,119
def string_breakdown(the_string: str) -> list:
    """Accepts a string and returns a list of tuples of form [(char, count)]"""
    checked = []
    components = []
    for char in the_string:
        if char not in checked:
            count = the_string.count(char)
            checked.append(char)
            components.append((char, count))
    return components
2283297aff80d548625930482249a31284e49eae
36,121
def read_lines(path):
    """Return list of lines comprising specified file.

    Newlines at the end are included.
    """
    with open(path, "r") as file:
        return file.readlines()
e0986c649ab6b64c665e8aa887c2625ad21b77be
36,122
from typing import List

def _sampling_from_alias_wiki(
    alias: List[int],
    probs: List[float],
    random_val: float,
) -> int:
    """
    Draw sample from a non-uniform discrete distribution using Alias sampling.

    This implementation is aligned with the wiki description using 1 random number.

    :param alias: the alias list in range [0, n)
    :param probs: the pseudo-probability table
    :param random_val: a random floating point number in the range [0.0, 1.0)

    Return the picked index in the neighbor list as next node in the random walk path.
    """
    n = len(alias)
    pick = int(n * random_val)
    y = n * random_val - pick
    if y < probs[pick]:
        return pick
    else:
        return alias[pick]
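A small worked check (hypothetical usage; the alias/probs tables below are precomputed by hand for the two-outcome distribution P = [0.25, 0.75]):

import random

alias, probs = [1, 1], [0.5, 1.0]   # alias-method tables for P = [0.25, 0.75]
counts = [0, 0]
for _ in range(100_000):
    counts[_sampling_from_alias_wiki(alias, probs, random.random())] += 1
print([c / 100_000 for c in counts])   # approximately [0.25, 0.75]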
c67d2d698ace15c798cda51049b7ddd880c48a71
36,124
import torch

def get_bins(attr, ncenters, radius):
    """Sort attributes into bins and return indices

    Parameters:
    -----------
    attr : torch FloatTensor, N x Na
        Attributes
    """
    # sort attributes
    centers = torch.linspace(0, 1, ncenters).to(attr.device)
    lower_edge = centers - radius
    upper_edge = centers + radius
    smt = torch.logical_and(attr[:, :, None] > lower_edge[None, None, :],
                            attr[:, :, None] < upper_edge[None, None, :])
    # sort into bins by doing the generalized outer product between all rows
    smt = smt.swapaxes(0, 1).swapaxes(1, 2)
    bins = smt[0]
    for i in range(1, attr.shape[1]):
        bins = torch.einsum('i...,j...->ij...', bins, smt[i])
    # return flattened bin tensor
    return bins.reshape(-1, attr.shape[0])
9c92e6208ac272eff4be6f6b868ae12cc989205e
36,125
from typing import List

def _filter_nums(*, nums: List[int], min_lim: int = 0, max_lim: int) -> List[int]:
    """Given two limits, keep only the elements of the list in the half-open
    range (min_lim, max_lim]."""
    return [num for num in nums if num > min_lim and num <= max_lim]
8e6e0b274e17eb38dcd25e0b307a26b26f1d1fbd
36,126
def get_item_attr(idmap, access):
    """
    Utility for accessing dict by different key types (for get).

    For example::

        >>> idmap = {(1,): 2}
        >>> get_item_attr(idmap, 1)
        2
        >>> idmap = {(1,): 2}
        >>> get_item_attr(idmap, {"pk": 1})
        2
        >>> get_item_attr(idmap, (1,))
        2
    """
    if isinstance(access, dict):
        keys = []
        for names in sorted(access):
            keys.append(access[names])
        return idmap.get(tuple(keys))
    elif isinstance(access, int):
        return idmap.get((access,))
    else:
        return idmap.get(access)
abaf1250c34b94393851e7a0764af3bf1c3eb116
36,127
import torch

def gdt_torch(X, Y, cutoffs, weights=None):
    """ Assumes x,y are both (B x D x N). see below for wrapper.
        * cutoffs is a list of `K` thresholds
        * weights is a list of `K` weights (1 x each threshold)
    """
    device = X.device
    if weights is None:
        # create on the same device as X (the original left this on the CPU,
        # which breaks the weighted mean below for CUDA inputs)
        weights = torch.ones(1, len(cutoffs), device=device)
    else:
        weights = torch.tensor([weights]).to(device)
    # set zeros and fill with values
    GDT = torch.zeros(X.shape[0], len(cutoffs), device=device)
    dist = ((X - Y)**2).sum(dim=1).sqrt()
    # iterate over thresholds
    for i, cutoff in enumerate(cutoffs):
        GDT[:, i] = (dist <= cutoff).float().mean(dim=-1)
    # weighted mean
    return (GDT * weights).mean(-1)
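A sanity check (hypothetical usage): identical coordinates fall inside every cutoff, so the score is exactly 1 for each batch element.

X = torch.zeros(2, 3, 10)             # batch of 2, 3-D coords, 10 points
Y = X.clone()
print(gdt_torch(X, Y, [1, 2, 4, 8]))  # tensor([1., 1.]) -- perfect agreement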
7d4646504bfff5651ca9c0b15519f000546fca56
36,128
def validate_domain(email, domains):
    """Check that the domain matches that of the assessment."""
    if email.split("@")[1].lower() in domains:
        return True
    return False
b1fbf9ca276ce72b826a0a49086d3c6bdc044ccb
36,129
def round_to_second(dt):
    """
    Truncate a datetime object to whole seconds (drops the microseconds).

    :param dt: datetime to truncate
    :return: datetime with microsecond set to 0
    """
    res = dt.replace(microsecond=0)
    return res
2be5f589be54bbb113b307d5b38ba5d6e0a13a32
36,131
import re

def sub_strong(line, regex):
    """Surround some patterns in <strong> tags."""
    result = re.search(regex, line)
    if result:
        for match in result.groups():
            line = line.replace(match, '<strong>{}</strong>'.format(match), 1)
    return line
a539420012db702501f293c081457392ba0ec4a7
36,132
from typing import List
from typing import Dict
from typing import Any

def split_docker_list(d_list: List[Dict[str, Any]]):
    """Splits the list of docker in test generation docker and test environment docker.

    The split is done by checking for the 'generator' key in the dockers dict defined
    in the json file. Only the docker for the test case / test data generation should
    contain that key. All others, which are used to set the environment for running
    the tests, must not have it.

    Args:
        d_list: list of dictionaries with docker information

    Returns:
        (list, list):
            - list of dictionaries with test generator docker information
              (can not contain more than one element)
            - list of dictionaries with test environment docker information

    Raises:
        RuntimeError: if there is more than one test generator docker defined.
    """
    test_d_list = []
    generator_d_list = []
    num_test_generator = 0
    for docker in d_list:
        if "generator" in docker:
            generator_d_list.append(docker)
            num_test_generator += 1
            if num_test_generator > 1:
                error_msg = "More than one docker is defined as 'generator'. " \
                            "Only one item in dockerlist json file should contain the 'generator' key."
                raise RuntimeError(error_msg)
        else:
            test_d_list.append(docker)
    return generator_d_list, test_d_list
6c01f81db08270c60d08153448dd8989ff87ad4c
36,133
def in_docker():
    """Returns: True if running in a docker container, else False"""
    try:
        with open("/proc/1/cgroup", "rt") as ifh:
            contents = ifh.read()
            return any(word in contents for word in ["actions_job", "docker"])
    except OSError:
        return False
eeb000ee4928105c5c978045ae50a27b85d5b735
36,135
def crimeScore(data, area=0.04):
    """
    A simple function that divides the number of crimes by the total search
    radius (0.04) to produce a rough "crime score".
    """
    return int(len(data["crimes"]) / area)
a684f9b37bdb403225dadf35ef4c92b6650b89d7
36,136
def round_to_int(number, precision):
    """Round a number to the nearest multiple of precision"""
    precision = int(precision)
    # Integer arithmetic throughout, so the result is an int rather than a float
    rounded = (int(number) + precision // 2) // precision * precision
    return rounded
75f7b23c3f6426dc3ed0c54f3baa491e3c658a14
36,138
def mod_exponent(base, power, mod):
    """
    Modular exponential of a number

    :param base: number which is going to be raised
    :param power: power to which the number is raised
    :param mod: number by which modulo has to be performed
    :return: number raised to power and modulo by mod [(base ^ power) % mod]
    """
    res = 1                 # Initialize result
    base = base % mod       # Update base if it is greater than or equal to mod
    while power > 0:
        if power & 1:       # If power is odd, multiply base into the result
            res = (res * base) % mod
        power = power >> 1  # power must be even now
        base = (base * base) % mod
    return res
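A quick check against Python's built-in three-argument pow (hypothetical usage):

print(mod_exponent(2, 10, 1000))                      # 24, since 2**10 = 1024 and 1024 % 1000 = 24
print(mod_exponent(2, 10, 1000) == pow(2, 10, 1000))  # True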
f0b0de989c8ab38a11ca41126e30234765571ca6
36,140
import requests

def post_input_file(input_text):
    """Posts a string as a file (multipart/form-data) named 'input' to the
    REST API and returns the response.
    """
    url = 'http://localhost:5000/parse'
    return requests.post(url, files={'input': input_text})
4b6613e95f3221e1a92c88d449aa41a562182304
36,141
def table2list(table, consider_floats_only=True):
    """
    Given a pandas table, convert it to a list of values

    :param consider_floats_only: should we consider only numeric values?
    :return list
    """
    my_list = []
    for col in table.columns:
        column_values = table[col]
        for i in column_values:
            try:
                my_list.append(int(i))
            except (TypeError, ValueError):
                my_list.append(i)
    # NB: numeric values survive the int() cast above, so despite the
    # parameter name the filter below keeps the integer-cast values
    if consider_floats_only:
        return [i for i in my_list if isinstance(i, int)]
    return my_list
3469123bd5e1fb2261164178df1d834104504e72
36,142
import subprocess

def list_installed_pkgs(args):
    """Lists the members of a given category of packages; returns list."""
    prefix = "pm list packages"
    if args.user:
        suffix = "-3"
    elif args.system:
        suffix = "-s"
    elif args.disabled:
        suffix = "-d"
    else:
        suffix = ""
    # Each output line looks like "package:com.example.app"; [8:] strips the "package:" prefix
    pkgs = [i[8:] for i in subprocess.Popen("{0} {1}".format(prefix, suffix),
                                            stdout=subprocess.PIPE,
                                            stderr=subprocess.PIPE,
                                            shell=True).communicate()[0].decode("utf-8").split("\n")
            if i]
    return pkgs
2db06ef60d6c3b79f2c3a9ced8ed3aa6d673da06
36,143
import os

def find_candidate_images(input_path):
    """
    Finds all candidate images in the given folder and its sub-folders.

    Returns:
        images: a list of absolute paths to the discovered images.
    """
    images = []
    for root, dirs, files in os.walk(input_path):
        for name in files:
            file_path = os.path.abspath(os.path.join(root, name))
            if os.path.splitext(name)[1].lower() in ['.jpg', '.png', '.jpeg']:
                images.append(file_path)
    print(images)
    return images
0078608652b311758e2b5d43a4772a17b7970f96
36,144
import numpy

def assemble_matrixQ(mat_prop, fail_type=None):
    """ Assembles Q matrix (reduced elastic matrix) for a given layer. """
    if not isinstance(mat_prop, dict):
        raise TypeError('mat_prop must be a dictionary')

    # Degradation Factor (for failed layers)
    df = 0.001

    if fail_type == "fiber" or fail_type == "shear":
        E1 = mat_prop["E1"]*df
        E2 = mat_prop["E2"]*df
        n12 = mat_prop["n12"]*df
        G12 = mat_prop["G12"]*df
        n21 = n12*E2/E1
    elif fail_type == "matrix":
        E1 = mat_prop["E1"]
        E2 = mat_prop["E2"]*df
        n12 = mat_prop["n12"]*df
        G12 = mat_prop["G12"]*df
        n21 = n12*E2/E1
    else:
        E1 = mat_prop["E1"]
        E2 = mat_prop["E2"]
        n12 = mat_prop["n12"]
        G12 = mat_prop["G12"]
        n21 = n12*E2/E1

    Q11 = E1/(1 - n12*n21)
    Q12 = n12*E1*E2 / (E1 - (n12 ** 2) * E2)
    Q22 = E1*E2 / (E1 - (n12 ** 2) * E2)
    Q66 = G12

    Q = numpy.array([[Q11, Q12, 0],
                     [Q12, Q22, 0],
                     [0, 0, Q66]])
    return Q
a0a12b3891e8e9fde9ed16faf6c402941523b107
36,146
def _get_low_pressure(self, g_l, d_l, frac_cells):
    """
    Obtains the coefficients of the (projected) lower-dimensional pressures

    Parameters
    ----------
    g_l : PorePy object
        Lower-dimensional grid.
    d_l : Dictionary
        Lower-dimensional data dictionary.
    frac_cells : NumPy nd-Array
        Lower-dimensional fracture cells

    Raises
    ------
    ValueError
        If the pressure has not been reconstructed

    Returns
    -------
    p_low : NumPy nd-Array
        Coefficients of the (projected) lower-dimensional pressures
    """
    # Retrieve lower-dimensional reconstructed pressure coefficients
    if "recon_p" in d_l[self.estimates_kw]:
        p_low = d_l[self.estimates_kw]["recon_p"].copy()
    else:
        raise ValueError("Pressure must be reconstructed first")
    p_low = p_low[frac_cells]
    return p_low
374c812a39aa045b68d70515d62d13aad69655c3
36,147
def read_from_y(state):
    """Reads the contents of the Y scratch register."""
    return state.y_register
f5d6f258f267d4c1392b393ffc680cdf8aa59748
36,150
def case_of(value: str) -> int:
    """Returns 1 for all uppercase, 0 for all lowercase, and -1 for mixed case."""
    if all(map(lambda x: x.isupper(), value)):
        return 1
    elif all(map(lambda x: x.islower(), value)):
        return 0
    return -1
e0b56890ff73a9b59385e716be6ea3c41ab03bc6
36,151
def __filter_event_type__(trace_events, event_type):
    """
    Looks for the events in the trace matching the event type

    :param trace_events: Events found in the trace (filtered by family).
    :param event_type: Event type to filter.
    :return: Filtered trace
    """
    filtered = []
    for line in trace_events:
        if line[0] == event_type:
            filtered.append(line)
    return filtered
4a4b49272014ff2a2f552f3a4a381f859a886a5a
36,152
from typing import Callable

def map(f: Callable, collection):
    """Transform each element of a collection.

    Examples
    --------
    .. doctest::

        >>> a = ['The', 'quick', 'brown', 'fox']
        >>> hl.eval_expr(hl.map(lambda x: hl.len(x), a))
        [3, 5, 5, 3]

    Parameters
    ----------
    f : function ( (arg) -> :class:`.Expression`)
        Function to transform each element of the collection.
    collection : :class:`.ArrayExpression` or :class:`.SetExpression`
        Collection expression.

    Returns
    -------
    :class:`.ArrayExpression` or :class:`SetExpression`.
        Collection where each element has been transformed by `f`.
    """
    return collection._bin_lambda_method("map", f,
                                         collection.dtype.element_type,
                                         lambda t: collection.dtype.__class__(t))
677c8b5185e45126c85448020a6a914c0938f785
36,154
def _is_algorithm_kd_tree(model):
    """Checks if the algorithm for the scikit model is set to 'kd_tree'."""
    # Use == for string comparison; `is` only tests identity and is unreliable here
    return model.algorithm == 'kd_tree' or (model.algorithm == 'auto' and model._fit_method == 'kd_tree')
24bb52161747753c462670c36609e3afe9989ca0
36,155
import argparse

def parse_args():
    """Parse command line arguments."""
    parser = argparse.ArgumentParser(description='Convert plate reader '
                                     'Excel file to fraction infectivity '
                                     'csv.')
    parser.add_argument('infile', type=str, help='Path to excel file '
                        'to convert to fraction infectivity.')
    parser.add_argument('outfile', type=str, help="Path for output "
                        "fraction infectivity csvs.")
    parser.add_argument('sheet_map', type=str, help="File to map "
                        "plate number and samples. Must have columns: "
                        "'Plate', 'Sample', 'Virus', 'SampleNum', "
                        "'PlateLayout', 'StartDil', and 'DilFactor'")
    parser.add_argument('plate_layouts_dir', type=str, help='Directory '
                        'containing csv files specifying plate layouts.')
    return parser.parse_args()
4a6c309e90ec4b6752d4c197946414b5bed94c42
36,157
from functools import reduce
from operator import mul

def prod(iterable):
    """
    Return the product of all numbers in an iterable.
    """
    return reduce(mul, iterable)
d4ca281afd572aaae7da4bf13696ebc8d05be32f
36,161
def span_search(line):
    """Detect whether the line describes a date range; returns an int code
    (0 for a range, -1 for a single-day entry, -2 for a range crossing a
    month boundary)."""
    zen_tilde = '~'
    # Column where the full-width '~' is first found; .find returns -1 if absent
    index = line.find(zen_tilde)
    if index != -1:
        # Guard against '~' used for grade spans such as "2~5年".
        # Relies on the convention that '年' appears two characters after '~';
        # in that case this is really a single-day entry.
        if line.startswith('年', index + 2):
            return -1
        # Special case: a date range that crosses into the next month
        elif line.startswith('月', index + 2) or line.startswith('月', index + 3):
            return -2
        # Date range
        else:
            return 0
    # Single-day entry
    else:
        return -1
ed1b1ea120b5ef1c09d6dac97a491aaf1615897e
36,163
def get_project_from_manifest(manifest, pid):
    """
    Returns the project entry from the manifest

    :param manifest:
    :param pid:
    :return:
    """
    if 'projects' not in manifest:
        return None
    for project in manifest['projects']:
        if project['identifier'] == pid:
            return project
    return None
5f72faffeb14bc20568c2898ca3b9c65b2edb53f
36,164
def case_conversation(text, text_lower=True):
    """
    This function returns the corpus with all tokens lowercase or uppercase

    text: String corpus
    text_lower: Boolean, default is True

    returns: Text corpus with all tokens converted
    """
    if text_lower:
        return text.lower()
    else:
        return text.upper()
cc37b8473114de08c1fe0a01638eac063eebdc2f
36,165
def all_properties(obj):
    """
    Return a list of names of non-methods of 'obj'
    """
    noncallables = []
    for name in dir(obj):
        if not hasattr(getattr(obj, name), '__call__'):
            noncallables.append(name)
    return noncallables
58a86250e03e9cb4c9f6567eaf072173ff419e73
36,167
import re def get_output1(filename): """ Supports Python, Julia Return set of tuples: source_file, output_file Use group_tuple_pairs() to organize these for output to yaml etc. Cases: (1) writer(open()) writer.writerows(object) (2) with open() write example from pythia: def compose_peerless(context, config, env): print(".", end="", flush=True) this_output_dir = context["contextWorkDir"] symlink_wth_soil(this_output_dir, config, context) xfile = pythia.template.render_template(env, context["template"], context) with open(os.path.join(context["contextWorkDir"], context["template"]), "w") as f: f.write(xfile) return context["contextWorkDir"] (3) file = open() write... close() """ def get_open_filepath(line): quoted_stuff = re.findall(r"['\"](.*?)['\"]", line) # single and double quotes if quoted_stuff: # remove a or w directives from list for s in ['a', 'w']: if s in quoted_stuff: quoted_stuff.remove(s) if quoted_stuff is None: # e.g.: with open(output_csv, 'w') as csv_file: obj_name = line.split('(')[0].split(',')[0] if obj_name in object_dict: obj_name = object_dict[obj_name] return obj_name else: return ' '.join(quoted_stuff) else: try: return line.split('(')[1].split(',')[0] except: return '' object_dict = {} # dict of objects defined in file output_files = [] with open(filename, 'rt', encoding='utf8') as file: lines = file.read().splitlines() line_num = None # store line with open() file_path = None for idx, line in enumerate(lines): line = line.strip() # ignore comment lines and remove inline comments if (line.startswith('#')): continue line = line.split('#')[0].strip() if line.count('=') == 1: # Parse simple assignment lines # e.g. xfile = pythia.template.render_template(env, context["template"], context) sa = line.split('=') object_dict[sa[0].strip()] = sa[1].strip() elif '.open(' in line: # Handle single line write e.g. csv.writer(open()) line_num = idx + 1 file_path = get_open_filepath(line) ## Handle with open() block elif ('with open(' in line): #t = (filename, "ln {0:>4}: {1}".format(idx+1, get_open_filepath(line))) #with_open = True line_num = idx + 1 file_path = get_open_filepath(line) elif ('write(' in line or 'safe_dump(' in line) and file_path is not None: obj_name = line.split('(')[1].split(')')[0] if obj_name in object_dict: obj_name = object_dict[obj_name] #output_dict = dict(line = line_num, path = file_path, write = obj_name) output_files.append((filename, line_num, file_path, obj_name)) file_path = None return output_files
27d9edc8053c1832f1b43a617bfda7772a538494
36,168
def power_level(serial: int, x: int, y: int) -> int:
    """Compute the power level of the fuel cell at x, y. """
    rack_id = x + 10
    p = rack_id * y + serial
    p *= rack_id
    p = (p // 100) % 10   # keep only the hundreds digit
    return p - 5
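This matches the worked example from Advent of Code 2018 day 11, where this puzzle appears: the cell at (3, 5) with grid serial 8 has power level 4.

# rack_id = 13; 13*5 + 8 = 73; 73*13 = 949; hundreds digit 9; 9 - 5 = 4
print(power_level(8, 3, 5))   # 4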
316895b97f752867171ff4dd0463ea5395228b97
36,170
from pathlib import Path

def get_nprocs(modelpath):
    """Return the number of MPI processes specified in input.txt."""
    # Hard-coded to line 22 of input.txt, stripping any trailing '#' comment
    return int(Path(modelpath, 'input.txt').read_text().split('\n')[21].split('#')[0])
82faad4d21e5a9acb7de123338e71b1123ad79cc
36,172
import re

def padding(text, p=' ', remove_empty_lines=True):
    """
    normalize newlines
    remove leading and trailing spaces
    collapse spaces
    collapse newlines
    pad lines
    """
    # parameter renamed from `str` to `text` to stop shadowing the builtin
    text = re.sub(r'[ \t]+\n', '\n', text)
    if remove_empty_lines:
        text = text.replace('\n\n', '\n')
        text = text.replace('\n\n', '\n')
    text = p + text.replace('\n', '\n' + p)
    return text
d04c4f9d2a2da4dac050bbe567482821f2b8de9a
36,173
def prep_data_single_sample_st(
    adata, adata_i, use_rep, features, blur_pix, histo, fluor_channels
):
    """
    Prepare dataframe for tissue-level clustering from a single AnnData sample

    Parameters
    ----------
    adata : anndata.AnnData
        AnnData object containing Visium data
    adata_i : int
        Index of AnnData object for identification within `st_labeler` object
    use_rep : str
        Representation from `adata.obsm` to use as clustering data (e.g. "X_pca")
    features : list of int or None, optional (default=`None`)
        List of features to use from `adata.obsm[use_rep]` (e.g. [0,1,2,3,4] to use
        first 5 principal components when `use_rep`="X_pca"). If `None`, use all
        features from `adata.obsm[use_rep]`
    blur_pix : int, optional (default=2)
        Radius of nearest spatial transcriptomics spots to blur features by for
        capturing regional information. Assumes hexagonal spot grid (10X Genomics
        Visium platform).
    histo : bool, optional (default `False`)
        Use histology data from Visium anndata object (R,G,B brightfield features)
        in addition to `adata.obsm[use_rep]`? If fluorescent imaging data rather
        than brightfield, use `fluor_channels` argument instead.
    fluor_channels : list of int or None, optional (default `None`)
        Channels from fluorescent image to use for model training (e.g. [1,3] for
        channels 1 and 3 of Visium fluorescent imaging data). If `None`, do not
        use imaging data for training.

    Returns
    -------
    pd.DataFrame
        Clustering data from `adata.obsm[use_rep]`
    """
    tmp = adata.obs[["array_row", "array_col"]].copy()
    tmp[[use_rep + "_{}".format(x) for x in features]] = adata.obsm[use_rep][:, features]
    if histo:
        assert (
            fluor_channels is None
        ), "If histo is True, fluor_channels must be None. \
            Histology specifies brightfield H&E with three (3) features."
        print("Adding mean RGB histology features for adata #{}".format(adata_i))
        tmp[["R_mean", "G_mean", "B_mean"]] = adata.obsm["image_means"]
    if fluor_channels:
        assert (
            histo is False
        ), "If fluorescence channels are given, histo must be False. \
            Histology specifies brightfield H&E with three (3) features."
        print(
            "Adding mean fluorescent channels {} for adata #{}".format(
                fluor_channels, adata_i
            )
        )
        tmp[["ch_{}_mean".format(x) for x in fluor_channels]] = adata.obsm[
            "image_means"
        ][:, fluor_channels]
    tmp2 = tmp.copy()  # copy of temporary dataframe for dropping blurred features into
    cols = tmp.columns[
        ~tmp.columns.str.startswith("array_")
    ]  # get names of training features to blur
    # perform blurring by nearest spot neighbors
    for y in range(tmp.array_row.min(), tmp.array_row.max() + 1):
        for x in range(tmp.array_col.min(), tmp.array_col.max() + 1):
            vals = tmp.loc[
                tmp.array_row.isin([i for i in range(y - blur_pix, y + blur_pix + 1)])
                & tmp.array_col.isin(
                    [i for i in range(x - 2 * blur_pix, x + 2 * blur_pix + 1)]
                ),
                :,
            ]
            vals = vals.loc[:, cols].mean()
            tmp2.loc[
                tmp2.array_row.isin([y]) & tmp2.array_col.isin([x]), cols
            ] = vals.values
    # add blurred features to anndata object
    adata.obs[["blur_" + x for x in cols]] = tmp2.loc[:, cols].values
    return tmp2.loc[:, cols]
310fe731e44d47bca1af0b0a782ae5fdc2caa9f3
36,176
def _get_QColorButton(self):
    """ Get current value for QColorButton """
    return self.color()
7c56c2401c9c367f34b44b5ec347f5fd10907abc
36,178
def _unique(session, cls, queryfunc, constructor, kw, unique_key='name'):
    """
    Checks if a class instance is unique or not, only for those classes which
    have unique attributes, like the User name and Image img_path.

    :param session: Session object
    :param cls: Class object
    :param queryfunc: The query that gets the unique value
    :param constructor: Constructor function, currently default
    :param kw: Arguments, please use keywords
    :param unique_key: the name of the unique attribute
    :return:
    """
    cache = getattr(session, '_unique_cache', None)
    if cache is None:
        session._unique_cache = cache = {}
    key = (cls, kw.get(unique_key))
    if key in cache:
        print(f'The {kw.get(unique_key)} {unique_key} is not unique, try something else!')
        return cache[key]
    else:
        with session.no_autoflush:
            q = session.query(cls)
            q = queryfunc(q, kw.get(unique_key))
            obj = q.first()
            if not obj:
                obj = constructor(**kw)
                session.add(obj)
            else:
                if unique_key == 'name':
                    print(f'The {obj.name} {unique_key} is not unique, try something else!')
                else:
                    print(f'The {obj.img_path} {unique_key} is not unique, try something else!')
        cache[key] = obj
        return obj
b1ca3f004a288c489f424ae6c60e5f71914f5986
36,179
def ALMACombineCals(Cal1, Cal2=None, Cal3=None, Cal4=None):
    """
    Combine a unique list of calibrators

    Drops None sources, returns list of unique names
    * Cal1 = List of calibrators (from ALMACalModel)
    * Cal2 = List of calibrators, ignore if None
    * Cal3 = List of calibrators, ignore if None
    * Cal4 = List of calibrators, ignore if None
    """
    ################################################################
    clist = []   # Calibrator list
    # The four identical blocks of the original collapse into one loop;
    # stripping before the membership test also catches duplicates that
    # differ only in surrounding whitespace.
    for cals in (Cal1, Cal2, Cal3, Cal4):
        if not cals:
            continue
        for Cal in cals:
            if Cal['Source']:
                name = Cal['Source'].strip()
                if name not in clist:
                    clist.append(name)
    return clist
18866c26f904fbd1fa77651112efba77a3d7ff55
36,180
import pathlib

def is_relative_to(path: pathlib.Path, base: pathlib.Path) -> bool:
    """Check whether `path` is contained inside `base`."""
    try:
        path.relative_to(base)
        return True
    except ValueError:
        return False
6a9bed0700d87d9c74ba8980f25f3de522fa5ca8
36,181
import os

def blankoutComments(filename):
    """
    Replace a file with blanks everywhere the file had C/C++ comments.
    Return the resulting file as a list of strings.
    """
    tmp_file = '/tmp/inserter.' + str(os.getpid())
    os.system('stripcomments.py infile=' + filename +
              ' language=cpp ' +
              'replace=True > ' + tmp_file)
    lstr = open(tmp_file).readlines()
    os.unlink(tmp_file)
    return lstr
6b8d9922b77d220d8309aeb1f0caa61bcd4e02ab
36,182
def skl_calculation_metrics(result_labels, test_labels):
    """
    :param result_labels:
    :param test_labels:
    :return:

    ref: http://blog.exsilio.com/all/accuracy-precision-recall-f1-score-interpretation-of-performance-measures/
    """
    tp = 0.0
    fp = 0.0
    tn = 0.0
    fn = 0.0
    size = len(result_labels)
    for i in range(size):
        # Count true positives
        if result_labels[i] == 'g' and test_labels[i] == 'g':
            tp += 1.0
        # Count false positives
        if result_labels[i] == 'g' and test_labels[i] == 'b':
            fp += 1.0
        # Count true negatives
        if result_labels[i] == 'b' and test_labels[i] == 'b':
            tn += 1.0
        # Count false negatives
        if result_labels[i] == 'b' and test_labels[i] == 'g':
            fn += 1.0
    accuracy = (tp + tn) / (tp + fp + tn + fn)
    precision = tp / (tp + fp)
    recall = tp / (tp + fn)
    f1_score = 2.0 * (recall * precision) / (recall + precision)
    return accuracy, precision, recall, f1_score
dfc2bb19a4d674e83aaed953017ec2e4b91fee3e
36,183
def multiples35(number):
    """
    If we list all the natural numbers below 10 that are multiples of 3 or 5,
    we get 3, 5, 6 and 9. The sum of these multiples is 23.

    Finish the solution so that it returns the sum of all the multiples of 3
    or 5 below the number passed in.

    Note: If the number is a multiple of both 3 and 5, only count it once.

    :param number: An integer value.
    :return: The sum of all the multiples of 3 or 5 below the number passed in.
    """
    return sum(x for x in range(number) if x % 3 == 0 or x % 5 == 0)
53582256eb17a81c7c5b90be13d9bb89943b2693
36,184
from functools import reduce
import operator
import collections

def strings_to_wordsets(strings, stop_words=None):
    """Build a dict of wordsets from a list of strings, with optional filter.

    For each distinct word found in the list of strings, the wordset dict will
    map that word to a set of the strings that contain it. A list of words to
    ignore may be passed in stop_words.
    """
    string_words = [set(w.split(' ')) for w in (s.lower() for s in strings)]
    words = reduce(operator.or_, string_words)
    if stop_words:
        words -= set(stop_words)
    wordsets = collections.OrderedDict(
        (w, set(strings[i] for i, s in enumerate(string_words) if w in s))
        for w in sorted(words))
    return wordsets
407f604d6fe78e6aad10e972fcda63ebb42209f1
36,185
def tokenize(sentence):
    """
    Converts a single sentence into a list of individual significant units

    Args:
        sentence (str): Input string ie. 'This is a sentence.'

    Returns:
        list<str>: List of tokens ie. ['this', 'is', 'a', 'sentence']
    """
    tokens = []

    class Vars:
        start_pos = -1
        last_type = 'o'

    def update(c, i):
        if c.isalpha() or c in '-{}':
            t = 'a'
        elif c.isdigit() or c == '#':
            t = 'n'
        elif c.isspace():
            t = 's'
        else:
            t = 'o'

        if t != Vars.last_type or t == 'o':
            if Vars.start_pos >= 0:
                token = sentence[Vars.start_pos:i].lower()
                if token not in '.!?':
                    tokens.append(token)
            Vars.start_pos = -1 if t == 's' else i
        Vars.last_type = t

    for i, char in enumerate(sentence):
        update(char, i)
    update(' ', len(sentence))
    return tokens
05848512b0d171151056221b13cd54eae7680538
36,186
from typing import Union
import re

def _cast_valid_types(content: Union[str, int, bool]) -> Union[str, bool, int]:
    """
    Cast an input that explicitly reads "true" or "false" (case-insensitive) as a boolean type and cast all
    strings of only digits as an integer type. This function does nothing and returns the same value if the
    input is not a string.

    :param content: The string of content to parse out compatible types for
    :return: The value casted as the type detected
    """
    if type(content) == str:
        # Check if the response matches a boolean's text. The original compared against
        # ('true' or 'false'), which evaluates to just 'true', and bool('false') is True;
        # comparing the lowered string against 'true' fixes both problems.
        if content.lower() in ('true', 'false'):
            content = content.lower() == 'true'
        # Check if the response is an integer and only an integer (explicitly define match to avoid type coercion)
        elif re.fullmatch('\\d+', content):
            content = int(content)
    return content
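Spot-checking the three paths (hypothetical usage):

print(_cast_valid_types("False"))   # False (bool) -- before the fix this returned True
print(_cast_valid_types("42"))      # 42 (int)
print(_cast_valid_types("42.5"))    # '42.5' -- not all digits, stays a string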
c06e3635c1f5d4ac6d33939db8d552d58803dda1
36,187
def more_like_this(_es, es_index, field, like_list, min_term_freq, max_query_terms):
    """Build and execute a more like this query on the like document

    See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-mlt-query.html

    Returns
        result (list): list of documents that match the like document.
    """
    queries = [{
        "stored_fields": field,
        "query": {
            "more_like_this": {
                "fields": field,
                "like": like,
                "min_term_freq": min_term_freq,
                "max_query_terms": max_query_terms
            }
        }
    } for like in like_list]
    results = []
    for query in queries:
        res = _es.search(index=es_index, body=query)
        results.append([hit['fields'][field[0]][0] for hit in res['hits']['hits']])
    return results
a48fcc6e44b9a25bafd7936122c5ecc91cfdb932
36,189
import sys
import os

def get_dirs(src, order="none"):
    """
    Given a path to where simulation directories live, create a list of
    directories from which simulation data will be extracted in a
    user-specified order.

    Parameters
    ----------
    src : str
        Path to simulation suite directory which contains all simulation
        sub directories
    order : str
        How user wants dataset ordered. Defaults to "none", which means the
        code loads in the data in whatever order the simulation dirs are in.
        Options: "none", "grid"

    Returns
    -------
    dirs : list
        List of simulation directories ordered by order
    """
    print("Finding simulation subdirectories in %s ordered by %s." % (src, order))
    sys.stdout.flush()

    # No order: use listdir, which does NOT preserve any particular order
    if order.lower() == "none":
        dirs = list(filter(lambda x: os.path.isdir(os.path.join(src, x)),
                           os.listdir(src)))
    # Grid order: preserves vspace-given order for a grid of simulations
    elif order.lower() == "grid":
        dirs = list(sorted(filter(lambda x: os.path.isdir(os.path.join(src, x)),
                                  os.listdir(src))))
    # Not a valid option!
    else:
        raise ValueError("Invalid order: %s." % order)
    return dirs
32f72230e5a78a3ed5e9c5158bf660bc1c1693c4
36,190
import re

def _getXMLText(fileobj):
    """Convenience function for reading the XML header data in a ShakeMap grid file.

    :param fileobj: File-like object representing an open ShakeMap grid file.
    :returns: All XML header text.
    """
    tline = fileobj.readline()
    datamatch = re.compile('grid_data')
    xmltext = ''
    tlineold = ''
    while not datamatch.search(tline) and tline != tlineold:
        tlineold = tline
        xmltext = xmltext + tline
        tline = fileobj.readline()
    xmltext = xmltext + '</shakemap_grid>'
    return xmltext
a44091de4543142a83a13b010de7f5a471a21df8
36,191
def decorator_info(f_name, expected, actual, flag):
    """
    Convenience function returns nicely formatted error/warning msg.

    :param f_name:
    :param expected:
    :param actual:
    :param flag:
    :return:
    """
    # renamed from `format` so the builtin is not shadowed
    fmt = lambda types: ', '.join([str(t).split("'")[1] for t in types])
    expected, actual = fmt(expected), fmt(actual)
    msg = "'{}' method ".format(f_name) \
          + ("accepts", "returns")[flag] + " ({}), but ".format(expected) \
          + ("was given", "result is")[flag] + " ({})".format(actual)
    return msg
eeba0b04c2dfd6be77862d2f301d7e47c573b27a
36,192
def map_l2dist_gaussianmech_renyiDP(sensitivity, scale, alpha):
    """map an L2 distance `sensitivity` through the gaussian mechanism with
    parameter `scale` to (alpha, epsilon)-RDP

    Proposition 7 and Corollary 3: https://arxiv.org/pdf/1702.07476.pdf#subsection.6.3

    :param sensitivity: maximum L2 distance perturbation of a query
    :param scale: standard deviation of gaussian noise
    :param alpha: order of renyi divergence > 1
    :returns epsilon
    """
    return alpha * (sensitivity / scale) ** 2 / 2
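Plugging in numbers makes the scaling visible (hypothetical values):

# sensitivity 1.0, noise scale 2.0, Renyi order alpha = 10:
# epsilon = 10 * (1.0 / 2.0)**2 / 2 = 1.25
print(map_l2dist_gaussianmech_renyiDP(1.0, 2.0, 10))   # 1.25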
98b5454fdc299b345a73ad1473ff56e249a29397
36,193
import json

def to_dict(data):
    """Convert json to dict data; if input is not json, return None"""
    if isinstance(data, dict):
        return data
    try:
        value = json.loads(data)
        if not isinstance(value, dict):
            # explicit exception: a bare `raise` with no active exception
            # would itself error (the broad except swallowed it anyway)
            raise ValueError("not a JSON object")
        return value
    except Exception:
        return None
9419173d027b998cebe949e7a43424834fd50d7c
36,194
from typing import Mapping
from typing import Any

def _get_param_or_die(input_params: Mapping[str, Any], param: str) -> Any:
    """Returns value of param. Dies with user-formatted message if not defined.

    Args:
        input_params: Mapping from input parameter names to values.
        param: Name of the param to get the value of.

    Returns:
        Value of the given param.

    Raises:
        ValueError: User formatted message on error.
    """
    value = input_params.get(param, None)
    if not value:
        raise ValueError('Missing parameter: %s' % param)
    return value
f06bfb5ae5393cf0e41a33db1c03797e940fcdef
36,197
from datetime import datetime

def get_now():
    """
    Return the current timestamp as a string to be sent over the network.
    """
    return datetime.utcnow().isoformat()
c9ba1ee9a7450d58777265cea20657ada11d8636
36,198
def sort_issues(issues):
    """Sorted list of issues

    Args:
        issues (list): list of issue dictionaries

    Returns:
        list: list of sorted issue dictionaries
    """
    issues.sort(
        key=lambda issue: (
            issue.filename,
            issue.linenumber,
            issue.rule.id
        )
    )
    return issues
74aa0fe71962c47d6af8e73ce014b13be9921cf1
36,199
def fieldmap_minmax(fieldmap, variable):
    """Data limits for a given variable across all zones in a fieldmap"""
    limits = None
    for z in fieldmap.zones:
        if limits is None:
            limits = z.values(variable).minmax()
        else:
            low, high = z.values(variable).minmax()
            limits = (min(limits[0], low), max(limits[1], high))
    return limits
662da76d19fd73c8e93b0cbaf80a04d8f14f3135
36,200
def bogus_func(obj, *args, **kwargs):
    """Function that just returns the arguments modified by the decorator."""
    return (obj, args, kwargs)
c37dc3a21953571f4a81e1178b379c78aaeb510d
36,201
def reverse(sentence):
    """
    Split the original sentence into a list, then append elements of the old
    list to the new list starting from last to first. Then join the list back
    together.
    """
    original = sentence.split()
    reverse = []
    count = len(original) - 1
    while count >= 0:
        reverse.append(original[count])
        count = count - 1
    result = " ".join(reverse)
    return result
258bb97834a177a90dbfcf436d3228bb7f9a7237
36,202
from typing import Dict
import torch

def create_ner_conditional_masks(id2label: Dict[int, str]) -> torch.Tensor:
    """Create a NER conditional mask matrix which encodes the valid relations
    between a before-tag and an after-tag.

    Under the BIO naming scheme, a tag like `I-Dog` may only appear after
    `B-Dog` or `I-Dog`. This function creates the corresponding conditional
    mask matrix to prevent generating invalid tag sequences.

    Args:
        id2label: A dictionary which maps class indices to their label names.

    Returns:
        A conditional mask tensor.
    """
    conditional_masks = torch.zeros(len(id2label), len(id2label))
    for i, before in id2label.items():
        for j, after in id2label.items():
            if after == "O" or after.startswith("B-") or after == f"I-{before[2:]}":
                conditional_masks[i, j] = 1.0
    return conditional_masks
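A small check of the mask's semantics (hypothetical usage; the three-label scheme below is made up for illustration):

id2label = {0: "O", 1: "B-Dog", 2: "I-Dog"}
print(create_ner_conditional_masks(id2label))
# tensor([[1., 1., 0.],    row "O":     I-Dog may not follow O
#         [1., 1., 1.],    row "B-Dog": anything may follow B-Dog
#         [1., 1., 1.]])   row "I-Dog": I-Dog may continue itself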
0cda8db465b039349eff1022a8cd2cc071f21295
36,203
def acquire_image_url(soup):
    """
    Take a BeautifulSoup content of a book page.
    Return the url of the image of the book.
    """
    partial_url = soup.img['src'][5:]
    image_url = f"http://books.toscrape.com{partial_url}"
    return image_url
c2c14823f3fa1dbe30838dbab2513653d7c0fa3c
36,204
def encode_varint_1(num):
    """ Encode an integer to a varint representation. See
    https://developers.google.com/protocol-buffers/docs/encoding?csw=1#varints
    on how those can be produced.

        Arguments:
            num (int): Value to encode

        Returns:
            bytearray: Encoded representation of integer with length from 1 to 10 bytes
    """
    # Shift sign to the end of number (zigzag encoding)
    num = (num << 1) ^ (num >> 63)
    # Max 10 bytes. We assert those are allocated
    buf = bytearray(10)

    for i in range(10):
        # 7 lowest bits from the number, and set the 8th bit if we still have
        # pending bits left to encode
        buf[i] = num & 0x7f | (0x80 if num > 0x7f else 0)
        num = num >> 7
        if num == 0:
            break
    else:
        # Max size of an encoded 64-bit integer is 10 bytes
        raise ValueError("Out of int64 range")
    return buf[:i + 1]
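Tracing the zigzag step by hand (hypothetical usage): small signed values map to small unsigned ones, so they encode in a single byte.

print(encode_varint_1(1))    # bytearray(b'\x02') -- zigzag: 1 -> 2
print(encode_varint_1(-1))   # bytearray(b'\x01') -- zigzag: -1 -> 1
print(encode_varint_1(300))  # bytearray(b'\xd8\x04') -- 600 needs two 7-bit groups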
3a5883a352a85c3c472889b8b1e347ba41df0615
36,205