content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
import time
from datetime import datetime, timezone


def datetime_from_utc_to_local(utc_datetime):
    """
    convert a datetime from UTC to local time

    :param utc_datetime: datetime object of the game start time in UTC (naive)
    :return: returns the datetime converted from UTC to local time (naive)
    """
    # datetime.utcfromtimestamp() is deprecated since Python 3.12; derive the
    # same naive UTC value from an aware datetime instead, then take the
    # current UTC-to-local offset.
    now_timestamp = time.time()
    local_now = datetime.fromtimestamp(now_timestamp)
    utc_now = datetime.fromtimestamp(now_timestamp, timezone.utc).replace(tzinfo=None)
    return utc_datetime + (local_now - utc_now)
3c51a58a35bebf9a5ff1eca4bd2a334f798ae68d
91,448
from typing import Iterable
from typing import Sized


def max_sequence_length(sequences: Iterable[Sized]) -> int:
    """Computes the length of the longest sized element in an iterable.

    Args:
        sequences (Iterable): An iterable of some sized type

    Returns:
        max_len (int): The length of the longest sized type in sequences,
            or 0 when the iterable is empty.
    """
    # max() with default=0 replaces the manual running-maximum loop and
    # handles the empty iterable explicitly.
    return max(map(len, sequences), default=0)
4101ccb8fb0ed6d5ca2f49e3d45ed729d2ffcd4b
91,449
def greet(personal, family, title="", polite=False):
    ### "Doc"
    """ Generate a greeting string for a person.

    Parameters
    ----------
    personal: str
        A given name, such as Will or Jean-Luc
    family: str
        A family name, such as Riker or Picard
    title: str
        An optional title, such as Captain or Reverend
    polite: bool
        True for a formal greeting, False for informal.

    Returns
    -------
    string
        An appropriate greeting
    """
    ### "Content"
    parts = ["How do you do, " if polite else "Hey, "]
    if title:
        parts.append(title + " ")
    parts.append(personal + " " + family + ".")
    return "".join(parts)
78ccbb0e1fdf3db9a0b5d17631755f6007c3407e
91,452
def namedtuple_lower(t):
    """Return a new namedtuple of the same type with every field lower-cased."""
    cls = type(t)
    return cls(*(field.lower() for field in t))
8ad93713ea12b0b9cfa48dce1629ab8da7ce7cfe
91,459
def convert_far_to_cel(cel_deg):
    """Convert fahrenheit to celsius

    NOTE(review): the parameter is named ``cel_deg`` but actually holds a
    temperature in degrees Fahrenheit (it is fed into the F->C formula below);
    renaming it would break keyword callers, so only documenting it here.

    :param cel_deg: temperature in degrees Fahrenheit (misnomer)
    :return: temperature in degrees Celsius
    """
    return (cel_deg - 32) * (5/9)
01ca7e15d6808e6bd46da6ad437cb8af08732448
91,460
def rotate(message):
    """ Returns a string with the first character of 'message' moved to the end

    :param message: input string
    :return: rotated string; the empty string is returned unchanged
    """
    # Guard: message[0] would raise IndexError on an empty string.
    if not message:
        return message
    return f"{message[1:]}{message[0]}"
ca3196ffc135b2b8480fa0220183572cc5bcbd76
91,464
def update_func_meta(fake_func, real_func):
    """ Copy meta information (eg. __doc__) of the real function onto the
    fake function.

    @rtype: function
    @return Fake function with metadata of the real function.
    """
    for attr in ("__module__", "__name__", "__doc__"):
        setattr(fake_func, attr, getattr(real_func, attr))
    fake_func.__dict__.update(real_func.__dict__)
    return fake_func
c416143a6b6db13b5b0949c2f8fd1b61792a8143
91,468
def __myfloat(x):
    """ Private method that returns 0.0 if the value cannot be converted to
    float (ValueError), or the result of the float() call otherwise.
    """
    try:
        return float(x)
    except ValueError:
        return 0.0
da067a91f3932fab715c69c47fe0a1e90a75e8ba
91,472
def oneVoxelNoise(data, loc, scale=1.1):
    """applies 1-voxel scaling to the provided image matrix

    The noise introduced is equivalent to a point magnification at the
    location provided with scaled intensity equal to the value provided
    (default is an increase in intensity by 10%). The location provided must
    be the same length or 1 fewer than the dimensions of the data. When the
    location contains fewer dimensions than the data matrix, noise is applied
    across the entire last dimension.

    Parameters
    ----------
    data : ndarray
        Image to be perturbed with D dimensions.
    loc : list, tuple
        List of coordinate locations for applying noise. Must be length D
        or D-1
    scale : float
        Multiplier for the signal at the target location. Default is 1.1

    Returns
    -------
    data : ndarray
        The perturbed data matrix with signal amplification by scale at
        location loc.
    """
    loc = tuple(loc)  # Lists cannot be used to index arrays, but tuples can
    # NumPy applies a partial index to the leading dimensions, so data[loc]
    # already spans the whole trailing dimension when len(loc) < data.ndim;
    # the old data[loc, :] form built a tuple containing a tuple, which
    # modern NumPy rejects.
    data[loc] = data[loc] * scale
    return data
eadf4383f5db6c7eb760f0acd6e487f5bb2fce5c
91,475
def get_domain(total, element, is_y, space=0.01):
    """returns evenly spaced domain for an element in a grid plot

    Parameters
    ----------
    total : int
        the total number of elements on the axis
    element : int
        the element number to compute the domain for
    is_y : bool
        if True, this is for a y-coordinate domain. This is reversed so the
        result is in cartesian, not array, coordinates
    space : float
        the separation between elements
    """
    if total == 1:
        return [0, 1]
    if element > total - 1:
        raise ValueError(f"{element} index too big for {total}")
    per_element = 1 / total
    # cap the padding so it never eats more than a tenth of an element
    space = min(space / 2, per_element / 10)
    bounds = [per_element * i for i in range(total + 1)]
    domains = [(lo + space, hi - space) for lo, hi in zip(bounds, bounds[1:])]
    if is_y:
        element = total - element - 1
    return domains[element]
238892a4685df01f1c0cbe04e250d48fcd75b2d2
91,479
def discovery_status_to_text(status):
    """ Convert a Discovery Status code into meaningful text.

    Args:
        status: Status code from Orion (string key, e.g. "0").

    Returns:
        String: Human text for status code.

    Raises:
        KeyError: for unknown status codes.
    """
    return {
        "0": 'Unknown',
        "1": 'InProgress',
        "2": 'Finished',
        "3": 'Error',
        "4": "NotScheduled",
        "5": "Scheduled",
        "6": "NotCompleted",
        "7": "Canceling",
        "8": "ReadyForImport",
    }[status]
acddb0b1d6472523950eaa6b9cc00f8e54508b19
91,480
import json


def read_from_json_file(filename: str) -> dict:
    """ Read the data from a json file.

    Parameters
    ----------
    filename : str
        name of the file

    Returns
    -------
    data : dict
        all data of the file
    """
    # `with` guarantees the handle is closed even when parsing raises;
    # json.load reads straight from the file object.
    with open(filename, 'r') as file:
        return json.load(file)
286a0b850dea90dcd69326788f32778689bac0e3
91,487
def order_metadata(metadata: list) -> list:
    """Orders 2-element metadata according to measurementDate."""
    if len(metadata) == 2:
        first, second = metadata
        if first["measurementDate"] > second["measurementDate"]:
            metadata.reverse()
    return metadata
a9c31507bb9e485f40d62170a6e1f296777678fe
91,490
def get_lang_owner(cursor, lang):
    """Get language owner.

    Args:
        cursor (cursor): psycopg2 cursor object.
        lang (str): language name.

    Returns:
        Role name of the owner of ``lang`` (first column of the first row).

    Raises:
        TypeError: if the language does not exist — fetchone() returns None
            and subscripting it fails.
    """
    # Parameterized query (%(lang)s): the driver escapes the value, so no
    # SQL-injection risk from `lang`.
    query = ("SELECT r.rolname FROM pg_language l "
             "JOIN pg_roles r ON l.lanowner = r.oid "
             "WHERE l.lanname = %(lang)s")
    cursor.execute(query, {'lang': lang})
    return cursor.fetchone()[0]
8334db91d146c5221d7b643eb182398054b5d807
91,494
def mode(is_append: bool):
    """Write mode dependent on is_append: append ("a") or overwrite ("w")."""
    if is_append:
        return "a"
    return "w"
55836889d7d22592eb1396dbed5a621f627bd1a5
91,495
def get_temp_result(initial_out, final_out):
    """ Takes NN outputs before and after removal to compute absolute
    fractional differences.

    :param initial_out: network output before modification
    :param final_out: network output after modification
    :return: |final - initial| / initial for the initially most confident class
    """
    # class the network was initially most confident about
    initial_class_scores, predicted_class = initial_out.max(1)
    # score of that same class after the modification
    final_class_scores = final_out.index_select(1, predicted_class).max(0)[0]
    # absolute fractional difference of raw results
    return abs(final_class_scores - initial_class_scores) / initial_class_scores
d0bdb4387326e4e78a31864943a67cc9f86a61b1
91,501
def inHg_to_hPa(p_inHg):
    """Convert inches of mercury to hectopascals (None passes through)."""
    return None if p_inHg is None else p_inHg * 33.86389
6ebe35b2acb3f5b179b2c73b1f4f19b6c50d3e99
91,503
import warnings


def unpack_1tuple(tup):
    """
    If we have a length-1 tuple/list that contains a slice, unpack to just
    the slice.

    Notes
    -----
    The list case is deprecated.
    """
    if len(tup) != 1 or not isinstance(tup[0], slice):
        return tup
    # if we don't have a MultiIndex, we may still be able to handle
    # a 1-tuple.  see test_1tuple_without_multiindex
    if isinstance(tup, list):
        # GH#31299
        warnings.warn(
            "Indexing with a single-item list containing a "
            "slice is deprecated and will raise in a future "
            "version. Pass a tuple instead.",
            FutureWarning,
            stacklevel=3,
        )
    return tup[0]
e1b77183f064af7918b4c6090eb1a871eb148395
91,510
def dist(point1, point2):
    """
    Definition
    ---
        Method to find the manhattan distance between 2 points

    Parameters
    ---
        point1, point2: points to calculate distance

    Returns
    ---
        dist: distance between the two points
    """
    # Manhattan distance sums the absolute difference per coordinate;
    # the old abs((dx) + (dy)) let opposite-signed components cancel out.
    return abs(point1[0] - point2[0]) + abs(point1[1] - point2[1])
e7eaaec8525900fdb0fb8dc8d026b21b00dbefad
91,511
def apply_mask(img, mask):
    """ Applies a boolean mask to a 2d image

    Parameters
    ----------
    img: 2darray
        values of Gaussian source
    mask: bool
        mask for sampling frequencies

    Returns
    -------
    out: 2darray
        array with sampled frequencies (masked-out entries zeroed)
    """
    masked = img.copy()
    masked[~mask.astype(bool)] = 0
    return masked
95177a1688095b5a0975f0f4c2b38b4eec38c14e
91,512
def is_number(s):
    """ check if input is a number (check via conversion to string) """
    try:
        float(str(s))
    except ValueError:
        return False
    return True
3dc47fde63c5af88ef4c0bb88e86861ffc53275b
91,519
import tempfile


def tmpdir(dir=None):
    """Return a temporary directory for extraction."""
    return tempfile.mkdtemp(prefix='Unpack_', suffix='', dir=dir)
3bd005c6ac9080a7e02e8f72db94904455b9cd10
91,521
def BAIM(b4, b5, nodata=-9999):
    """Computes the burned area index for mapping Mediterranean burn scars."""
    index = 1 / (pow(b4 - 0.05, 2) + pow(b5 - 0.2, 2))
    # propagate the nodata marker wherever either input band is nodata
    invalid = (b4 == nodata) | (b5 == nodata)
    index[invalid] = nodata
    return index
3822d203b22b9a9e3dcb73baac83b1c270961b99
91,524
def get_linked_constellations(point_to_constellation, points_in_range):
    """Get constellations to which are connected points_in_range."""
    candidates = (point_to_constellation.get(p) for p in points_in_range)
    return {constellation for constellation in candidates
            if constellation is not None}
079ee29c36a1ea58917307215f8782d11125a0a9
91,527
def yielder(it):
    """ Return the next item from iterator *it*, or None when exhausted.

    (Despite the name, this does not yield; it advances the iterator once.)
    Adapted from: https://stackoverflow.com/a/40701031
    """
    # next() accepts a default, replacing the try/except StopIteration dance.
    return next(it, None)
91daca1153c098b26dc6e1f23db9ceb5ed7f22bc
91,529
def find_spelling(n):
    """ Finds d, r s.t. n-1 = 2^r * d with d odd. """
    r, d = 0, n - 1
    # divmod used for large numbers
    while True:
        half, remainder = divmod(d, 2)
        if remainder == 1:
            return r, d
        r += 1
        d = half
b3dcbb35b43e5fb321176dc4279c1a33dd95bf47
91,532
def get_model_input_shape(model):
    """ Returns the required (minimum) input size of a FCNN model by
    analyzing the conv1d layers.

    model: Fully convolutional neural net keras model for which the input
        shape is determined.
    """
    # Start from a single timestep and add each conv layer's receptive-field
    # contribution.
    model_input_size = 1
    for layer in model.layers:
        # Conv layers are matched by name -- assumes every conv layer has
        # 'conv' in its name; TODO confirm this holds for all models used.
        if layer.name.find('conv') != -1:
            dilation_rate = layer.dilation_rate[0]
            kernel_size = layer.kernel_size[0]
            # receptive-field growth of a (dilated) 1D convolution
            model_input_size += (kernel_size - 1) * dilation_rate
    return model_input_size
8c585be22239c63514e7f0274363904b6126f494
91,533
import pkg_resources


def resource_filename(resources_name) -> str:
    """ Get the absolute path of a resource file of the package.

    :param resources_name: resource path relative to this package.
    :return: absolute filesystem path to the resource.
    """
    # NOTE(review): pkg_resources is deprecated in modern setuptools;
    # importlib.resources is the suggested replacement, but migrating would
    # change extraction semantics -- left as-is here.
    return pkg_resources.resource_filename(__name__, resources_name)
b967bfaffff3416036c27f774fc2f46b7fa474d3
91,536
def createResult(originalChoreList):
    """ Concatenates the 0th index of every chore in the chore array after
    results have been calculated.

    :param originalChoreList: iterable of indexable chores.
    :return: string of each chore's first element, concatenated in order.
    """
    # str.join avoids quadratic += string concatenation.
    return "".join(chore[0] for chore in originalChoreList)
74f9ab990c4eab502edb422c79822db7bfae23e5
91,539
def is_readable_codon(nucleic_sequence: str) -> bool:
    """ A function that determines if it is a readable codon.

    :param nucleic_sequence: nucleotide string
    :return is_readable_codon: True when the length is a multiple of 3
    """
    # a codon string is readable iff its length divides evenly into triplets;
    # return the comparison directly instead of branching to True/False.
    return len(nucleic_sequence) % 3 == 0
16359877c0dc04302ba9037711eb0059f83838d3
91,540
def corrupt_with_vowels(input):
    """
    Takes in a base string input. Returns a string that has been corrupted
    by removing all the vowels (a, e, i, o, u, case-insensitive) from it.

    Parameters:
        input: Base string that will be corrupted.

    Returns:
        String input with all vowels removed.

    >>> corrupt_with_vowels('buy and sell')
    'by nd sll'
    >>> corrupt_with_vowels('gold gold gold')
    'gld gld gld'
    >>> corrupt_with_vowels('')
    ''
    >>> corrupt_with_vowels('glyph')
    'glyph'
    >>> corrupt_with_vowels('oiaueoieoeaoieoiIIOAIOEI')
    ''
    """
    # Iterative filter: the old character-by-character recursion raised
    # RecursionError for inputs longer than the recursion limit (~1000).
    vowels = set('aeiou')
    return ''.join(ch for ch in input if ch.lower() not in vowels)
aa57ae6124be96f2dc2b5010ac9525085791f3bb
91,549
from pathlib import Path def check_file_exists(location: Path | str): """Check if file exists""" return Path(location).is_file()
d3cbcea46fb0f5ae0cb3e92f14060588abca8be5
91,550
import math


def square_root(num: int) -> float:
    """ Square root of specific number """
    result = math.sqrt(num)
    return result
64bcdfd4cbb37ee74a299fa4ea247112f848848b
91,552
def get_harvest_info(catalog_record):
    """Is catalog record harvested.

    Returns True if dataset was harvested from a third party source.

    Arguments:
        catalog_record (dict): The catalog record.

    Returns:
        bool: Is the cr harvested.
    """
    # The old code looked up the literal key
    # 'data_catalog.catalog_json.harvested', which only works for flat,
    # dotted-key records.  Keep that lookup for backward compatibility,
    # then fall back to walking the nested path.
    flat_key = 'data_catalog.catalog_json.harvested'
    if flat_key in catalog_record:
        return catalog_record[flat_key]
    value = catalog_record
    for key in ('data_catalog', 'catalog_json', 'harvested'):
        if not isinstance(value, dict) or key not in value:
            return False  # same default as the old .get(..., False)
        value = value[key]
    return value
16785479f32f746a834689a96c989b4c05c0b413
91,555
def get(isamAppliance, id, check_mode=False, force=False):
    """
    Get information on particular user by id.

    :param isamAppliance: appliance object exposing invoke_get().
    :param id: user identifier interpolated into the request URI
        (note: shadows the builtin ``id``; kept for interface compatibility).
    :param check_mode: accepted for API-signature consistency; unused here.
    :param force: accepted for API-signature consistency; unused here.
    :return: whatever isamAppliance.invoke_get returns for this endpoint.
    """
    return isamAppliance.invoke_get("Retrieving user",
                                    "/sysaccount/users/{0}/v1".format(id))
b8cbe2bbb74915e1a3a67617b210a5c295d13d6a
91,556
def partial_velocity(vel_list, u_list, frame):
    """Returns a list of partial velocities.

    For a list of velocity or angular velocity vectors the partial
    derivatives with respect to the supplied generalized speeds are computed,
    in the specified ReferenceFrame.

    The output is a list of lists: one inner list per velocity vector,
    containing that vector's partials with respect to each generalized speed.

    Parameters
    ==========

    vel_list : list
        List of velocities of Point's and angular velocities of
        ReferenceFrame's
    u_list : list
        List of independent generalized speeds.
    frame : ReferenceFrame
        The ReferenceFrame the partial derivatives are going to be taken in.

    Examples
    ========

    >>> from sympy.physics.mechanics import Point, ReferenceFrame
    >>> from sympy.physics.mechanics import dynamicsymbols
    >>> from sympy.physics.mechanics import partial_velocity
    >>> u = dynamicsymbols('u')
    >>> N = ReferenceFrame('N')
    >>> P = Point('P')
    >>> P.set_vel(N, u * N.x)
    >>> partial_velocity([P.vel(N)], [u], N)
    [[N.x]]

    """
    if not hasattr(vel_list, '__iter__'):
        raise TypeError('Provide velocities in an iterable')
    if not hasattr(u_list, '__iter__'):
        raise TypeError('Provide speeds in an iterable')
    return [[velocity.diff(speed, frame) for speed in u_list]
            for velocity in vel_list]
862ff9fca056a3983d45a5ebc643d5cd9c704e34
91,557
def force_decode(bstring):
    """Tries to decode a bytestring to text. If that fails, just repr it."""
    try:
        text = bstring.decode()
    except UnicodeDecodeError:
        # strip the b'' wrapper from the repr
        text = repr(bstring)[2:-1]
    return text
b6ce799d82f324ba5b1bdc78b0bca34c74e41e08
91,559
def get_aligned_sequences(mafft_output):
    """ Parse aligned FASTA sequences from MAFFT output.

    :param mafft_output: MAFFT program output in FASTA format
    :return: Array of the aligned sequences in FASTA format
    """
    lines = mafft_output.splitlines()
    headers = [line for line in lines if line.startswith(">")]
    # each ">"-delimited chunk: first line is the header, rest is sequence
    bodies = [
        "".join(chunk.split("\n")[1:])
        for chunk in mafft_output.split(">")
        if chunk
    ]
    return [header + "\n" + body + "\n" for header, body in zip(headers, bodies)]
4841073ca19eb30d4bb2e1d71a7e06d2d344d8ce
91,563
import ast


def get_accessed(node):
    """Get names, but ignore variables names to the left hand side

    That is to say, in case of

        a = b + 1

    we consider b as "being accessed", while a is not.
    """
    if isinstance(node, ast.Assign):
        # only the right-hand side counts as "accessed"
        return get_accessed(node.value)
    if isinstance(node, ast.Name):
        return {node.id}
    children = node if isinstance(node, list) else ast.iter_child_nodes(node)
    names = set()
    for child in children:
        names |= get_accessed(child)
    return names
4ef89ccd5c8937ab0add5bf96957784ad034dea5
91,564
import torch


def sample_pdf(bins, weights, Nsamples=64, det=False):
    """
    Sample @Nsamples depths from @bins with distribution defined by @weights.

    Inputs:
        bins: depth bin boundaries, (*Nrays, Nbins+1),
            where Nbins == Nsamples(coarse)-2 (bins are the midpoints of the
            coarse z_vals).
        weights: (*Nrays, Nbins) -- unnormalized per-bin weights.
        Nsamples: the number of samples to draw from the distribution.
        det: sampling deterministic (evenly spaced in CDF) or random.
    Outputs:
        z_vals: the sampled depth values from bins, (*Nrays, Nsamples)

    NOTE: @weights should be detached before feeding into the function.
    """
    eps = 1e-5
    *Nrays, Nbins = weights.shape
    # prevent division by zero (don't do inplace op!)
    weights = weights + eps
    # normalize into a pdf, then build the cdf over bins
    pdf = weights / weights.sum(-1, keepdim=True)  # (*Nrays, Nbins)
    cdf = torch.cumsum(pdf, -1)  # (*Nrays, Nbins)
    cdf = torch.cat([torch.zeros_like(cdf[..., :1]), cdf], -1)  # (*Nrays, Nbins+1) Prepend 0 to CDF
    # uniform samples in [0, 1]: evenly spaced when det, random otherwise
    if det:
        u = torch.linspace(0, 1, Nsamples, device=bins.device)
        u = u.expand([*Nrays, Nsamples])
    else:
        u = torch.rand([*Nrays, Nsamples], device=bins.device)
    u = u.contiguous()  # searchsorted requires a contiguous tensor
    # Find correct intervals for u (inverse-transform sampling)
    inds = torch.searchsorted(cdf, u, right=True)  # (*Nrays, Nsamples)
    below = torch.clamp_min(inds-1, 0)  # (*Nrays, Nsamples)
    above = torch.clamp_max(inds, Nbins)  # (*Nrays, Nsamples)
    # gather the cdf and bin boundaries that bracket each sample
    cdf_below = torch.gather(cdf, -1, below)  # (*Nrays, Nsamples)
    cdf_above = torch.gather(cdf, -1, above)  # (*Nrays, Nsamples)
    bins_below = torch.gather(bins, -1, below)  # (*Nrays, Nsamples)
    bins_above = torch.gather(bins, -1, above)  # (*Nrays, Nsamples)
    cdf_diff = cdf_above - cdf_below
    # degenerate (zero-width) cdf intervals: force t == u - cdf_below
    cdf_diff[cdf_diff < eps] = 1.0
    # linear interpolation within the bracketing bin
    t = (u - cdf_below) / cdf_diff
    z_vals = bins_below + t * (bins_above - bins_below)  # (*Nrays, Nsamples)
    return z_vals
e18f9a7e720beea0ba7ebb2e7f924eefc7c05932
91,576
from typing import Iterable
from typing import Any


def null(it: Iterable[Any]) -> bool:
    """
    Return empty or boolean value of iterable object

    Args:
        it: Iterable object

    Examples:
        >>> fpsm.null([])
        True
        >>> fpsm.null(range(100))
        False
    """
    if list(it):
        return False
    return True
c4fdf053bbc0ea37e64f895af577608161a8ef5c
91,578
import torch


def load_model(model_checkpoint):
    """
    Loads the model from a checkpoint file.

    Arguments:
        model_checkpoint: Path to checkpoint file

    Returns:
        model: Loaded model.
        idx_class_mapping: Index to class mapping for further evaluation.
    """
    # SECURITY NOTE(review): torch.load unpickles the file -- only load
    # trusted checkpoints.
    checkpoint = torch.load(model_checkpoint)
    arch = checkpoint["arch"]
    my_local = dict()
    # NOTE(review): relies on a module-level name `models` (presumably
    # torchvision.models) being present in globals() -- it is not imported
    # in this snippet; confirm at the call site.  exec on a
    # checkpoint-supplied `arch` string also assumes the checkpoint is
    # trusted.
    exec("model = models.{}(pretrained=True)".format(arch), globals(), my_local)
    model = my_local['model']
    # freeze the pretrained feature extractor; only the classifier changes
    for param in model.parameters():
        param.requires_grad = False
    model.classifier = checkpoint["classifier"]
    model.load_state_dict(checkpoint["state_dict"])
    class_idx_mapping = checkpoint["class_idx_mapping"]
    # invert class->index into index->class for prediction lookup
    idx_class_mapping = {v: k for k, v in class_idx_mapping.items()}
    return model, idx_class_mapping
4e36b38cb6eeaf9b8b7a0845ea20758521ecd78e
91,586
import json


def PrettyPrint(contents):
    """Pretty prints a fieldtrial configuration.

    Args:
        contents: File contents as a string.

    Returns:
        Pretty printed file contents.
    """
    parsed = json.loads(contents)
    pretty = json.dumps(parsed, sort_keys=True, indent=4,
                        separators=(',', ': '))
    return pretty + '\n'
5774a3fa1b7925d3d977a934814e2be32c3414d1
91,592
def has_namespace(ident):
    """
    Returns True if a namespace is given with the identifier, False otherwise.

    >>> has_namespace('foo')
    False
    >>> has_namespace('foo:bar')
    True
    """
    return ident.find(':') >= 0
3d09ed546da2daa6f187563eb95e9a487333fdbf
91,598
def splitPFAMVersion(PFAM_designation):
    """ remove the version of the PFAM (PFXXXX.YY) remove YY

    :param PFAM_designation: PFAM name
    :type PFAM_designation: str
    :return: pfam name
    :rtype str
    """
    parts = PFAM_designation.split('.')
    # designation must be exactly name.version
    assert len(parts) == 2
    name, _version = parts
    return name
7b6f7f8de8b5c2ab96f4f18d21a298f2f37c0006
91,601
def string_y_n(string, default=None):
    """
    Strict mapping of a given string to a boolean.

    If it is `'y'` or `'Y'`, `True` is returned.
    If it is `'n'` or `'N'`, `False` is returned.
    (The old docstring wrongly said 'n'/'N' returned True.)
    If it is empty or None (evaluates to False), and `default` is set,
    `default` is returned.
    Else a `ValueError` is raised.

    :param string: The input
    :type  string: str

    :param default: default result for empty input
    :type  default: None | bool

    :raises ValueError: If it is not any of ['y', 'Y', 'n', 'N']

    :return: result (True/False/default)
    :rtype: bool
    """
    if not string and default is not None:
        return default
    if string not in ['y', 'Y', 'n', 'N']:
        raise ValueError('Please enter y or n.')
    # validation above guarantees the remaining input is y/Y/n/N
    return string in ('y', 'Y')
c7e4721a80f9d30e5a1e0c0cdc0e5fe5cea0c58d
91,606
def calculate_rewards_common(covariates, coeffs_common):
    """Calculate common rewards.

    Covariates 9 and 10 are indicators for high school and college graduates.

    Parameters
    ----------
    covariates : np.ndarray
        Array with shape (num_states, 16) containing covariates.
    coeffs_common : np.ndarray
        Array with shape (2,) containing coefficients for high school and
        college graduates.

    Returns
    -------
    np.ndarray
        Array with shape (num_states, 1) containing common rewards.
        Reshaping is necessary to broadcast the array over rewards with
        shape (num_states, 4).
    """
    education_indicators = covariates[:, 9:11]
    common = education_indicators.dot(coeffs_common)
    return common.reshape(-1, 1)
00b268aeddda5fdec4c8faf7b22d897e13cdd8c3
91,610
def correct_time_string(timeString):
    """
    Returns a valid string version of the time of departure/arrival based on
    given argument value. If no known value is given, "ALL_DAY" is returned.

    :param timeString: String to correct
    :return: Corrected string value
    """
    mapping = {
        "BEFORE_NOON": "BEFORE_NOON",
        "NOON_TO_6PM": "NOON_TO_SIX",
        "AFTER_6PM": "AFTER_SIX",
    }
    return mapping.get(timeString, "ALL_DAY")
a7a77c460877d119839f0e7ec8cee9453e85b1b2
91,619
def contains_common_item(arr1, arr2):
    """
    Return True when any element of arr1 equals any element of arr2.

    (Kept as a pairwise comparison so unhashable elements still work.)
    O(a*b) Time Complexity
    O(1) Space Complexity
    """
    return any(first == second for first in arr1 for second in arr2)
b9d058dff8e2b54d86cacdf339162da2e7ef08d6
91,620
def _get_output(layer, index=0):
    """
    :param layer : NN Layer Proto message
    :param index : Layer output index (Default 0)
    :returns name of output at provided index if present, otherwise None
    """
    outputs = layer.output
    return outputs[index] if index < len(outputs) else None
dac9dfe6df79ff67213b64f6b9b58577c7425023
91,621
def as_grid(array, board_size):
    """Convert a 1D array into a 2D array with given board size."""
    rows = []
    for start in range(0, len(array), board_size):
        rows.append(array[start:start + board_size])
    return rows
f7fbf5383e970680af5844684d7393b2296af1d1
91,623
import requests
import io


def _resp(text=''):
    """Construct a requests.Response carrying the given plain-text body.

    :param text: body text; encoded as UTF-8 into the raw byte stream.
    :return: a Response with status 200 whose .text yields *text*.
    """
    resp = requests.Response()
    resp.status_code = 200
    resp.encoding = 'utf8'
    # Response.content/.text read lazily from .raw, so a BytesIO suffices.
    resp.raw = io.BytesIO(text.encode('utf8'))
    return resp
e0a7e3b04c0ebc95eecdc489649336f813db364d
91,626
import re


def clean_resolver_url_pattern(route: str) -> str:
    """Cleans the full url path pattern from a url resolver into a OpenAPI
    schema url pattern.

    Args:
        route (str): Full URL path pattern including any prefixed paths.

    Returns:
        str: OpenAPI path format

    Example::

        >>clean_resolver_url_pattern("toy/%(toyId)s/uploadImage")
        toy/{toyId}/uploadImage
    """
    # Raw string: '\%' and '\_' are invalid string escapes (SyntaxWarning on
    # newer Pythons) and unnecessary; '-' and '_' need no escaping inside the
    # character class.  The match semantics are unchanged.
    return re.sub(r"%\(([a-zA-Z0-9_-]*)\)s", r"{\1}", route)
83559fbc9ad660663e2b02b5bc8138f2763f5a14
91,634
def haproxy_flatten(d, separator='.'):
    """
    haproxy_flatten a dictionary `d` by joining nested keys with `separator`.

    Slightly modified from <http://codereview.stackexchange.com/a/21035>.

    >>> haproxy_flatten({'eggs': 'spam', 'sausage': {'eggs': 'bacon'}, 'spam': {'bacon': {'sausage': 'spam'}}})
    {'spam.bacon.sausage': 'spam', 'eggs': 'spam', 'sausage.eggs': 'bacon'}
    """
    flat = {}
    for key, value in d.items():
        try:
            # recursing raises AttributeError when value has no .items()
            nested = haproxy_flatten(value, separator).items()
        except AttributeError:
            flat[key] = value
        else:
            for sub_key, sub_value in nested:
                flat[separator.join([key, sub_key])] = sub_value
    return flat
f335f0a4934d5f8e939702959cea021894d930ed
91,635
import math


def burning_probability(theta, V, c1, c2, Ph, Pden, Pveg):
    """
    Probability that a cell adjacent to a burning cell catches fire at the
    next time step, combining wind effects with vegetation type and density.
    The neighbour's position (via theta) matters because of wind speed and
    direction.

    Input:
        theta: int - angle based on position of neighbouring cell
        V:     int - wind speed
        c1:    float - wind constant
        c2:    float - wind constant
        Ph:    float - baseline ignition probability under no wind and flat
                       terrain
        Pden:  float - density-related factor of the cell's vegetation
        Pveg:  float - vegetation-type-related factor of the cell
    Output:
        Pburn: float - the probability that a cell adjacent to a burning cell
               will catch fire in the next time step
    """
    # wind effects
    ft = math.e**(V*c2*(math.cos(theta)-1))
    Pw = ft*math.e**(c1*V)
    # burning probability
    return Ph*(1+Pden)*(1+Pveg)*Pw


#### MAIN PROGRAM ####
676f892e0e0b8135b7e4d237c06b96d8706cee9b
91,640
def _GetPortList(range_list):
    """Creates list of singleton port numbers from list of ports and ranges."""
    ports = []
    for port_range in range_list:
        # expand each inclusive [start, end] range
        ports += range(port_range.start, port_range.end + 1)
    return sorted(ports)
6e2b7c0f0668f4c2262d13b183fbdc0c6e715c1d
91,641
def children(tag):
    """Returns the list of children's text"""
    texts = []
    for child in tag:
        if hasattr(child, 'text'):
            texts.append(child.text)
    return texts
417546e1be1d85ed8ade81ace94f7c83dba2617e
91,643
def calc_percolation_flux(unsatStore, unsatStore_max, fieldCap, k_sat, beta):
    """
    Calculate the percolation flux from the unsaturated to the saturated zone

    Parameters
    ----------
    unsatStore : int or float
        Storage in the unsaturated zone [mm]
    unsatStore_max : int or float
        Maximum storage in the unsaturated zone [mm]
    fieldCap : int or float
        Field capacity [mm]
    k_sat : int or float
        Maximum percolation rate [mm day^-1]
    beta : int or float
        Parameter to account for percolation non-linearity

    Returns
    -------
    percolation : int or float
        Percolation flux [mm day^-1]
    """
    # nothing percolates below field capacity
    if unsatStore < fieldCap:
        return 0.0
    saturation_excess = (unsatStore - fieldCap) / (unsatStore_max - fieldCap)
    return k_sat * saturation_excess ** beta
d04b0d781af3be2cf3427d4819b794ab959552c3
91,646
import requests
import json


def get_json_data(url):
    """Makes a HTTP request with the given url and returns json data as a
    python object.

    :param url: URL to GET.
    :return: parsed JSON document.
    :raises json.JSONDecodeError: re-raised after logging when the response
        body is not valid JSON.
    """
    raw_json = requests.get(url)
    try:
        json_data = json.loads(raw_json.content)
    except json.JSONDecodeError:
        # report which URL produced the bad document, then propagate
        print("Error: Got non valid JSON document from", url)
        raise
    return json_data
95ab2d8d0a6a2607d10f674bcedc3fbdda2ce9fe
91,648
import random
import string


def random_str(length: int = 8):
    """Generate a random string of fixed length """
    letters = string.ascii_letters
    return ''.join(random.choice(letters) for _ in range(length))
bd5af189398a610055660098ebfae78b134d837d
91,649
def isCFile(filename):
    """Returns True if `filename` ends in .c, .cpp, .cc"""
    # str.endswith accepts a tuple of suffixes -- one call instead of three.
    return filename.endswith((".c", ".cpp", ".cc"))
5dc618fafb46f39fdb9fa820769057ea2f44ded8
91,651
import bisect


def getIndex(abdlist, pos):
    """
    Determine the item in the abdlist that is to the immediate right of the
    pos. This basically identifies the closest target region.
    """
    return bisect.bisect_right(abdlist, pos)
877a6d7bb7d8c8715518d82a309ca7928f9bb349
91,653
def get_floor_distribution(N_floors, N_classes):
    """
    Distribute the number of classes evenly over the number of available
    floors.

    Parameters
    ----------
    N_floors : int
        Number of available floors.
    N_classes : int
        Number of classes in the school.

    Returns
    -------
    floors : dictionary
        Dictionary of the form {floor1:[class_1, class_2, ...], ...}
    floors_inv : dictionary
        Dictionary of the form {class1:floor1, ..., class_N:floor_N}
    """
    floors = {i:[] for i in range(N_floors)} # starts with 0 (ground floor)
    classes = list(range(1, N_classes + 1))
    classes_per_floor = int(N_classes / N_floors)
    # easiest case: the number of classes is divisible by the number of floors
    if N_classes % N_floors == 0:
        # every floor gets an equal, contiguous slice of class numbers
        for i, floor in enumerate(range(N_floors)):
            floors[floor] = classes[i * classes_per_floor: \
                            i * classes_per_floor + classes_per_floor]
    # if there are leftover classes: assign them one-by-one to the existing
    # floors, starting with the lowest
    else:
        leftover_classes = N_classes % N_floors
        classes_per_floor += 1
        for i, floor in enumerate(range(N_floors)):
            # the first `leftover_classes` floors get one extra class each
            if i < leftover_classes:
                floors[floor] = classes[i * classes_per_floor: \
                                i * classes_per_floor + classes_per_floor]
            # hooray, index magic!
            else:
                # remaining floors get classes_per_floor - 1 classes,
                # offset past the larger groups assigned above
                floors[floor] = classes[leftover_classes * classes_per_floor + \
                    (i - leftover_classes) * (classes_per_floor - 1):
                    leftover_classes * (classes_per_floor) + \
                    (i - leftover_classes) * (classes_per_floor - 1) + \
                    classes_per_floor - 1]
    # invert dict for easier use
    floors_inv = {}
    for floor, classes in floors.items():
        for c in classes:
            floors_inv.update({c:floor})
    return floors, floors_inv
c18bdf55a1d7d881d41ad6b730b653db162c161e
91,656
import networkx


def subgraph_between_nodes(graph, source, frontier, include_frontier=False):
    """
    For a directed graph, return a subgraph that includes all nodes going from
    a source node to a target node.

    :param networkx.DiGraph graph: The directed graph.
    :param source: The source node.
    :param list frontier: A collection of target nodes.
    :param bool include_frontier: Should nodes in frontier be included in the
                                  subgraph.
    :return: A subgraph.
    :rtype: networkx.DiGraph
    """
    graph = networkx.DiGraph(graph)  # make a copy
    for pred in list(graph.predecessors(source)):
        # make sure we cannot go from any other node to the source node
        graph.remove_edge(pred, source)

    g0 = networkx.DiGraph()

    if source not in graph or any(node not in graph for node in frontier):
        raise KeyError("Source node or frontier nodes are not in the source graph.")

    # BFS on graph and add new nodes to g0
    queue = [ source ]
    traversed = set()
    frontier = set(frontier)

    while queue:
        node = queue.pop(0)
        traversed.add(node)
        for _, succ, data in graph.out_edges(node, data=True):
            # copy the edge (with its attributes) into the result graph
            g0.add_edge(node, succ, **data)
            if succ in traversed or succ in frontier:
                continue
            # only keep expanding through successors that can still reach
            # some frontier node; dead-end branches are pruned below anyway
            for frontier_node in frontier:
                if networkx.has_path(graph, succ, frontier_node):
                    queue.append(succ)
                    break

    # recursively remove all nodes that have less than two neighbors
    to_remove = [ n for n in g0.nodes()
                  if n not in frontier and n is not source
                  and (g0.out_degree[n] == 0 or g0.in_degree[n] == 0) ]
    while to_remove:
        g0.remove_nodes_from(to_remove)
        to_remove = [ n for n in g0.nodes()
                      if n not in frontier and n is not source
                      and (g0.out_degree[n] == 0 or g0.in_degree[n] == 0) ]

    if not include_frontier:
        # remove the frontier nodes
        g0.remove_nodes_from(frontier)

    return g0
24d098c19296efc2ae6996f7d2eff01f0991b2bd
91,659
import re


def query_and_ref_names_from_path(filepath):
    """
    Return the query and reference bin names embedded in a ``.tsv`` file path
    whose basename has the form ``<query>_to_<ref>.tsv``.

    E.g. "potential_relpath/bin_a_to_bin_b.tsv" --> bin_a, bin_b

    :param filepath: string to search
    :return: dict containing the query ('query') and reference ('ref') names
    :raises AssertionError: if the pattern is not found in filepath
    """
    # \w matches a "word" character: a letter or digit or underbar [a-zA-Z0-9_];
    # '.', '#' and '-' are additionally allowed inside bin names.
    # Fixes: raw string (avoids invalid-escape warnings) and the dot before
    # "tsv" is now escaped so it matches a literal '.' only; the unreachable
    # `else: return None` after the assert was removed.
    search = r'([\w.#-]+)_to_([\w.#-]+)\.tsv'
    match = re.search(search, filepath)
    assert match, 'match not found for {} in {}'.format(search, filepath)
    return {'query': match.group(1), 'ref': match.group(2)}
9f8a9376e24b9ab927fd313f5cabcb58777de7c8
91,660
def prepare_attributes(attributes):
    """
    Prefix every event attribute name with "e_" so that event attributes can
    be told apart when trace attributes are considered.

    :param attributes: List of event attributes that the user wants to consider.
    :return: list of edited attribute names
    """
    return ["e_" + name for name in attributes]
643af9734febde9c7d9ddb5c144bb1c89e923127
91,661
def determine_annuity(
        months_to_legal_maturity: int,
        outstanding_balance: float,
        interest_rate: float) -> float:
    """Compute the constant monthly annuity payment of a mortgage.

    The mortgage is described by its remaining term in months (>= 1), its
    outstanding balance, and its annual interest rate as a decimal.
    """
    monthly_rate = interest_rate / 12
    # Accumulation factor over the remaining term.
    growth = (1 + monthly_rate) ** months_to_legal_maturity
    return outstanding_balance * monthly_rate * growth / (growth - 1)
eae57b2d10cdb66bb4906c458e8d785cbb9a761d
91,663
def format_repeat(protein_id, repeats):
    """Format repeats nicely for printing, one repeat per line.

    Protein_ID   Repeat1
                 Repeat2
                 Repeat3
    """
    # Column at which continuation lines start: the id width plus 4.
    margin = len(protein_id) + 4
    output = ''
    for i, repeat in enumerate(repeats):
        if i == 0:
            # First repeat goes on the same line as the protein id.
            # NOTE(review): a single space separates the id and the first
            # repeat here, while later lines are indented by len(id) + 4 --
            # the columns may not line up exactly; confirm intended alignment.
            output = f'{protein_id} {repeat}\n'
        else:
            space = ' ' * margin
            output += f'{space}{repeat}\n'
    return output
3a7145823661a8de21fea80290692e09f39e506b
91,664
def organization_analytics_by_voter_doc_template_values(url_root):
    """
    Show documentation about organizationAnalyticsByVoter.

    Builds the dict of template variables (parameter lists, sample response,
    status codes, links) consumed by the API documentation view.

    :param url_root: root URL of the server, passed through to the template
    :return: dict of template values
    """
    # Parameters that must accompany every request.
    required_query_parameter_list = [
        {
            'name': 'organization_we_vote_id',
            'value': 'string',  # boolean, integer, long, string
            'description': 'An organization\'s unique We Vote id.',
        },
        {
            'name': 'organization_api_pass_code',
            'value': 'string',  # boolean, integer, long, string
            'description': 'An organization\'s unique pass code for retrieving this data. '
                           'Not needed if organization is signed in.',
        },
        {
            'name': 'voter_device_id',
            'value': 'string',  # boolean, integer, long, string
            'description': 'Not needed if organization_api_pass_code is used.',
        },
        {
            'name': 'api_key',
            'value': 'string (from post, cookie, or get (in that order))',  # boolean, integer, long, string
            'description': 'The unique key provided to any organization using the WeVoteServer APIs',
        },
    ]
    # Optional filters narrowing the analytics returned.
    optional_query_parameter_list = [
        {
            'name': 'election_id',
            'value': 'integer',  # boolean, integer, long, string
            'description': 'Limit the results to just this election',
        },
        {
            'name': 'external_voter_id',
            'value': 'string',  # boolean, integer, long, string
            'description': 'Limit the results to just this voter',
        },
        {
            'name': 'voter_we_vote_id',
            'value': 'string',  # boolean, integer, long, string
            'description': 'Limit the results to just this voter',
        },
    ]
    # Intentionally empty for now: candidate status codes are kept here,
    # commented out, until the endpoint actually reports them.
    potential_status_codes_list = [
        # {
        #     'code': 'VALID_VOTER_DEVICE_ID_MISSING',
        #     'description': 'Cannot proceed. A valid voter_device_id parameter was not included.',
        # },
        # {
        #     'code': 'VALID_VOTER_ID_MISSING',
        #     'description': 'Cannot proceed. A valid voter_id was not found.',
        # },
    ]
    try_now_link_variables_dict = {
    }
    # Human-readable sketch of the JSON structure the endpoint returns.
    api_response = '{\n' \
                   '  "status": string,\n' \
                   '  "success": boolean,\n' \
                   '  "organization_we_vote_id": string,\n' \
                   '  "election_list": list\n' \
                   '   [\n' \
                   '     "election_id": string,\n' \
                   '     "election_name": string,\n' \
                   '     "election_date": string,\n' \
                   '     "election_state": string,\n' \
                   '   ],\n' \
                   '  "voter_list": list\n' \
                   '   [\n' \
                   '     "external_voter_id": string (Unique ID from organization),\n' \
                   '     "voter_we_vote_id": string (the voter\'s we vote id),\n' \
                   '     "elections_visited: list,\n' \
                   '      [\n' \
                   '        "election_id": string (the election if within we vote),\n' \
                   '        "support_count": integer (COMING SOON),\n' \
                   '        "oppose_count: integer (COMING SOON),\n' \
                   '        "friends_only_support_count": integer (COMING SOON),\n' \
                   '        "friends_only_oppose_count: integer (COMING SOON),\n' \
                   '        "friends_only_comments_count": integer (COMING SOON),\n' \
                   '        "public_support_count": integer (COMING SOON),\n' \
                   '        "public_oppose_count: integer (COMING SOON),\n' \
                   '        "public_comments_count": integer (COMING SOON),\n' \
                   '      ],\n' \
                   '   ],\n' \
                   '}'
    template_values = {
        'api_name': 'organizationAnalyticsByVoter',
        'api_slug': 'organizationAnalyticsByVoter',
        'api_introduction':
            "A list of voter-specific analytics about either a) one of your member's, or b) all of your members "
            "based on the variables you send with the request. These analytics come from visits to organization's "
            "custom URL, and not the main WeVote.US site.",
        'try_now_link': 'apis_v1:organizationAnalyticsByVoterView',
        'try_now_link_variables_dict': try_now_link_variables_dict,
        'url_root': url_root,
        'get_or_post': 'GET',
        'required_query_parameter_list': required_query_parameter_list,
        'optional_query_parameter_list': optional_query_parameter_list,
        'api_response': api_response,
        'api_response_notes': "",
        'potential_status_codes_list': potential_status_codes_list,
    }
    return template_values
77dc6a9ce0e6cb3416f3eb75aed6302e8002fa23
91,665
def unlist(listed):
    """Join the elements of a list into a single comma-separated string."""
    # str.join consumes the iterable directly; no generator wrapper needed.
    return ",".join(listed)
ab1130ad16d89abe734f12dc7f918aa417c8c5dd
91,672
def is_config_field(attr: str):
    """Return True when ``attr`` names a usable configuration field.

    A name is rejected when it starts with an underscore OR ends with one.

    NOTE(review): the original docstring spoke of names that "start and end
    with '__'", which does not match the check actually performed below --
    confirm which contract is intended.
    """
    return not (attr.startswith('_') or attr.endswith('_'))
7cd8ba2e4dab21594027d27a918de299d9500711
91,674
def remove_template(s): """Remove template wikimedia markup. Return a copy of `s` with all the wikimedia markup template removed. See http://meta.wikimedia.org/wiki/Help:Template for wikimedia templates details. Note: Since template can be nested, it is difficult remove them using regular expresssions. """ # Find the start and end position of each template by finding the opening # '{{' and closing '}}' n_open, n_close = 0, 0 starts, ends = [], [] in_template = False prev_c = None for i, c in enumerate(iter(s)): if not in_template: if c == '{' and c == prev_c: starts.append(i - 1) in_template = True n_open = 1 if in_template: if c == '{': n_open += 1 elif c == '}': n_close += 1 if n_open == n_close: ends.append(i) in_template = False n_open, n_close = 0, 0 prev_c = c # Remove all the templates s = ''.join([s[end + 1:start] for start, end in zip(starts + [None], [-1] + ends)]) return s
df26b8ab6bcb2455a5691a74c6e6bdfe969b98e3
91,691
def most_common(lst):
    """Return the most frequently occurring element in a list.

    Runs in O(n) via collections.Counter; the previous ``max(set(lst),
    key=lst.count)`` approach was O(n**2) and its tie-breaking depended on
    arbitrary set iteration order.  Ties are now broken in favour of the
    element that appears first in ``lst``.  An empty list still raises
    ValueError (from max), matching the original behaviour.
    """
    from collections import Counter  # local import keeps the snippet self-contained
    counts = Counter(lst)
    return max(counts, key=counts.get)
7ae46265edb24c8f71b631b60323191e9763c1d7
91,694
def is_in_window(n, window_center, tolerance) -> bool:
    """Return True when ``n`` lies within ``tolerance`` of ``window_center``
    (bounds inclusive)."""
    lower = window_center - tolerance
    upper = window_center + tolerance
    return lower <= n <= upper
815d56cd3e4d2f55d587dff536a03ee8a6320b3f
91,696
def extract_name_value(data: dict) -> tuple:
    """Pull the ``@Name``/``@Value`` pair out of a ``SimpleItem`` entry.

    When the entry is a list (repeated element), only the first item is
    considered.  Missing keys default to empty strings.
    """
    entry = data.get("SimpleItem", {})
    first = entry[0] if isinstance(entry, list) else entry
    return first.get("@Name", ""), first.get("@Value", "")
b6bdebf866f5387afeee194889ab1d1451840586
91,705
def freeSymbols(rv):
    """
    Extracts the free symbols/parameters of a probability distribution from a
    SymPy random variable, independent of the SymPy version.

    Note: In SymPy version <=1.0, the attribute "distribution" was found in
    rv._sorted_args[0].distribution, while as of version 1.1, it is found in
    rv._sorted_args[1].distribution.

    Args:
        rv: SymPy random variable

    Returns:
        Free symbols of a SymPy random variable, as a list
    """
    # EAFP: try the pre-1.1 location first; newer SymPy raises AttributeError
    # there, in which case the post-1.1 location is read instead.
    try:
        symbols = rv._sorted_args[0].distribution.free_symbols
    except AttributeError:
        symbols = rv._sorted_args[1].distribution.free_symbols
    return list(symbols)
b68e495a2620d973c6e066d4b54815524b42abca
91,706
def mean_representation(
        ground_truth_data,
        gaussian_encoder,
        random_state,
        save_path,
):
    """Extracts the mean representation from a Gaussian encoder.

    Args:
      ground_truth_data: GroundTruthData to be sampled from.
      gaussian_encoder: Function that takes observations as input and outputs
        a dictionary with mean and log variances of the encodings in the keys
        "mean" and "logvar" respectively.
      random_state: Numpy random state used for randomness.
      save_path: String with path where results can be saved.

    Returns:
      transform_fn: Function taking keyword arguments "mean" and "logvar" and
        returning the representation (the mean, unchanged).
      None, as no variables are saved.
    """
    # None of the inputs are needed: the mean representation is a fixed
    # transformation with no trainable state.
    del ground_truth_data, gaussian_encoder, random_state, save_path

    def transform_fn(mean, logvar):
        # The log-variance is ignored; the representation is just the mean.
        del logvar
        return mean

    return transform_fn, None
160fe19bc9088e824179fa6398e3d4054f9b8037
91,707
from functools import reduce
from operator import mul


def factorial(n):
    """
    Return n! = n * (n-1) * (n-2) * ... * 1, with 0! == 1.

    Bug fix: the original reduced over ``range(1, n)``, which omits ``n``
    itself and therefore computed (n-1)! instead of n!.
    """
    return reduce(mul, range(1, n + 1), 1)
e03d132282092f6825b24ad434afcdce3c09f4fe
91,708
import time
from functools import wraps


def timeit(method):
    """
    Decorator to time the execution of the decorated method/function.

    The wrapped callable's return value is passed through unchanged; the
    elapsed wall-clock time is printed in minutes and seconds.

    Fix: ``functools.wraps`` now preserves the decorated callable's
    ``__name__``/``__doc__`` metadata.
    """
    @wraps(method)
    def timed(*args, **kw):
        ts = time.time()
        result = method(*args, **kw)
        te = time.time()
        print(f'{method.__name__}: {(te - ts)/60:.2f} mins, '
              f'{(te - ts):.2f} seconds')
        return result
    return timed
f20e77e060c245f15909e2cfb87091b549400e53
91,709
def get_model_or_none(model, *args, **kwargs):
    """
    Fetch a single model instance, returning None instead of raising when it
    does not exist (catches the model's DoesNotExist).

    Keyword Arguments:
    model -- this is the model you want to query from
    other parameters are of variable length: e.g id=1 or username='jon.snow'
    """
    lookup = model.objects.get
    try:
        return lookup(*args, **kwargs)
    except model.DoesNotExist:
        return None
49465d96ba9c24006683b31e57d70d53fbdf5fb0
91,713
def _serialize_numeric_array(data):
    """Serializes a numeric numpy array into bytes.

    Args:
      data: A numeric numpy array.

    Returns:
      The array's raw buffer contents as ``bytes``, ready to be written to
      disk.
    """
    raw = data.tobytes()
    return raw
554a65e4597ccbdeaece6146c9f1ca87d4eb901c
91,714
def flatten_collections(items: list) -> list:
    """Recursively flatten nested collections into a flat list of request dicts.

    Elements may be lists of collections, collection dicts holding their
    requests under an 'item' key, or plain request dicts.
    """
    flat = []
    for entry in items:
        if isinstance(entry, list):
            # A list of collections.
            flat.extend(flatten_collections(entry))
        elif isinstance(entry, dict) and 'item' in entry:
            # A collection: recurse into its requests.
            flat.extend(flatten_collections(entry['item']))
        else:
            # A request itself.
            flat.append(entry)
    return flat
67cda7a19c09cbf5f626619b4b45aacccbbe8b15
91,716
import torch


def cutoffs() -> torch.Tensor:
    """Position-count cutoffs used when computing ranking metrics."""
    return torch.tensor((10, 20, 50, 100))
583bf3af737c7f9a4bdb3d3b307fd1fa3fbc9ee8
91,717
import re


def GetBitstreamVersion(aBitstreamFile):
    """Extract a dotted version number (e.g. '1.2.3') from the given bitstream file.

    :param aBitstreamFile: path of the file to scan (read in text mode)
    :return: the first 'X.Y.Z' version string found, or 'Unknown'
    """
    # 'with' guarantees the handle is closed even when read() raises,
    # fixing the leak in the original open()/read()/close() sequence.
    with open(aBitstreamFile, 'rt') as f:
        data = f.read()
    ver = re.search(r'[0-9]+\.[0-9]+\.[0-9]+', data)
    if ver:
        return ver.group(0)
    return 'Unknown'
010a63afc0459be3233c23ac0bfeb0a9140768ea
91,718
import time


def next_timestamp(boundary):
    """Return the timestamp of the first moment strictly after the current
    time that falls on a multiple of ``boundary`` seconds."""
    now = time.time()
    # Round the current time down to the boundary, then step one boundary up.
    return now + (boundary - now % boundary)
aa51f500a7e0a3e373100ff5f68cc85651580ff8
91,722
def is_even(number):
    """
    Check if `number` is even.

    Parameters
    ----------
    number : integer
        The integer to be checked

    Returns
    -------
    boolean
        True when `number` is divisible by 2, False otherwise.
    """
    remainder = number % 2
    return remainder == 0
3da12c90c576dcf5d81a1f2b3c3264193eda7e43
91,734
import time


def send_command(remote_conn, cmd="", delay=1):
    """
    Send command down the channel. Retrieve and return the output.

    :param remote_conn: open channel exposing send() / recv_ready() / recv()
        -- presumably a paramiko-style SSH channel; TODO confirm exact type
    :param cmd: command string to send; when empty, nothing is written and
        the function only polls for pending output
    :param delay: seconds to sleep after sending, giving the remote side
        time to produce output
    :return: whatever recv() yields, or "" when no data is ready
    """
    MAX_BUFFER=65000
    if cmd != "":
        cmd = cmd.strip()
        remote_conn.send(cmd + '\n')
    # Give the remote side time to respond before polling.
    time.sleep(delay)
    if remote_conn.recv_ready():
        return remote_conn.recv(MAX_BUFFER)
    else:
        return ""
4f3e20b13e1618e224fcd99292598e287cc3d31f
91,735
def parse_python_version(ver_str: str) -> tuple[int, ...]:
    """Turn a dotted version string like "3.10.4" into a tuple of ints, so
    versions compare correctly with ordinary tuple comparison."""
    return tuple(map(int, ver_str.split(".")))
1efb70ba1284126a40a1efc9d23635d4bd5856bb
91,736
import inspect


def get_function_signature(func):
    """
    Return the signature string of the specified function.

    >>> def foo(name): pass
    >>> get_function_signature(foo)
    'foo(name)'
    >>> something = 'Hello'
    >>> get_function_signature(something)
    Traceback (most recent call last):
    ...
    TypeError: The argument must be a function object: None type is <class 'str'>
    """
    if func is None:
        return 'Function is None'
    # Objects without __name__ (e.g. plain strings) fall back to 'None',
    # which then shows up in the TypeError message below.
    func_name = getattr(func, '__name__', 'None')
    if not inspect.isfunction(func):
        raise TypeError('The argument must be a function object: %s type is %s' % (func_name, type(func)))
    return func_name + str(inspect.signature(func))
8e627e5685604d38ad7ecaaeb05c531da29ec093
91,737
def findExtremePoints(c):
    """
    Locate the four extreme points of a contour.

    :param c: a contour; assumes OpenCV layout (N, 1, 2) with x at index 0
        and y at index 1 -- confirm against caller
    :return: (l, r, t, b): leftmost, rightmost, topmost and bottommost
        points, each as an (x, y) tuple
    """
    xs = c[:, :, 0]
    ys = c[:, :, 1]
    leftmost = tuple(c[xs.argmin()][0])
    rightmost = tuple(c[xs.argmax()][0])
    topmost = tuple(c[ys.argmin()][0])
    bottommost = tuple(c[ys.argmax()][0])
    return leftmost, rightmost, topmost, bottommost
33ac534da744e66e8051a6409cf0dae15e6e4e74
91,743
def find_relative_radius(level_diff):
    """
    Find the relative radius of a node with respect to the root node, which
    is assumed to have side-length 1 (i.e. 2**level_diff / 2).
    """
    return (1 << level_diff) / 2
1dd4202c9d448010573d31c3e97fd194fffc94fa
91,745
def _is_intstring(input_arg: str):
    """
    Report whether the command-line argument can be parsed as an int.

    Parameters
    ---------
    input_arg
        The argument passed in by the user through the CLI

    Returns True when ``int(input_arg)`` succeeds, False otherwise.
    """
    try:
        int(input_arg)
    except ValueError:
        return False
    return True
2ffbf4278f91ea09114b00bc95b03edca364a7b0
91,746
def count_neighbours(grid, row, col):
    """
    Count the chips in the Moore neighbourhood of a cell.

    Input: Three arguments. A grid as a tuple of tuples with integers (1/0),
    a row number and column number for a cell as integers.
    Output: How many neighbouring cells have chips as an integer.

    Bug fix: the original built its in-bounds list with ``range(len(grid))``
    for the column axis as well, which mis-counts on non-square grids; it
    also did an O(n^2) membership scan of that list.
    """
    # Offsets of the eight Moore neighbours.
    NEIGHBORS = ((-1, -1), (-1, 0), (-1, 1),
                 (0, -1),           (0, 1),
                 (1, -1),  (1, 0),  (1, 1))
    n_rows = len(grid)
    n_cols = len(grid[0]) if n_rows else 0
    total = 0
    for dr, dc in NEIGHBORS:
        r, c = row + dr, col + dc
        # Direct bounds check: correct for rectangular grids.
        if 0 <= r < n_rows and 0 <= c < n_cols:
            total += grid[r][c]
    return total
e1eb77744c18cc45e04480894060f7af85d0cc18
91,747
def has_trigger_lemmas(metadata, lemmas=("infection", "death", "hospitalization")):
    """
    Return True if any lemmas in the metadata dict match lemmas of interest.

    By default, lemmas of interest are "infection", "death", and
    "hospitalization". Written to improve readability, since this is used a
    lot in the annotation functions.

    Fixes: the default is now an immutable tuple (mutable-default-argument
    pitfall) and any() consumes a generator instead of a temporary list.
    """
    return any(lemma in metadata["attributes"] for lemma in lemmas)
ae7f27fba40392d378619bd83c3537a2a2518d15
91,752
from datetime import datetime


def str_to_date_tz_naive(str_to_convert):
    """Parse a ``<YYYY>-<MM>-<DD>`` string into a timezone-naive datetime.

    Args:
        str_to_convert (string): The string to convert in the format of
            <YYYY>-<MM>-<DD>

    Returns:
        datetime: midnight of the given date, with no tzinfo attached.
    """
    return datetime.strptime(str_to_convert, "%Y-%m-%d")
73951767affa842462b029ce3fb5500f5d04a304
91,755
import itertools


def resolve_sublists(lists):
    """Resolve the sublists in a list: when the first element is itself a
    list, flatten one level of nesting; otherwise return the input
    unchanged."""
    if lists and isinstance(lists[0], list):
        return list(itertools.chain.from_iterable(lists))
    return lists
c9749d384fe4cf201e7a7102aeb46b7cf9a68db6
91,758
def get_db_data(info_dict, enum_type):
    """
    :param info_dict: db info dict
    :param enum_type: db field enum
    :return: tuple of field values defined in enum_type; empty string for any
        field not present in info_dict

    Fix: the original returned a single-use generator expression while the
    docstring promised a tuple; the result is now materialised as an actual
    (reusable, indexable) tuple.
    """
    return tuple(info_dict.get(field.value, "") for field in enum_type)
9a11dae1475fcfa3a50cee5ab9354a9518f9cea4
91,765
def dump_args(args):
    """
    Convert args (argparse.Namespace) to a printable, aligned
    "name = value" string, one attribute per line.
    """
    attrs = vars(args)
    # Pad every name to the longest one so the '=' signs line up.
    width = max(len(name) for name in attrs)
    return "\n".join(
        "{:{key_len}s} = {}".format(name, value, key_len=width)
        for name, value in attrs.items()
    )
5dfcda683e760507e5a6a1aee5cd7a67eec151d1
91,775
def _check_remove_item(the_list, item):
    """Helper function for merge_lists that implements checking wether an
    items should be removed from the list and doing so if needed.

    An item is a removal marker when it is a string starting with '~'; the
    marked name is then deleted from ``the_list`` (first occurrence only).

    Returns ``True`` if the item has been removed and ``False`` otherwise.

    Fixes: honours the documented True/False contract when the marked name
    is absent (previously that path did not return an explicit boolean), and
    uses ``list.remove`` instead of ``del lst[lst.index(x)]``.
    """
    if not isinstance(item, str):
        return False
    if not item.startswith('~'):
        return False
    actual_item = item[1:]
    if actual_item not in the_list:
        # Marker for something that is not present: nothing was removed.
        return False
    the_list.remove(actual_item)
    return True
426fbdab5d89d1f052fdfb2686e571c5430418c5
91,782
def corrected_pas(partitionA, partitionB, taxlen=None, excluded=None):
    """
    Computed corrected partition agreement score.

    The corrected partition agreement score corrects for singleton character
    states and for character states that recur in all the taxonomic units in
    the data. These extreme cases are successively ignored when computing
    the partition agreement score.

    @param partitionA, partitionB: set partitions to be compared
    @param taxlen: if set to None, the number of taxa will be computed from
        partitionA (and, separately, from partitionB)
    @param excluded: value returned when every cell pair was excluded as a
        singleton or as full coverage (defaults to None)
    """
    links, matches = [], []
    # prune by getting number of taxa described by partition
    if not taxlen:
        # Count the distinct taxa appearing anywhere in partitionA ...
        all_taxa = set()
        for prt in partitionA:
            for taxon in prt:
                all_taxa.add(taxon)
        taxlenA = len(all_taxa)
        # ... and, independently, in partitionB.
        all_taxa = set()
        for prt in partitionB:
            for taxon in prt:
                all_taxa.add(taxon)
        taxlenB = len(all_taxa)
    else:
        taxlenA, taxlenB = taxlen, taxlen
    # Compare every pair of cells, skipping singletons (len <= 1) and cells
    # covering all taxa -- the "extreme cases" mentioned above.
    for i, prtB in enumerate(partitionB):
        for j, prtA in enumerate(partitionA):
            if taxlenA > len(prtA) > 1 and taxlenB > len(prtB) > 1:
                if prtA.intersection(prtB):
                    # Overlapping cells: a potential agreement.
                    links += [1]
                    if prtB.issubset(prtA):
                        # Full containment: an actual agreement.
                        matches += [1]
    if matches:
        return sum(matches)/sum(links)
    elif links:
        return 0
    # No cell pair survived the pruning: signal "excluded".
    return excluded
9fb8b6a30498808eb1ba528f5b897a4aced3b54e
91,784