import torch


def precision(pr, gt, eps=1e-7, threshold=None, ignore_channels=None, num_classes=None):
    """Calculate precision score between ground truth and prediction

    Args:
        pr (torch.Tensor): predicted tensor
        gt (torch.Tensor): ground truth tensor
        eps (float): epsilon to avoid zero division
        threshold: threshold for outputs binarization
        num_classes (int, optional): if given, return a list of per-class scores

    Returns:
        float: precision score
    """
    if num_classes is None:
        scores = []
        for prs, gts in zip(pr, gt):
            # iterate over channels; renamed to avoid shadowing the arguments
            for pr_ch, gt_ch in zip(prs, gts):
                tp = torch.sum(gt_ch * pr_ch)
                fp = torch.sum(pr_ch) - tp
                score = tp / (tp + fp + eps)
                scores.append(score)
        return sum(scores) / len(scores)
    else:
        scores = [0.0] * num_classes
        batch_size = len(pr)
        for prs, gts in zip(pr, gt):
            for i, pr_ch, gt_ch in zip(range(num_classes), prs, gts):
                tp = torch.sum(gt_ch * pr_ch)
                fp = torch.sum(pr_ch) - tp
                score = tp / (tp + fp + eps)
                scores[i] += score / batch_size
        return scores
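A quick smoke test of both code paths, assuming made-up binary masks (shapes and values are illustrative, not from the source):

import torch

# hypothetical binary masks: batch of 2, 3 channels, 4x4 pixels
gt = torch.ones(2, 3, 4, 4)
pr = gt.clone()  # perfect prediction

print(precision(pr, gt))                 # ≈ 1.0 (eps keeps it just below)
print(precision(pr, gt, num_classes=3))  # list of per-class scores, each ≈ 1.0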
3193364df01fa502c9baba836190aff2ae4eff96
696,078
def count_the_letters(count_words):
    """Find the number of distinct letters."""
    letters = {}
    for word in count_words.keys():
        for letter in word:
            letters.setdefault(letter, 0)
            letters[letter] += count_words[word]
    return len(letters.keys())
f0fce074aa85ee72f93f6066ac509ea7f678ec75
696,079
def extract_x_positions(parameter, joining_string="X"):
    """
    Find the positions within a string which are X and return them as a list,
    with the length of the string appended as the final element.

    :param parameter: str
        the string for interrogation
    :param joining_string: str
        the string of interest whose character positions need to be found
    :return: list
        list of all the indices where the X character occurs in the string,
        followed by the total length of the string
    """
    return [loc for loc in range(len(parameter)) if parameter[loc] == joining_string] + [
        len(parameter)
    ]
5843d7a86823b960bb1c99379174f60697850378
696,080
def char2cid(char, char2id_dict, OOV="<oov>"):
    """
    Transform single character to character index.

    :param char: a character
    :param char2id_dict: a dict map characters to indexes
    :param OOV: a token that represents Out-of-Vocabulary characters
    :return: int index of the character
    """
    if char in char2id_dict:
        return char2id_dict[char]
    return char2id_dict[OOV]
4a872cb12f11ed8ba2f3369749a3a2f356b7b97e
696,081
import torch


def _torch_get_default_dtype() -> torch.dtype:
    """A torchscript-compatible version of torch.get_default_dtype()"""
    return torch.empty(0).dtype
12f99b26756601bec9f1043d5f995fc9e3cd3d39
696,082
def bin_to_dec(binary: str) -> int:
    """Return the param binary string as a decimal integer."""
    decimal = 0
    place_value = 1
    for bit in binary[::-1]:
        if bit == '1':
            decimal += place_value
        place_value *= 2
    return decimal
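A couple of spot checks for the manual base-2 conversion:

assert bin_to_dec('1011') == 11        # 8 + 2 + 1
assert bin_to_dec('0') == 0
assert bin_to_dec('11111111') == 255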
40917bab1739aed79721cc8eb6f09a37d60c15aa
696,084
import re


def extractCN(dn):
    """Given the dn of an object, this extracts the cn."""
    return re.findall('CN=(.*?),', dn)[0]
dad91c436b5035664dd6d463e0e626949b6cd838
696,086
def packRangeBits(bitSet):
    """Given a set of bit numbers, return the corresponding ulUnicodeRange1,
    ulUnicodeRange2, ulUnicodeRange3 and ulUnicodeRange4 for the OS/2 table.

    >>> packRangeBits(set([0]))
    (1, 0, 0, 0)
    >>> packRangeBits(set([32]))
    (0, 1, 0, 0)
    >>> packRangeBits(set([96]))
    (0, 0, 0, 1)
    >>> packRangeBits(set([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
    ...                    16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
    ...                    30, 31, 32, 65, 98]))
    (4294967295, 1, 2, 4)
    >>> packRangeBits(set(range(128)))
    (4294967295, 4294967295, 4294967295, 4294967295)
    >>> 0xffffffff
    4294967295
    """
    bitNum = 0
    bitFields = []
    for i in range(4):
        bitField = 0
        for localBitNum in range(32):
            if bitNum in bitSet:
                mask = 1 << localBitNum
                bitField |= mask
            bitNum += 1
        bitFields.append(bitField)
    assert bitNum == 128
    ur1, ur2, ur3, ur4 = bitFields
    return ur1, ur2, ur3, ur4
a4484da8635efe9c1ddc5259563ff6db5b2b5ed4
696,087
def reward_min_waiting_time(state, *args):
    """Minimizing the waiting time.

    Params:
    ------
    * state: ilurl.state.State
        captures the delay experienced by phases.

    Returns:
    -------
    * ret: dict<str, float>
        keys: tls_ids, values: rewards
    """
    try:
        wait_times = state.feature_map(
            filter_by=('waiting_time',)
        )
    except AttributeError:
        wait_times = state

    ret = {}
    for tls_id, phase_obs in wait_times.items():
        ret[tls_id] = -sum([dly for obs in phase_obs for dly in obs])
    return ret
85e88f15e560d761ac59bdbd31bfb78cdfe6936f
696,088
def is_published(file_path):
    """
    Returns whether an absolute file path refers to a published asset
    :param file_path: str, absolute path to a file
    :return: bool
    """
    return False
92c27584aab6955c5b7ef87cec656b7868d01873
696,089
def flatten_dict(dd, separator='_', prefix=''):
    """ Flattens the dictionary with eval metrics """
    return {
        prefix + separator + k if prefix else k: v
        for kk, vv in dd.items()
        for k, v in flatten_dict(vv, separator, kk).items()
    } if isinstance(dd, dict) else {prefix: dd}
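For instance, on a hypothetical metrics dict (not from the source):

metrics = {'train': {'loss': 0.3, 'acc': 0.9}, 'epoch': 5}
print(flatten_dict(metrics))
# {'train_loss': 0.3, 'train_acc': 0.9, 'epoch': 5}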
0a49c3b6fd3f221283d11a6b2795ac7c80cf8554
696,090
from pathlib import Path

import yaml


def get_config(base_path: Path):
    """
    Get the config file from the base path.

    :param base_path: The base path to the .fsh-validator.yml file.
    :return: Configuration
    """
    config_file = base_path / ".fsh-validator.yml"
    if not config_file.exists():
        return dict()
    # use a context manager so the file handle is closed
    with open(config_file) as f:
        return yaml.safe_load(f)
694aad52afda7588d44db9f22cc31f05e64358ac
696,091
def _combine_ind_ranges(ind_ranges_to_merge):
    """
    Utility function for subdivide

    Function that combines overlapping integer ranges.
    Example [[1,2,3], [2,3], [3], [4,5], [5]] -> [[1,2,3], [4,5]]
    """
    ind_ranges_to_merge = sorted(ind_ranges_to_merge)
    stack = []
    result = []
    for curr in ind_ranges_to_merge:
        if len(stack) == 0:
            stack.append(curr)
        elif stack[-1][-1] >= curr[0]:
            prev = stack.pop()
            merged = sorted(list(set(prev + curr)))
            stack.append(merged)
        else:
            prev = stack.pop()
            result.append(prev)
            stack.append(curr)
    result += stack
    return result
fcece4c58a0d231863b0bfb22bd3ff20bcd5858e
696,092
def nested_get(ind, coll, lazy=False):
    """ Get nested index from collection

    Examples
    --------

    >>> nested_get(1, 'abc')
    'b'
    >>> nested_get([1, 0], 'abc')
    ['b', 'a']
    >>> nested_get([[1, 0], [0, 1]], 'abc')
    [['b', 'a'], ['a', 'b']]
    """
    if isinstance(ind, list):
        if lazy:
            return (nested_get(i, coll, lazy=lazy) for i in ind)
        else:
            return [nested_get(i, coll, lazy=lazy) for i in ind]
    else:
        return coll[ind]
43be2411aa5319ef49d84b70ff83e5e6ab5cb2ea
696,093
def structural_similarity_index(image_true, image_generated, C1=0.01, C2=0.03):
    """Compute structural similarity index.

    Args:
        image_true: (Tensor) true image
        image_generated: (Tensor) generated image
        C1: (float) variable to stabilize the denominator
        C2: (float) variable to stabilize the denominator

    Returns:
        ssim: (float) structural similarity index"""
    mean_true = image_true.mean()
    mean_generated = image_generated.mean()
    std_true = image_true.std()
    std_generated = image_generated.std()
    covariance = (
        (image_true - mean_true) * (image_generated - mean_generated)).mean()

    numerator = (2 * mean_true * mean_generated + C1) * (2 * covariance + C2)
    denominator = ((mean_true ** 2 + mean_generated ** 2 + C1) *
                   (std_true ** 2 + std_generated ** 2 + C2))
    return numerator / denominator
0678192e5bf9238a91a6bce0d1bdb20f533d36e9
696,094
import hashlib


def get_md5_hash(to_hash):
    """Calculate the md5 hash of a string

    Args:
        to_hash: str
            The string to hash

    Returns:
        md5_hash: str
            The hex value of the md5 hash
    """
    return hashlib.md5(to_hash.encode('utf-8')).hexdigest()
118b5b87500b22780f541fa46ad54361c7e7440e
696,095
def calc_tile_locations(tile_size, image_size):
    """
    Divide an image into tiles to help us cover classes that are spread out.
    tile_size: size of tile to distribute
    image_size: original image size
    return: locations of the tiles
    """
    image_size_y, image_size_x = image_size
    locations = []
    for y in range(image_size_y // tile_size):
        for x in range(image_size_x // tile_size):
            x_offs = x * tile_size
            y_offs = y * tile_size
            locations.append((x_offs, y_offs))
    return locations
fa898d2b5da4a6d6482d52238eecd1460bd0d167
696,096
from typing import List
import os
import re
import glob


def list_files(filepath: str) -> List[str]:
    """
    List files within a given filepath.

    Parameters
    ----------
    filepath : str
        Supports wildcard "*" character.

    Returns
    -------
    list
        list of filepaths
    """
    if os.path.isdir(filepath) and len(re.findall(r"[\w.]$", filepath)) > 0:
        filepath = filepath + "/*"
    if filepath.endswith("/"):
        filepath = filepath + "*"
    return [file for file in glob.glob(filepath) if os.path.isfile(file)]
a11f8d8b1739b2d383c9b7c1322b7547e1ebdbf1
696,098
import math


def schedule(epoch, initial_learning_rate, lr_decay_start_epoch):
    """Defines exponentially decaying learning rate."""
    if epoch < lr_decay_start_epoch:
        return initial_learning_rate
    else:
        return initial_learning_rate * math.exp(
            (10 * initial_learning_rate) * (lr_decay_start_epoch - epoch))
27157889fe5339bd6b99fa0a74b222a7ee0abdad
696,099
def join(G, u, v, theta, alpha, metric):
    """Returns ``True`` if and only if the nodes whose attributes are
    ``du`` and ``dv`` should be joined, according to the threshold
    condition for geographical threshold graphs.

    ``G`` is an undirected NetworkX graph, and ``u`` and ``v`` are nodes
    in that graph. The nodes must have node attributes ``'pos'`` and
    ``'weight'``.

    ``metric`` is a distance metric.
    """
    du, dv = G.nodes[u], G.nodes[v]
    u_pos, v_pos = du['pos'], dv['pos']
    u_weight, v_weight = du['weight'], dv['weight']
    return theta * metric(u_pos, v_pos) ** alpha <= u_weight + v_weight
8968ea954be10cf3c3e2ed2c87748b00da0d850a
696,101
def epsg_for_UTM(zone, hemisphere):
    """
    Return EPSG code for given UTM zone and hemisphere using WGS84 datum.

    :param zone: UTM zone
    :param hemisphere: hemisphere either 'N' or 'S'
    :return: corresponding EPSG code
    """
    if hemisphere not in ['N', 'S']:
        raise Exception('Invalid hemisphere ("N" or "S").')

    if zone < 0 or zone > 60:
        raise Exception('UTM zone outside valid range.')

    if hemisphere == 'N':
        ns = 600
    else:
        ns = 700

    if zone == 0:
        zone = 61

    return int(32000 + ns + zone)
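Two spot checks against well-known WGS84 UTM codes:

assert epsg_for_UTM(33, 'N') == 32633   # UTM zone 33N
assert epsg_for_UTM(33, 'S') == 32733   # UTM zone 33S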
c448ffd7b18e605f938c7e8fa294a29218f74d36
696,102
def estimate_flag(bflag, corr_moments, cr_moments, cr_ldr, bounds_unfiltered_moments):
    """retrieve the integer flag from the binary flag
    the available integer flags are:

    .. code-block:: python

        {0: 'not influenced', 1: 'hydromet only',
         2: 'plankton', 3: 'low snr',
         4: '', 5: 'melting layer'}

    Args:
        bflag (dict): binary flag dict
        corr_moments (list): list with the corrected moments
        cr_moments (list): list with the cloud radar moments
        cr_ldr (float): cloud radar ldr in dB
        bounds_unfiltered_moments (list): all peak boundaries

    Returns:
        add_to_binary_flag, flag, flag_doc
    """
    # do not overwrite bflag here! (this has to be done at top level)
    addbflag = {'low_snr': 0, 'plankton': 0, 'melting_layer': 0}
    flag_doc = {0: 'not influenced', 1: 'hydromet only',
                2: 'plankton', 3: 'low snr',
                4: '', 5: 'melting layer'}

    bins_above_noise = sum([b[1] - b[0] for b in bounds_unfiltered_moments])
    bounds_unfiltered_moments.sort()
    bin_max_span = bounds_unfiltered_moments[-1][-1] - bounds_unfiltered_moments[0][0]

    if bflag["particle_influence"] == 1:
        flag = 1
        if corr_moments[0].snr < 10:
            addbflag['low_snr'] = 1
            flag = 3
        if cr_ldr > -13:
            if cr_moments[0].Z < -3:
                addbflag['plankton'] = 1
                flag = 2
            else:
                addbflag['melting_layer'] = 1
                flag = 5
        else:
            if (len(corr_moments) > 4
                    or bins_above_noise > 140
                    or bin_max_span > 180
                    or bflag['hs_higher_noise'] == 1):
                addbflag['melting_layer'] = 1
                flag = 5
    else:
        flag = 0
    return addbflag, flag, flag_doc
8e600a4972de64cb0171239359b5609779723eab
696,103
def _get_type_name(ot):
    """
    Examples
    --------
    >>> _get_type_name(int)
    'int'
    >>> _get_type_name(Tuple)
    'Tuple'
    >>> _get_type_name(Optional[Tuple])
    'typing.Union[typing.Tuple, NoneType]'
    """
    if hasattr(ot, "_name") and ot._name:
        return ot._name
    elif hasattr(ot, "__name__") and ot.__name__:
        return ot.__name__
    else:
        return str(ot)
f6c6b13550128ca63baf3db1490df64dbf7795d0
696,104
import torch


def _safe_check_pinned(tensor: torch.Tensor) -> bool:
    """Check whether or not a tensor is pinned.
    If torch cannot initialize cuda, returns False instead of error."""
    try:
        return torch.cuda.is_available() and tensor.is_pinned()
    except RuntimeError:
        return False
6d023bf0554ac41834f421d07ea7959952dcc9e8
696,105
def roi_input():
    """input function for 1. asking how many ROIs"""
    while True:
        roi_nums = input("How many ROIs would you like to select?: ")
        try:
            rois = int(roi_nums)
            print("Will do", roi_nums, "region/s of interest")
            return rois
        except ValueError:
            print("Input must be an integer")
e975759b318c7c20cd3c84d153530c90617ab435
696,106
def to_int(string):
    """
    Convert string to int

    >>> to_int("42")
    42
    """
    try:
        number = int(float(string))
    except (ValueError, TypeError):
        number = 0
    return number
3a475d3d3bc6257a0a6b52c71951a6b614e1926f
696,107
import sys


def getCodeTextModeForPY():
    """returns replace for python and surrogateescape for python3"""
    # restructured to avoid a return inside a finally block, which would
    # silently swallow exceptions
    if sys.version_info >= (3, 2):
        return str("surrogateescape")
    return str("replace")
640ca685b9fcc14a4f5d20ba3386decf0bf9644f
696,108
import random


def roll_dice(num_dice, die_type):
    """
    3d6, for example, would be num_dice = 3, die_type = 6
    """
    result = 0
    for i in range(num_dice):
        result += random.randint(1, die_type)
    return result
98b3ed6107cdc2ced8467c36d7bd436c8a74d7b2
696,109
def sum_list(list_to_sum):
    """Function to sum the items in the input list."""
    return sum(list_to_sum)
e4a922888d9ed229b0c74b4e9006cae7ba02c976
696,110
import torch


def numpy_to_tensor(data):
    """Transform numpy arrays to torch tensors."""
    return torch.from_numpy(data)
06c2aee2081bbb017d9b33065c6925c589378df9
696,111
import array
import socket


def recv_fds_once(sock, msglen, maxfds, fd_out):
    """Helper function from Python stdlib docs."""
    fds = array.array("i")   # Array of ints
    msg, ancdata, flags, addr = sock.recvmsg(
        msglen, socket.CMSG_LEN(maxfds * fds.itemsize))
    for cmsg_level, cmsg_type, cmsg_data in ancdata:
        if cmsg_level == socket.SOL_SOCKET and cmsg_type == socket.SCM_RIGHTS:
            # Append data, ignoring any truncated integers at the end.
            fds.frombytes(cmsg_data[:len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
    fd_out.extend(fds)
    return msg
219517916a15e5a407574fb36091e8de46be6b8f
696,112
def unwrap(ftext):
    """We know that there will only be nested parens in the first argument"""
    l = ftext.index('(')
    fname = ftext[:l]
    argstring = ftext[l + 1:-1]
    if ')' in argstring:
        r = argstring.rindex(')')
        arg0 = argstring[:r + 1]
        args = [arg0] + argstring[r + 2:].split(',')
    else:
        args = argstring.split(',')
    return fname, args
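Tracing the split on a hypothetical call string:

fname, args = unwrap("f(g(x),y)")
print(fname, args)   # f ['g(x)', 'y']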
e16be7796cf50e0821c17ff3fb9788546300bc63
696,115
def asint(x):
    """Convert x to int without raising an exception; return 0 instead."""
    try:
        return int(x)
    except (ValueError, TypeError):
        return 0
b8ebcd4efc43c24726d35f7da80a5001b44b6f17
696,116
import typing


def sort(array: list) -> list:
    """Insertion sort implementation."""
    for j in range(1, len(array)):
        key: typing.Any = array[j]
        i: int = j - 1
        while i > -1 and key < array[i]:
            array[i + 1] = array[i]
            i = i - 1
        array[i + 1] = key
    return array
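The sort is in-place and also returns the list:

data = [5, 2, 4, 6, 1, 3]
print(sort(data))   # [1, 2, 3, 4, 5, 6]
print(data)         # same list object, now sorted in place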
f3d36f95f3b7fc3e64593e23b9a21544fc880383
696,118
def line_width(segs):
    """
    Return the screen column width of one line of a text layout structure.

    This function ignores any existing shift applied to the line,
    represented by an (amount, None) tuple at the start of the line.
    """
    sc = 0
    seglist = segs
    if segs and len(segs[0]) == 2 and segs[0][1] is None:
        seglist = segs[1:]
    for s in seglist:
        sc += s[0]
    return sc
7f6585126a0ecdbab4d1d371e23ddc279dce5b75
696,119
def compute_opt_weight(env, t):
    """
    Computes the optimal weight of the risky asset for a given environment
    at a time t.

    Arguments
    ---------
    :param env : Environment instance
        Environment instance specifying the RL environment.
    :param t : int
        Period in episode for which the optimal weight of the risky asset
        should be computed.

    Returns
    -------
    :returns opt : float
        The optimal weight of the risky asset in the given environment in
        period t.
    """
    env.time = t
    # regime in time t:
    idxt = [t in v["periods"] for v in env.regimes.values()].index(True)
    rt = list(env.regimes.keys())[idxt]
    mu = env.regimes[rt]["mu"]
    sigma = env.regimes[rt]["sigma"]
    opt = (mu[1] - mu[0] + (sigma[1]**2) / 2) / (env.theta * sigma[1]**2)
    return opt
92a05ea1172871328ab88ac990ed760012634018
696,120
def to_term(pauli):
    """Convert to Term from Pauli operator (X, Y, Z, I).

    Args:
        pauli (X, Y, Z or I): A Pauli operator

    Returns:
        Term: A `Term` object.
    """
    return pauli.to_term()
9f38f6f4ab26d63c43ab7b40d390c07ba428c649
696,121
import torch


def get_diff(samples_1, samples_2, device):
    """
    Get the Frobenius norm of the difference between samples_1 and samples_2

    :param device: cpu or cuda
    :param samples_1: (batch_size, c, h, w) or (batch_size, h, w, c)
    :param samples_2: (batch_size, c, h, w) or (batch_size, h, w, c)
    :return: (batch_size, 3) dimension tensor of difference at each dimension
    """
    samples_1, samples_2 = samples_1.to(device), samples_2.to(device)
    if samples_1.shape[1] != 3:
        samples_1 = samples_1.clone().permute(0, 3, 1, 2).to(device)
    if samples_2.shape[1] != 3:
        samples_2 = samples_2.clone().permute(0, 3, 1, 2).to(device)
    batch_size = samples_1.shape[0]
    num_channel = samples_1.shape[1]
    diff = samples_1 - samples_2
    return torch.norm(diff.view(batch_size, num_channel, -1), dim=2).to(device)
7879c3b2545ec1f5532e36acffff3e83326f01dc
696,122
def get_max_amount(chart_details: dict) -> dict:
    """Calculates a maximum amount of Created and Resolved issues.

    Parameters:
    ----------
    chart_details:
        Chart coordinates.

    Returns:
    ----------
    Total counts.
    """
    max_amounts = {
        "created_total_count": max(chart_details["created_line"].values()),
        "resolved_total_count": max(chart_details["resolved_line"].values()),
    }
    return max_amounts
69dacc2d150aa09ab8e94151d0f1f32a95341ce9
696,123
def is_palindrome(string: str) -> bool:
    """ Test if given string is a palindrome """
    return string == string[::-1]
1a94f7f2889d6d13080198729825347b939a0a68
696,124
import sys


def read_dict(line, file_name):
    """
    Read lines from character .txt file into dicts.
    Turns values into ints when possible.

    dictionary: dict to be returned
    pairs: key-value pairs
    pair[0]: key
    pair[1]: value
    """
    dictionary = {}
    pairs = line.strip().split(', ')
    for i in pairs:
        pair = i.split(":")
        try:
            pair[1] = int(pair[1])
        except:  # pylint: disable=W0702
            sys.exit(f'Non-integer value found in {file_name}.')
        dictionary[pair[0]] = pair[1]
    return dictionary
909f219fb7f1104814a8c66c295c459d29b3e74e
696,125
def _format_s3_error_code(error_code: str):
    """Formats a message to describe an S3 error code."""
    return f"S3 error with code: '{error_code}'"
9498a531392f0f18e99c9b8cb7a8364b0bff1f9a
696,126
import numpy


def calc_area_array_params(x1, y1, x2, y2, res_x, res_y, align_x=None, align_y=None):
    """
    Compute a coherent min and max position and shape given a resolution.

    Basically we may know the desired corners but they won't fit perfectly
    based on the resolution. So we will compute what the adjusted corners
    would be and the number of cells needed based on the resolution provided.

    ex: (0, 10, 5, 0, 4, 3) would return (0, 0, 8, 12, 2, 4)
    The minimum is held constant (0, 0); the other corner would be moved from
    (5, 10) to (8, 12) because the resolution was (4, 3) and there would be
    2 columns (xsize) and 4 rows (ysize).

    Parameters
    ----------
    x1
        an X corner coordinate
    y1
        a Y corner coordinate
    x2
        an X corner coordinate
    y2
        a Y corner coordinate
    res_x
        pixel size in x direction
    res_y
        pixel size in y direction
    align_x
        if supplied the min_x will be shifted to align to an integer cell
        offset from the align_x, if None then no effect
    align_y
        if supplied the min_y will be shifted to align to an integer cell
        offset from the align_y, if None then no effect

    Returns
    -------
    min_x, min_y, max_x, max_y, cols (shape_x), rows (shape_y)
    """
    min_x = min(x1, x2)
    min_y = min(y1, y2)
    max_x = max(x1, x2)
    max_y = max(y1, y2)
    if align_x:
        min_x -= (min_x - align_x) % res_x
    if align_y:
        min_y -= (min_y - align_y) % res_y
    shape_x = int(numpy.ceil((max_x - min_x) / res_x))
    shape_y = int(numpy.ceil((max_y - min_y) / res_y))
    max_x = shape_x * res_x + min_x
    max_y = shape_y * res_y + min_y
    return min_x, min_y, max_x, max_y, shape_x, shape_y
a02dc70511c28a3f096edbccfac5a4d79be44096
696,127
import hashlib


def hash_ecfp_pair(ecfp_pair, size):
    """Returns an int < size representing that ECFP pair.

    Input must be a tuple of strings. This utility is primarily used for
    spatial contact featurizers. For example, if a protein and ligand have a
    close contact region, the first string could be the protein's fragment
    and the second the ligand's fragment. The pair could be hashed together
    to achieve one hash value for this contact region.

    Parameters
    ----------
    ecfp_pair: tuple
        Pair of ECFP fragment strings
    size: int
        Hash to an int in range [0, size)
    """
    ecfp = "%s,%s" % (ecfp_pair[0], ecfp_pair[1])
    ecfp = ecfp.encode('utf-8')
    md5 = hashlib.md5()
    md5.update(ecfp)
    digest = md5.hexdigest()
    ecfp_hash = int(digest, 16) % (size)
    return ecfp_hash
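Because md5 is deterministic, the same fragment pair always lands in the same bucket (the fragment strings below are made up for illustration):

bucket = hash_ecfp_pair(("C1=CC=CC=C1", "CCO"), size=1024)
assert 0 <= bucket < 1024
assert bucket == hash_ecfp_pair(("C1=CC=CC=C1", "CCO"), size=1024)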
b2e4107ee59ce2c801d10b832e258d567875d987
696,128
import requests


def get_keeper_token(base_url, username, password):
    """Get a temporary auth token from ltd-keeper."""
    token_endpoint = base_url + '/token'
    r = requests.get(token_endpoint, auth=(username, password))
    if r.status_code != 200:
        raise RuntimeError('Could not authenticate to {0}: error {1:d}\n{2}'.
                           format(base_url, r.status_code, r.json()))
    return r.json()['token']
514c69c8b5ed8f2eb9a237e0fe201da4eac04865
696,129
def calculate_carbon_from_biovolume(invalue, category):
    """Calculate the cellular carbon from the given biovolume value based on
    what category the image is assigned and how large it is. Conversion
    formulas are from Table 4 in Menden-Deuer and Lessard (2000).

    inputs:
        invalue (float) = the biovolume value from the features file converted to microns
        category (str) = the category to which the image was assigned
    returns:
        carbon_value (float) = the carbon calculated from the formulas
    """
    diatoms = ['Asterionellopsis', 'Centric', 'Ch_simplex', 'Chaetoceros',
               'Corethron', 'Cylindrotheca', 'Cymatosira', 'DactFragCeratul',
               'Ditlyum', 'Eucampia', 'Eucampiacornuta', 'Guinardia',
               'Hemiaulus', 'Leptocylindrus', 'Licmophora', 'Melosira',
               'Odontella', 'Pleurosigma', 'Pseudonitzschia', 'Rhizosolenia',
               'Skeletonema', 'Thalassionema', 'Thalassiosira', 'centric10',
               'pennate',
               ]
    if category in diatoms:
        if invalue > 3000.:  # diatoms > 3000 cubic microns (um**3)
            carbon_value = (10**(-0.933)) * (invalue ** 0.881)
        else:
            carbon_value = (10**(-0.541)) * (invalue ** 0.811)
    else:
        if invalue < 3000.:  # protist plankton < 3000 cubic microns (um**3)
            carbon_value = (10**(-0.583)) * (invalue ** 0.860)
        else:
            carbon_value = (10**(-0.665)) * (invalue ** 0.939)
    return carbon_value
9dc978697854482590fdb0ef78b6b903934d97de
696,130
import torch


def get_peak_gpu_memory(device='cuda:0'):
    """
    :return: maximum memory cached (Byte)
    """
    return torch.cuda.max_memory_reserved(device)
824816f8fe6c46db784d93de53b4f06b645d3b19
696,131
import os


def CurrentWorkingDirectory():
    """Return the Current Working Directory, CWD or PWD."""
    return os.getcwd()
04620298821c15c0ed651f585f01260627ce803e
696,132
import os
import json


def load_json_file(json_filename, verbose=False):
    """Load json file given filename."""
    assert os.path.exists(json_filename), f'{json_filename} does not exist!'
    if verbose:
        print(f'Loading {json_filename} ...')
    with open(json_filename, 'r') as f:
        return json.load(f)
b16c639c0ce92dabaa8d80b2a2eaecdd95affe47
696,133
def sort_lists(sorted_indices, list_to_sort):
    """
    Given a list of indices and a list to sort, sort the list using the
    sorted_indices order.

    :param sorted_indices: a list of indices in the order they should be,
        e.g. [0,4,2,3]
    :param list_to_sort: the list which needs to be sorted in the index order
        from sorted_indices
    :return: sorted list
    """
    return [list_to_sort[i] for i in sorted_indices]
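For example:

print(sort_lists([2, 0, 1], ['a', 'b', 'c']))   # ['c', 'a', 'b']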
215bc8f29125dcd327b608c0b9d12f7614bd3403
696,134
def ratio_to_int(percentage, max_val):
    """Converts a ratio to an integer if it is smaller than 1."""
    if 1 <= percentage <= max_val:
        out = percentage
    elif 0 <= percentage < 1:
        out = percentage * max_val
    else:
        raise ValueError(
            "percentage={} outside of [0,{}].".format(percentage, max_val))
    return int(out)
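Both calling conventions in action:

assert ratio_to_int(0.25, 200) == 50   # ratio below 1 is scaled by max_val
assert ratio_to_int(80, 200) == 80     # values in [1, max_val] pass through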
c2cebcfffcb9e1427036e5dd37a692ab6f451000
696,135
def sum_of_coordinates(point):
    """Given a 2D point (represented as a Point object), returns the sum
    of its X- and Y-coordinates."""
    return point.getX() + point.getY()
afe177220460366aae35e9012119281d887c247e
696,136
def name_value_str_handler(name):
    """
    Return a generic handler for plain string fields.
    """
    def handler(value, **kwargs):
        return {name: value}

    return handler
58c1805a9b081edef850661b7b30a49d044d1a2f
696,137
from typing import AnyStr

import json


def load_json_dict(text: AnyStr) -> dict:
    """Loads from JSON and checks that the result is a dict.

    Raises
    ------
    ValueError
        if `text` is not valid JSON or is valid JSON but not a dict
    """
    ans = json.loads(text)
    if not isinstance(ans, dict):
        raise ValueError('not a dict')
    return ans
3f35c6eed694f8b8087a7ee1252ef9fa99864280
696,138
import re


def remove_superscript_numbers_in_passage(text):
    """
    A helper function that removes all superscript numbers with optional
    trailing space from a given string. Mainly used to hide passage numbers
    in a given block of text.

    :param text: String to process
    :type text: str
    :return: String with the superscript numbers that have a trailing space removed
    :rtype: str

    >>> remove_superscript_numbers_in_passage('⁰ ¹ ² ³ ⁴ ⁵ ⁶ ⁷ ⁸ ⁹ ')
    ''
    >>> remove_superscript_numbers_in_passage('E=mc²')
    'E=mc'
    """
    return re.sub(r'[⁰¹²³⁴⁵⁶⁷⁸⁹]+\s?', '', text)
767630743a8a55e1bcb139dc303ffbd778734690
696,139
def default(input_str, name):
    """
    Return default if no input_str, otherwise stripped input_str.
    """
    if not input_str:
        return name
    return input_str.strip()
1ba52bbe9118513a0f77abffdbcf3835c5335b35
696,140
def color_green(string: str) -> str:
    """
    Returns string colored with green

    :param str string:
    :return str:
    """
    return "\033[92m{}\033[0m".format(string)
60bc4e04a8ada8ac44eb03e069528fe448988ebe
696,141
def trapz(x, y):
    """Trapezoidal integration written in numba.

    Parameters
    ----------
    x : array_like
        sample points corresponding to the `y` values.
    y : array_like
        Input array to integrate

    Returns
    -------
    total : float
        Definite integral as approximated by the trapezoidal rule.
    """
    n = x.shape[0]
    total = 0
    for i in range(n - 1):
        total += 0.5 * (y[i] + y[i + 1]) * (x[i + 1] - x[i])
    return total
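A sanity check with numpy arrays, integrating y = x over [0, 1] (exact value 0.5; the trapezoidal rule is exact for linear functions):

import numpy as np

x = np.linspace(0.0, 1.0, 11)
print(trapz(x, x))   # 0.5, up to float rounding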
316d97cc8703dc28c8821c68e21e25bf144a8a54
696,142
def find_sum_subseq(nums, target):
    """
    Let `nums` be a list of positive integers and let `target` be a positive
    integer.

    Find a contiguous subsequence (of length at least 2) in `nums` whose sum
    is `target` and return the subsequence indices for slicing if it exists,
    else return False.

    We slide a variable-sized window across the `nums` array and track the
    cumulative sum of array vals, ensuring the window always has length at
    least 2. As `nums` contains only positive integers, adding an element to
    the end of the window always increases the array sum, whilst removing an
    element from the start of the window always decreases the array sum.
    """
    low, high = 0, 2
    cum_sum = nums[0] + nums[1]

    while high < len(nums):
        # Check if the current subsequence (of length at least 2) sums to `target`
        if cum_sum == target:
            return low, high
        # If the cumulative sum is too low or our subsequence has length 2, add another element
        elif cum_sum < target or high - low == 2:
            cum_sum += nums[high]
            high += 1
        # Otherwise the cumulative sum exceeds the target and we can remove an element
        else:
            cum_sum -= nums[low]
            low += 1

    # Check if we found a suitable subsequence on the last iteration
    return (low, high) if cum_sum == target else False
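Tracing the window on a small input:

bounds = find_sum_subseq([1, 2, 3, 4, 5], 9)
print(bounds)                  # (1, 4)
print([1, 2, 3, 4, 5][1:4])    # [2, 3, 4], which sums to 9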
eb141ec0892d1d2baf256910e0368704ccda8299
696,143
def key_gen(params):
    """Generates a fresh key pair"""
    _, g, o = params
    priv = o.random()
    pub = priv * g
    return (pub, priv)
c554fdcda209d591ac952ea43a69163f0448dd28
696,144
def fill_na(symbols_map, symbols_list):
    """Fill symbol map with 'N/A' for unmapped symbols."""
    filled_map = symbols_map.copy()
    for s in symbols_list:
        if s not in filled_map:
            filled_map[s] = 'N/A'
    return filled_map
2aafe8e083e23938b002b4dc4d0ad39401cd66fb
696,145
import math


def fdis(x, n1, n2):
    """
    Probability density function of the F distribution.

    Parameters:
        x: (float) abscissa of a sample point
        n1, n2: degrees of freedom

    Returns:
        (float) the probability density at the sample point x
    """
    if x > 0:
        y = ((math.gamma((n1 + n2) / 2) * (n1 / n2))
             / (math.gamma(n1 / 2) * math.gamma(n2 / 2))
             * ((n1 / n2) * x) ** ((n1 / 2) - 1)
             * (1 + (n1 * x / n2)) ** (-(n1 + n2) / 2))
        return y
    elif x <= 0:
        return 0
96abbe1bc3ff7988bd9a7349e7d26e657bfe6258
696,146
def is_dummy_vector(vector, vector_length=None):
    """
    Return True if the vector is the default vector, False if it is not.
    """
    if vector_length is None:
        vector_length = len(vector)
    return vector == [1e-7] * vector_length
0d53a6e9a6fdc7d382ad57630fdbf5e73a32507a
696,147
import requests


def execute_graphql_request(authorization: str, payload: dict):
    """Method to execute http request on the GraphQL API."""
    url = 'http://graphql:5433/graphql'  # url = get_parameter('graphql', 'url')
    headers = {'Content-Type': 'application/json'}
    if authorization:
        headers['Authorization'] = authorization
    response = requests.post(url, headers=headers, json=payload)
    data = response.json()
    return data
7c9535e29c02e1ed674cee26d4bb200687e0a42f
696,148
import os


def normalize_path(path):
    """
    Converts the supplied path to lower case, removes any unnecessary slashes
    and makes all slashes forward slashes. This is particularly useful for
    Windows systems.

    It is recommended to use in your settings if you are on Windows.

        MEDIA_ROOT = normalize_path(os.path.join(DIRNAME, 'static/'))
    """
    return os.path.normcase(os.path.normpath(path)).replace('\\', '/')
9ad60955bca0b8e15183f435729fc81bfe8576a4
696,149
def filterUsage(resource, value):
    """
    Indicates how the filter criteria is used. E.g., if this parameter is not
    provided, the Retrieve operation is a generic retrieve operation. If
    filterUsage is provided, the Retrieve operation is for resource <discovery>.

    :param resource:
    :type resource:
    :param value:
    :type value: bool
    :return:
    :rtype: bool
    """
    return True
863be4222cbbc9b95fcceef2bdefa3fc6c461e18
696,152
def normaliza(x, mu, des_std):
    """
    Normalize the data x

    @param x: an ndarray of shape (M, n) with the design matrix
    @param mu: an ndarray of shape (n,) with the means
    @param des_std: an ndarray of shape (n,) with the standard deviations

    @return: an ndarray of shape (M, n) with x normalized
    """
    return (x - mu) / des_std
8b8a51e8e3238dceeeebc6496f43e7d1eb10abde
696,153
def partialdate_to_string(obj):
    """
    :type obj: :class:`mbdata.types.PartialDate`
    """
    args = []
    formatstring = ""

    if obj.year is not None and obj.year != 0:
        formatstring += "%04d"
        args.append(obj.year)
    if obj.month is not None and obj.month != 0:
        formatstring += "-%02d"
        args.append(obj.month)
    if obj.day is not None and obj.day != 0:
        formatstring += "-%02d"
        args.append(obj.day)

    return formatstring % tuple(args)
32b7ea12cb18291898d3b9bd42966ba12dbfabe4
696,154
def key_matches_x509_crt(key, crt):
    """
    Verify that the public key derived from the given private key matches the
    public key in the given X.509 certificate.

    :param object key: A private key object created using load_privkey()
    :param object crt: An X.509 certificate object created using load_x509()
    :rtype bool: True, iff the key matches the certificate
    """
    return crt.public_key().public_numbers() == key.public_key().public_numbers()
f3a9cd3cbfc9df9d0095c0562c3251174a98c141
696,155
def _clean_header(text: str, is_unit: bool = False) -> str:
    """
    Extract header text from each raw trajectory summary csv file header.

    :param text: Raw trajectory summary csv column header text.
    :param is_unit: If True, return text with brackets for units.

    :returns: Formatted text.
    """
    # Return an empty string if there is no header found
    if "Unnamed" in text:
        return ""

    # Removes additional spaces and hashtags from text. Add brackets optionally.
    clean_header = " ".join(text.replace("#", "").split())
    if is_unit:
        clean_header = f" ({clean_header})"
    return clean_header
000ab01267e78d621fd8a8e6844523e7fa909ba4
696,156
def generate_exhibit(reported_message, report_id):
    """ generates exhibit doc to add to db """
    exhibit = {
        # info of REPORTED MESSAGE
        'reported_message_id': reported_message.id,
        # id of reported user (access)
        'reported_user_id': reported_message.author.id,
        # content of ORIGINAL reported message
        'reported_message': reported_message.content,
        # any embeds in message (NEED ARRAY OF OBJECTS)
        'reported_embed': [o.url for o in reported_message.embeds],
        # any attachments in message (NEED ARRAY OF OBJECTS)
        'reported_attachments': [o.url for o in reported_message.attachments],
        # time message sent
        'reported_timestamp': reported_message.timestamp,
        # array of edited messages as detected by bot on checks
        'reported_edits': ([] if reported_message.edited_timestamp is None
                           else [(reported_message.edited_timestamp, reported_message.content)]),
        # confirms if message is deleted
        'deleted': False,
        'times_reported': 1,
        # reports made about message
        'reports': [int(report_id)]
    }
    return exhibit
c58dd271c2f72e2ec95c45d74f110b05af905ddb
696,157
def find_return_probabilities(returns_list, return_mean):
    """
    Finds the probability of each outcome given a list of recent returns.
    Returns (pr(high_return), pr(low_return)).
    """
    high_return = 0
    low_return = 0
    high_prev_and_high_day = 0
    high_prev_and_low_day = 0
    low_prev_and_high_day = 0
    low_prev_and_low_day = 0
    count = 0
    for i in range(1, len(returns_list)):
        count += 1
        if returns_list[i - 1] > return_mean:
            if returns_list[i] > return_mean:
                high_return += 1
                high_prev_and_high_day += 1
            else:
                low_return += 1
                high_prev_and_low_day += 1
        else:
            if returns_list[i] > return_mean:
                high_return += 1
                low_prev_and_high_day += 1
            else:
                low_return += 1
                low_prev_and_low_day += 1
    if returns_list[len(returns_list) - 1] > return_mean:
        # (high high, high low)
        return ((high_prev_and_high_day / count) / (high_return / count),
                (high_prev_and_low_day / count) / (low_return / count))
    else:
        # (low high, low low)
        return ((low_prev_and_high_day / count) / (high_return / count),
                (low_prev_and_low_day / count) / (low_return / count))
7acabb5e6062a57b10aef2836eb3ddab813425a4
696,158
def getRootLayer(layer):
    """
    Find the root of the layer by going up the parent chain.

    in: MixinInterface.LayerProperties
    out: MixinInterface.LayerProperties
    """
    if layer.getParent() is not None:
        return getRootLayer(layer.getParent())
    else:
        return layer
11f39c4a91d3399f4eaa152bf6408ac5ce1ea866
696,159
def get_conv_type(data):
    """ Get the convolution type in data. """
    conv_type = data.conv_type.iloc[0]
    assert (data.conv_type == conv_type).all()
    return conv_type
5b66291790b921a917643cf3a3b2a96fc3c58243
696,160
import os


def read_test_files(test_file) -> list:
    """Read test files from txt file"""
    assert os.path.exists(test_file)
    with open(test_file, "r") as f:
        lines = f.readlines()
        lines = [l.strip() for l in lines]
    return lines
6c79be2efe3f8ff12626126553d4664b0c22083d
696,162
def extract_website_datas(website_container):
    """Function to extract website data; returns a dictionary with the name
    of the header as key and a list of website links as value."""
    website_info = {}
    # get all website links
    links_containers = website_container.find_elements_by_tag_name('a')
    # initialise website_links, which will contain a list of website links
    website_links = []
    # browse all objects in links_containers
    for elements in links_containers:
        # get and add the website link to website_links
        website_links.append(elements.get_attribute('href'))
    # add the header as key and website_links as value in website_info
    website_info["Websites"] = website_links
    return website_info
58da7d9aa228814281093d98fdf07783d467cf3b
696,163
def contains_toxicity(perspective_response):
    """Checking/returning comments with a toxicity value of over 50 percent."""
    is_toxic = False
    if (perspective_response['attributeScores']['TOXICITY']['summaryScore']
            ['value'] >= .5):
        is_toxic = True
    return is_toxic
3d70476193f160f3c79eb91568290a0943f54f7a
696,164
def bedToMultizInput(bedInterval):
    """
    Generate the proper input for fetching multiz alignments
    input: pybedtools Interval class
    output: chr, start, stop, strand
    """
    chromosome = bedInterval.chrom
    chromosome = chromosome.replace("chr", "")
    if bedInterval.strand == "+":
        strand = 1
    else:
        strand = -1
    return (chromosome, bedInterval.start, bedInterval.stop, strand)
1f8492872144c301314bcc217948e0449b242340
696,165
def comp_angle_rotor_initial(self):
    """Set rotor initial angle as 0

    Parameters
    ----------
    self : Machine
        A Machine object

    Returns
    -------
    angle_rotor_initial: float
        rotor initial angle set to 0 [rad]
    """
    return 0
c2deed90b342b80d9a4fc3b872d2d1a70b4ac053
696,166
def datavalidation_result_format_change(log):
    """
    Change the format of the result log
    ex: [{"id":1,"name":"hj","quantity":2}]

    Args:
        log: log of datavalidation

    Returns:
        Changed format of log
        ex: [["id","name","quantity"],[1,"hj",2]]
    """
    result = []
    keys = []
    for key, value in log[0].items():
        keys.append(key)
    result.append(keys)
    for dic in log:
        values = []
        for key, value in dic.items():
            values.append(value)
        result.append(values)
    return result
4eb91d7b0a580aa02cdc69ec4585f22c30d0d233
696,167
def find_with(f, iter, default=None):
    """Find the value in an iterator satisfying f(x)"""
    return next((x for x in iter if f(x)), default)
8fac8902b8baaf1a28a83227bef3933a7b8cb293
696,168
def athinput(filename):
    """Read athinput file and returns a dictionary of dictionaries."""
    # Read data
    with open(filename, 'r') as athinput:
        # remove comments, extra whitespace, and empty lines
        lines = filter(None, [i.split('#')[0].strip() for i in athinput.readlines()])
    data = {}
    # split into blocks, first element will be empty
    blocks = ('\n'.join(lines)).split('<')[1:]

    # Function for interpreting strings numerically
    def typecast(x):
        if '_' in x:
            return x
        try:
            return int(x)
        except ValueError:
            pass
        try:
            return float(x)
        except ValueError:
            pass
        try:
            return complex(x)
        except ValueError:
            pass
        return x

    # Function for parsing assignment based on first '='
    def parse_line(line):
        out = [i.strip() for i in line.split('=')]
        out[1] = '='.join(out[1:])
        out[1] = typecast(out[1])
        return out[:2]

    # Assign values into dictionaries
    for block in blocks:
        info = list(filter(None, block.split('\n')))
        key = info.pop(0)[:-1]  # last character is '>'
        data[key] = dict(map(parse_line, info))
    return data
2fb9f499ff75fc3b61afd655baea7b92132fabcb
696,169
def vals_sortby_key(dict_to_sort):
    """
    Sort dict by keys alphanumerically, then return vals.
    Keys should be "feat_00, feat_01", or "stage_00, stage_01" etc.
    """
    return [val for (key, val) in sorted(dict_to_sort.items())]
4c5537dc555d92b1f4084821aa56cfb118d2a871
696,170
def calc_bg(spec, c1, c2, m):
    """Returns background under a peak"""
    if c1 > c2:
        raise ValueError("c1 must be less than c2")
    if c1 < 0:
        raise ValueError("c1 must be positive number above 0")
    if c2 > max(spec.channels):
        raise ValueError("c2 must be less than max number of channels")
    if m == 1:
        low_sum = sum(spec.counts[c1 - 2:c1])
        high_sum = sum(spec.counts[c2:c2 + 2])
        bg = (low_sum + high_sum) * ((c2 - c1 + 1) / 6)
    else:
        raise ValueError("m is not set to a valid method id")
    return bg
bf657e8451ad17f5bd23abe98297bed7e776deb7
696,171
def Delta(a, b, gapopen=-0.5, gapext=-0.7):
    """
    Helper function for swalignimpconstrained for affine gap penalties
    """
    if b > 0:
        return 0
    if b == 0 and a > 0:
        return gapopen
    return gapext
303ee6d4436f3338ea130416459c22d40aff0272
696,172
def no_add_maintenance_event_form_data():
    """gives invalid data for a test"""
    return {"title": "", "description": "", "event_start": "", "event_end": ""}
f72553f78cee262f4d23e8c09289fa655f22788d
696,173
def overlap_hashes(hash_target, hash_search):
    """Return a set of hashes common between the two mappings"""
    return set(hash_target).intersection(set(hash_search))
34d94ed23694d61d1c4da2717640c8ed5a9539db
696,175
def generate_conf(topics_partitions, broker_remove, broker_add, replication_factor):
    """
    Generate the needed config for the reassignment tool and rollback process

    Args:
        topics_partitions (list): List in which each entry represents a topic with its partitions
        broker_remove (int): Broker id that is being decommissioned
        broker_add (int): Broker id that is being added
        replication_factor (int): Number of ISR each partition should have

    Returns:
        dict: Dict that contains two entries, the "rollback" entry which represents the
        state of the cluster before the reassignment AND "reassignment" which contains
        the configuration needed for the reassignment tool.
    """
    reassignment_partitions_conf = []
    reassignment_conf = {"version": 1, "partitions": []}
    rollback_partitions_conf = []
    rollback_conf = {"version": 1, "partitions": []}
    leader_reassign_count = 0
    follower_reassign_count = 0

    for topic_partitions in topics_partitions:
        for partition_number in topic_partitions["partitions"]:
            topic_name = topic_partitions["topic_name"]
            print("Examining topic: {0} partition {1} . . .".format(topic_name, partition_number))
            in_sync_replicas = topic_partitions["partitions"][partition_number]

            # set up another rollback entry for the specific topic/partition
            rollback_topic_dict = {
                "topic": topic_name,
                "partition": int(partition_number),
                "replicas": in_sync_replicas
            }
            rollback_partitions_conf.append(rollback_topic_dict)

            # if the broker id is found in the ISR, modify the entry
            if broker_remove in in_sync_replicas:
                reassigned_in_sync_replicas = in_sync_replicas[:]
                i = list(reassigned_in_sync_replicas).index(broker_remove)
                if i == 0:
                    # the first entry in the isr list is the leader
                    leader_reassign_count += 1
                    print("Replacing leader for topic {0} and partition {1}"
                          .format(topic_name, partition_number))
                else:
                    follower_reassign_count += 1
                    print("Replacing follower for topic {0} and partition {1}"
                          .format(topic_name, partition_number))
                reassigned_in_sync_replicas[i] = broker_add

                if len(reassigned_in_sync_replicas) > replication_factor:
                    print("Found more ISR {0} than replication factor for topic: {1} and partition {2}"
                          .format(reassigned_in_sync_replicas, topic_name, partition_number))
                    reassigned_in_sync_replicas = reassigned_in_sync_replicas[:replication_factor]

                reassign_topic_dict = {
                    "topic": topic_name,
                    "partition": int(partition_number),
                    "replicas": reassigned_in_sync_replicas
                }
                reassignment_partitions_conf.append(reassign_topic_dict)
            print("- - - - - - - - - - - - - - ")

    reassignment_conf["partitions"] = reassignment_partitions_conf
    rollback_conf["partitions"] = rollback_partitions_conf
    print("Total leader reassignments: {0}".format(leader_reassign_count))
    print("Total follower reassignments: {0}".format(follower_reassign_count))
    return {
        "rollback": rollback_conf,
        "reassignment": reassignment_conf
    }
1e78975916f0484c14d072d995edaaf18ece5b09
696,176
def get_game_state_own(pieces):
    """
    :returns state in form of 0-|10-13,15,|41-38,45,|10-14,|
    """
    tiles_horizon = 6  # get the state of 4 pawns with +-6 fields
    players = pieces[0]
    player_0 = players[0]
    # check all the players and all their pieces and tell when they are in
    # range of +-6 fields from "my_pawn"
    # TODO: somehow save the information about the state in a string maybe?
    # In general it works but can't print when somebody was struck out!
    distance_between_players = 13
    state = ""
    for my_pid in range(len(player_0)):
        my_pawn = player_0[my_pid]
        if my_pawn == 0:
            state += str(my_pawn) + "-|"
        else:
            state += str(my_pawn) + "-"
            for i_player in range(1, len(players)):  # don't check own pawns
                # we have to check if there are enemy pawns near any of our pawns
                for i_pawn in range(len(players[i_player])):
                    enemy_pawn_now = players[i_player][i_pawn]
                    if enemy_pawn_now != 0:
                        # map enemy position to player_0 position - WORKS
                        enemy_pawn_now += i_player * distance_between_players
                        if enemy_pawn_now > 53:
                            enemy_pawn_now -= 53
                        # check if enemy pawns are in radius of the horizon
                        if enemy_pawn_now in range(my_pawn - tiles_horizon, my_pawn + tiles_horizon):
                            state += str(enemy_pawn_now) + ","
                            print("found enemy[%d] pawn = %d near player_0[%d] = %d"
                                  % (i_player, enemy_pawn_now, my_pid, my_pawn))
                            if my_pawn == enemy_pawn_now:
                                print("Someone should die")  # 37, 40, 19
            state += "|"
    return state
60dc3f1fa8e0df61e6e39c89fbed7c24b5203553
696,177
def _get_byte_size_factor(byte_suffix: str) -> int:
    """
    Returns the factor for a specific byte size.
    """
    byte_suffix = byte_suffix.lower()
    if byte_suffix == "b":
        return 1
    if byte_suffix in ("k", "kb"):
        return 1024
    if byte_suffix in ("m", "mb"):
        return 1024 * 1024
    if byte_suffix in ("g", "gb"):
        return 1024 * 1024 * 1024
    if byte_suffix in ("t", "tb"):
        return 1024 * 1024 * 1024 * 1024
    raise ValueError("Unsupported byte suffix")
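The suffix lookup is case-insensitive, so a size string can be parsed by multiplying the numeric part by the factor:

assert _get_byte_size_factor("KB") == 1024
assert _get_byte_size_factor("m") == 1024 * 1024
# e.g. parsing "500mb":
assert 500 * _get_byte_size_factor("mb") == 524288000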
eb7b0aaf03c6b231306980568fc93a45303d022b
696,178
import math


def constrained_factorial(x):
    """
    Same as `math.factorial`, but raises `ValueError` if x is
    under 0 or over 32,767.
    """
    if not (0 <= x < 32768):
        raise ValueError(f"{x!r} not in working 0-32,767 range")
    if math.isclose(x, int(x), abs_tol=1e-12):
        x = int(round(x))
    return math.factorial(x)
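Note the float tolerance: near-integer floats are rounded to int before the factorial is taken:

assert constrained_factorial(5) == 120
assert constrained_factorial(5.0) == 120   # isclose() check rounds it to int 5
try:
    constrained_factorial(-1)
except ValueError as e:
    print(e)                               # -1 not in working 0-32,767 range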
5bb853a479279aa124a271d9ff719e060bd18608
696,179
def original_contig_name(s):
    """Transform s to the original contig name"""
    n = s.split(".")[-1]
    try:
        int(n)
    except ValueError:
        return s, 0
    # Only small integers are likely to be indicating a cutup part.
    if int(n) < 1000:
        return ".".join(s.split(".")[:-1]), n
    else:
        # A large n indicates that the integer was part of the original contig
        return s, 0
ff5f256a3d5c2632f52cc26cdc92d9a333c701e9
696,180
def tuple_pull_to_front(orig_tuple, *tuple_keys_to_pull_to_front):
    """
    Args:
        orig_tuple: original tuple of type
            (('lS', 5.6), ('lT', 3.4000000000000004), ('ZT', 113.15789473684211), ('ZS', 32.10526315789474))
        *tuple_keys_to_pull_to_front: keys of those tuples that (in the given
            order) should be pulled to the front of the orig_tuple
    """
    orig_lst = list(orig_tuple)
    new_lst = []
    new_lst2 = []
    for otup in orig_lst:
        if otup[0] in tuple_keys_to_pull_to_front:
            new_lst.append(otup)
        else:
            new_lst2.append(otup)
    new_lst.extend(new_lst2)
    return new_lst
7177cc672747a42d2b4edb85a1ec95b1a289e5ab
696,181
def format_results(fuzzed_queries, formatter, responses):
    """ Format results """
    assert len(fuzzed_queries) == len(responses)
    for idx, result in enumerate(fuzzed_queries):
        response = responses[idx]
        # leftover debugging hook (breakpoint on multi-element responses) removed
        result["response"] = [response[0].replace("[UNK]", "_unk_")]
        result["emotion"] = formatter.data["emotion"][idx]
        result["vads"] = formatter.data["vads"][idx]
    return fuzzed_queries
6208be9ae5c99c1a372a32749709801a4a782924
696,182
import os


def check_dir(save_dir):
    """ Creates dir if not exists"""
    if not os.path.exists(save_dir):
        print("Directory %s does not exist, making it now" % save_dir)
        os.makedirs(save_dir)
        return False
    else:
        print("Directory %s exists, all good" % save_dir)
        return True
1c44349413e1ae57510dd2a9516f33193e8852d8
696,183
from typing import Any


def default_function(n: int, value: Any = None):
    """
    Creates a dummy default function to provide as default value when a func
    parameter is expected.

    `n` is the number of parameters expected.
    `value` is the default value returned by the function
    """
    if n == 0:
        return lambda: value
    elif n == 1:
        return lambda _: value
    elif n == 2:
        return lambda _, __: value
    else:
        raise Exception('Default function with {} parameters is not supported.'.format(n))
d8245fed39e423392acfbffd775379a2e15a8848
696,184
def valid_alien_token() -> bool:
    """Check if there is a valid AliEn token.

    The function must be robust enough to fetch all possible xrd error states
    which it usually gets from the stdout of the query process.

    Args:
        None.

    Returns:
        True if there is a valid AliEn token, or false otherwise.
    """
    # With JAliEn, this information is no longer available, so this is a no-op
    # that always just returns True.
    return True
022d38473d7aba3cf30c35ac0534d0d841bac0c6
696,185
def case_data(request):
    """
    A :class:`.CaseData` instance.
    """
    return request.param
2eaeaa8d9d41bf1cc9e886e071a45394d9760f03
696,186
def byte_literal(b):
    """
    If b is already a byte literal, return it. Otherwise, b is an integer
    which should be converted to a byte literal.

    This function is for compatibility with Python 2.6 and 3.x
    """
    if isinstance(b, int):
        return bytes([b])
    else:
        return b
88756b37de6884b3e68373756af76e849815786f
696,187
def _get_extent(gt, cols, rows):
    """
    Return the corner coordinates from a geotransform

    :param gt: geotransform
    :type gt: (float, float, float, float, float, float)
    :param cols: number of columns in the dataset
    :type cols: int
    :param rows: number of rows in the dataset
    :type rows: int
    :rtype: list of (list of float)
    :return: List of four corner coords: ul, ll, lr, ur

    >>> gt = (144.0, 0.00025, 0.0, -36.0, 0.0, -0.00025)
    >>> cols = 4000
    >>> rows = 4000
    >>> _get_extent(gt, cols, rows)
    [[144.0, -36.0], [144.0, -37.0], [145.0, -37.0], [145.0, -36.0]]
    """
    ext = []
    xarr = [0, cols]
    yarr = [0, rows]
    for px in xarr:
        for py in yarr:
            x = gt[0] + (px * gt[1]) + (py * gt[2])
            y = gt[3] + (px * gt[4]) + (py * gt[5])
            ext.append([x, y])
        yarr.reverse()
    return ext
95dfa01251925522b282450219d1d040f928f405
696,188