content
stringlengths
39
9.28k
sha1
stringlengths
40
40
id
int64
8
710k
import string


def range_converter(xl_col_length=3):
    """
    Construct conversions between Excel array ranges and Pythonic indices.

    :param xl_col_length: Length of the longest desired Excel column
        (e.g., 2 for "A" to "ZZ", 3 for "A" to "ZZZ")
    :return: tuple ``(convert_to_alpha, convert_to_num)`` where
        ``convert_to_alpha`` maps 1-based column numbers to letters and
        ``convert_to_num`` is the inverse mapping.
    """
    alpha = string.ascii_uppercase
    alpha_extended = list(alpha)
    # BUGFIX: grow one length at a time from the sequences of the previous
    # length only. The old code appended a letter to *every* accumulated
    # sequence, so for xl_col_length >= 3 short names like "AA" were
    # generated twice and the numbering broke.
    previous_length = list(alpha)
    for _ in range(2, xl_col_length + 1):
        previous_length = [seq + letter
                           for seq in previous_length
                           for letter in alpha]
        alpha_extended.extend(previous_length)
    convert_to_alpha = {num: name
                        for num, name in enumerate(alpha_extended, start=1)}
    convert_to_num = {name: num for num, name in convert_to_alpha.items()}
    return convert_to_alpha, convert_to_num
f904309b2428700005254b45567d2d503c2d3aa8
222,839
def parse_filename(fname, return_ext=True, verbose=False):
    """
    Parses `fname` (in BIDS-inspired format) and returns dictionary

    Parameters
    ----------
    fname : str or os.PathLike
        Filename to parse, of the form ``key1-val1_key2-val2.ext``
        (``feature`` segments are ignored)
    return_ext : bool, optional
        Whether to return extension of `fname` in addition to key-value
        dict. Default: True
    verbose : bool, optional
        Whether to print status messages. Default: False

    Returns
    -------
    info : dict
        Key-value pairs extracted from `fname`, or None (with a message
        printed) when the filename is malformed
    ext : str
        Extension of `fname`, only returned if `return_ext=True`
    """
    try:
        # everything before the first '.' is the name; the rest is the extension
        base, *ext = fname.split('.')
        fname_dict = dict([
            pair.split('-') for pair in base.split('_')
            if pair != 'feature'
        ])
    except ValueError:
        # a segment without exactly one '-' makes dict() blow up
        print('Wrong filename format!')
        return
    if verbose:
        print(fname_dict)
    if return_ext:
        return fname_dict, '.'.join(ext)
    return fname_dict
1512b50fa6d07a0bcbb69831418a935f28abe2d8
700,283
from datetime import datetime


def fromisoformat(isoformat):
    """
    Return a datetime parsed from a "YYYY-MM-DD HH:MM:SS" ISO 8601 string.

    >>> fromisoformat("2019-12-31 23:59:59")
    datetime.datetime(2019, 12, 31, 23, 59, 59)
    """
    if hasattr(datetime, "fromisoformat"):  # Python >= 3.7
        return datetime.fromisoformat(isoformat)
    # Older interpreters: fall back to an explicit format string.
    return datetime.strptime(isoformat, "%Y-%m-%d %H:%M:%S")
bcb7e277e907a5c05ca74fd3fbd7d6922c0d7a36
30,073
import re


def is_indvar(expr):
    """
    An individual variable must be a single lowercase character other than
    'e', 't', 'n', 's', followed by zero or more digits.

    @param expr: C{str}
    @return: C{boolean} True if expr is of the correct form
    """
    assert isinstance(expr, str), "%s is not a string" % expr
    # bool() so the documented boolean contract holds: re.match returns a
    # Match object or None, not True/False.
    return bool(re.match(r'^[a-df-mo-ru-z]\d*$', expr))
c00e62199263214596a0b9519868ffdeb86e9580
11,004
def add_decorators(p):
    """Adds ping and normalize decorators to pool."""
    def ping(con):
        return True

    def normalize(con):
        pass

    # Register the callbacks explicitly instead of via @-syntax.
    p.ping(ping)
    p.normalize_connection(normalize)
ba2b02564ff0b2b07ca301cf89b215b71102090b
89,972
def readtxt(ifile):
    """Read a text file and return its lines stripped of surrounding
    whitespace (including the trailing newline)."""
    with open(ifile, 'r') as handle:
        return [line.strip() for line in handle]
8a6490a9e2a49bb0a9fc9c0ac44b03de6b7addbc
592,344
def get_default_values(json_data):
    """
    Build a dict of default values for the properties of a JSON schema.

    Properties without a 'default' key in the schema map to None.

    :param json_data: json schema with a 'properties' mapping
    :return: dict of property name -> default value (or None)
    """
    return {
        name: spec.get('default')
        for name, spec in json_data['properties'].items()
    }
e5727d04fd0e2462a653d8ec4d0d5b2c7e83dc77
524,382
import torch


def compute_rank(predictions, targets):
    """Compute the rank (between 1 and n) of of the true target in ordered
    predictions

    Example:
        >>> import torch
        >>> compute_rank(torch.tensor([[.1, .7, 0., 0., .2, 0., 0.],
        ...                            [.1, .7, 0., 0., .2, 0., 0.],
        ...                            [.7, .2, .1, 0., 0., 0., 0.]]),
        ...              torch.tensor([4, 1, 3]))
        tensor([2, 1, 5])

    Args:
        predictions (torch.Tensor): [n_pred, n_node]
        targets (torch.Tensor): [n_pred]

    NOTE(review): `(upper + lower) / 2` is true division, which yields a
    float tensor on modern PyTorch, while the docstring example shows an
    integer tensor — confirm against the torch version in use.
    """
    n_pred = predictions.shape[0]
    # Score assigned to the true target of each prediction row.
    range_ = torch.arange(n_pred, device=predictions.device, dtype=torch.long)
    proba_targets = predictions[range_, targets]
    # Rank if ties are counted as better (strictly greater scores + 1) ...
    target_rank_upper = (proba_targets.unsqueeze(1) < predictions).long().sum(dim=1) + 1
    # ... and rank if ties are counted as worse (greater-or-equal scores).
    target_rank_lower = (proba_targets.unsqueeze(1) <= predictions).long().sum(dim=1)
    # break ties evenly by taking the mean rank
    target_rank = (target_rank_upper + target_rank_lower) / 2
    return target_rank
0aed5b14ef9b0f318239e98aa02d0ee5ed9aa758
13,819
import torch


def euclidean_dist(x, y):
    """
    Pairwise squared Euclidean distances between the rows of x (n x d)
    and the rows of y (m x d); returns an (n x m) tensor.
    """
    n, d = x.size(0), x.size(1)
    m = y.size(0)
    assert d == y.size(1)
    # Broadcast both sets to (n, m, d) and reduce over the feature axis.
    diff = x.unsqueeze(1).expand(n, m, d) - y.unsqueeze(0).expand(n, m, d)
    return diff.pow(2).sum(2)
31d1e60eac411e87b8c269a713b86b16ef7aa481
608,669
def merge(a, b):
    """Merge two already sorted lists into a single sorted list."""
    merged = []
    i = j = 0
    while i < len(a) and j < len(b):
        if a[i] < b[j]:
            merged.append(a[i])
            i += 1
        else:
            merged.append(b[j])
            j += 1
    # Exactly one of these slices is non-empty once the loop exits.
    merged.extend(a[i:])
    merged.extend(b[j:])
    return merged
10a337af7c42fac382774e95012ab0286db7483c
403,695
from typing import Iterable


def unique_list_conserving_order(iterable: Iterable) -> list:
    """List of unique elements in the original order

    >>> unique_list_conserving_order([3, 2, 3, 5, 1, 2, 6, 3, 5])
    [3, 2, 5, 1, 6]
    """
    # The original leaned on a side-effecting `seen.add(x)` inside a
    # comprehension condition, which only works because of left-to-right
    # evaluation order. Spell out the loop instead.
    seen = set()
    result = []
    for item in iterable:
        if item not in seen:
            seen.add(item)
            result.append(item)
    return result
861dc58dc556b54c72f44afe2372b4e0f1b6cf0d
444,410
def deepmap(func, seq):
    """Apply function inside nested lists

    >>> inc = lambda x: x + 1
    >>> deepmap(inc, [[1, 2], [3, 4]])
    [[2, 3], [4, 5]]
    """
    # Non-list leaves get the function applied directly.
    if not isinstance(seq, list):
        return func(seq)
    return [deepmap(func, element) for element in seq]
43856a93e472f30b84bf842586003952649369c0
29,678
def two_gaussian_potential_bc(vnew, f2, coords):
    """
    Apply Boundary Condition to the potential, force, and coordinates.

    Parameters:
    -----------

    vnew : float (or array of floats)
             Potential Energy

    f2 : float (or array of floats)
             Force

    coords : float
             coordinates

    Returns:
    --------

    vnew : float (or array of floats)
             Adjusted potential energy from boundary condition

    F : float (or array of floats)
             Adjusted force from boundary condition

    coords : float
             adjusted coordinates from boundary condition

    bcbias : float
             bias applied strictly from the boundary condition
    """
    vold = vnew
    bcbias = 0
    is_periodic = False
    # Outside either wall the potential is replaced by a steep quartic
    # restraint centered at +/-4; the offsets (-1.690133, -0.845067)
    # presumably make the potential continuous at the cutoffs -4.3193 and
    # 4.25882 — TODO confirm against the underlying two-Gaussian model.
    if (coords < -4.3193):
        vnew = 100.0 * (coords+4.0)**4.0 - 1.690133
        f2 = -100.0 * 4.0 * (coords+4.0)**3.0
        # Bias is the amount of energy added by the wall.
        bcbias = vnew - vold
    elif (coords > 4.25882):
        vnew = 100.0 * (coords-4.0)**4.0 - 0.845067
        f2 = -100.0 * 4.0 * (coords-4.0)**3.0
        bcbias = vnew - vold
    # Coordinates are returned unchanged; is_periodic is always False here.
    return (vnew, f2, coords, bcbias, is_periodic)
2d9eda2eb4db4a800f72f7b7eaf61c8508c5f848
59,885
def escape(string):
    """Quote an identifier so it is SQL safe.

    Raises if the identifier contains a double quote, since it cannot be
    safely wrapped.
    """
    if '"' in string:
        # BUGFIX: the old message claimed the problem was a backtick,
        # but the check is for a double quote.
        raise Exception("Can't escape identifier {} because it contains a double quote"
                        .format(string))
    return '"{}"'.format(string)
054b3b2908d10f82be41bfaa2e587d4e800dea84
653,644
def is_valid_channel_name(name):
    """
    Return True if the given name is a valid channel name.
    """
    # Length must be between 1 and 50 characters.
    if not 0 < len(name) <= 50:
        return False
    # Every character must come from the allowed alphabet.
    allowed = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789`~!@#$%^&*()-=_+[]{}\\|;':\"./<>?"  # noqa
    return set(name).issubset(allowed)
fd4db526972746d0f4fda9d8055e58c93da131c3
606,234
def env_wise_score(estimator, X, y, scorer, env, env_column):
    """
    Evaluate *estimator* with *scorer* on the subset of rows whose
    *env_column* value equals *env*.
    """
    in_env = X[env_column] == env
    return scorer(estimator, X[in_env], y[in_env])
d78f8c5e18e8b74395101eb2ca45c238c91930db
492,311
def execute(conn, query, *args):
    """Run *query* on *conn* with optional bind parameters and return all rows."""
    cursor = conn.cursor()
    cursor.execute(query, *args)
    rows = cursor.fetchall()
    return rows
9828119c07fba5f91cc7aed28d297d49b87d7c13
471,460
def infinity_norm(x):
    """
    Return the largest stored value of x.

    :param x: sparse matrix represented as dictionary of dictionaries of
        non zero values
    :return max_value: maximum stored value of x, or None if x is empty
    """
    max_value = None
    for line in x:
        for column in x[line]:
            value = x[line][column]
            # BUGFIX: compare against None explicitly. The old truthiness
            # test (`if not max_value`) treated a running maximum of 0 as
            # "unset" and let any later, possibly smaller, value replace it.
            if max_value is None or value > max_value:
                max_value = value
    return max_value
f2fa9bf4478c77251cc00c7831fef67751556d0e
289,616
def error_format(search):
    """
    :param search: inputted word
    :return: True if any character of the inputted word is not alphabetic,
        False otherwise.
    """
    for letter in search:
        if not letter.isalpha():
            return True
    # BUGFIX: the all-alphabetic path previously fell off the end and
    # returned None implicitly; an explicit False is equally falsy but
    # honors the documented bool contract.
    return False
3b6f6778cfe41e8a2e535f4fce71a7df602bced5
676,175
def double_char(s):
    """Return word with double characters."""
    return ''.join(ch * 2 for ch in s)
7460bf28267dafa68b776c4b24d32c9084272731
44,684
from typing import OrderedDict


def group_unique_values(items):
    """group items (pairs) into dict of lists.
    Values in each group stay in the original order and must be unique

    Args:
        items: iterable of key-value pairs

    Returns:
        dict of key -> lists (of unique values)

    Raises:
        ValueError if value in a group is a duplicate
    """
    grouped = OrderedDict()
    seen_per_key = OrderedDict()
    for key, value in items:
        if key not in grouped:
            grouped[key] = []
            seen_per_key[key] = set()
        # Reject a value already present in this key's group.
        if value in seen_per_key[key]:
            raise ValueError("Duplicate value: %s" % value)
        seen_per_key[key].add(value)
        grouped[key].append(value)
    return grouped
cd25d657117b34fe408c27149ddf034f3956383d
28,917
def wordCounter(word: str) -> int:
    """
    Count the whitespace-separated tokens in *word*.

    input: a str
    output: the number of tokens as int

    NOTE(review): the original (Korean) docstring described this as a
    character counter, but ``split()`` counts words — confirm intent.
    """
    return len(word.split())
b21d94664b75c6a0aa9a7403defe726330a8c0e9
310,756
def grad(u, *args):
    """
    Gradient of the cost function f(u) = ||u - B^-1 A v||_B,
    namely del(f(u)) = B @ u - A @ v.

    *args must unpack as (A, B, v).
    """
    A, B, v = args
    return B @ u - A @ v
6ee3275bdd76e2dd3451c9078353fa05dfecbe8f
509,360
def getKeyIdx(key, key2Idx):
    """Returns from the word2Idx table the word index for a given token,
    falling back to the UNKNOWN_TOKEN index for unseen tokens."""
    try:
        return key2Idx[key]
    except KeyError:
        return key2Idx["UNKNOWN_TOKEN"]
0638c7bd9bc03ca74fa2aff826cf80dac15e3bd0
128,744
import glob


def get_file_list(search_string):
    """
    Generates a list of files matching the given glob string

    Parameters:
        search_string: string
            glob-formatted search pattern

    Returns:
        file_list: list
            list of files matching pattern
    """
    return glob.glob(search_string)
6994b3d87c6a7c74e2d63513a5c5e43c72f3d54a
526,547
def clean_bibitem_string(string):
    """Removes surrounding whitespace, surrounding \\emph / \\textsc
    brackets etc."""
    cleaned = string.strip()
    # Peel \emph{...} first, then \textsc{...}, mirroring the original order.
    for prefix in ("\\emph{", "\\textsc{"):
        if cleaned.startswith(prefix) and cleaned.endswith("}"):
            cleaned = cleaned[len(prefix):-1]
    return cleaned
7420862d495dbb1faa35866da772b63d0bb0ccc3
295,407
def get_status_embeded_detail(status_embedded):
    """
    Retrieve details of status related fields.

    :param status_embedded: status details from response
    :return: status detail
    :rtype: dict
    """
    # Output key -> source key; missing fields default to ''.
    field_map = (
        ('State', 'state'),
        ('StateName', 'stateName'),
        ('StateDescription', 'stateDescription'),
        ('Status', 'status'),
        ('DurationInDays', 'durationInDays'),
        ('DueDate', 'dueDate'),
        ('ExpirationDate', 'expirationDate'),
    )
    return {out_key: status_embedded.get(src_key, '')
            for out_key, src_key in field_map}
6605b6daf1d2b80f2f8dc2e545f80059b610c17b
426,219
def _get_skip_class_names(skip_class_map): """Returns list of class names to skip Returned list only contains names of classes where all methods are skipped. If skip_class_map is None, returns None :param skip_class_map: Result of passing parsed arg for --skip command line argument to parse_test_names() :return: List of class names to skip """ if skip_class_map: return [ class_name for class_name, methods in skip_class_map.items() if not methods ] return None
892c4d6f70bbf5942dc5f480ba8cdb8c234d49e1
240,497
import logging


def _test_usability(imported_module, entity_type, entity_ids_arr):
    """
    This returns None if a module of a script is usable, otherwise an error
    message which explains why this script cannot be used in this context:
    Wrong platform, unavailable resources etc...

    :param imported_module: module object whose optional ``Usable`` callable
        decides availability
    :param entity_type: entity type passed through to ``Usable``
    :param entity_ids_arr: entity ids passed through to ``Usable``
    :return: None when usable, otherwise a short error message string
    """
    # PROBLEM: When an entire directory is not Usable because the file __init__.py
    # has a function Usable which returns False, then it still displays a directory, alone.
    # Unusable scripts are not displayed in the menu of the scripts of an entity,
    # except if a special flag is given, and in this case these error messages are displayed.
    try:
        is_usable = imported_module.Usable(entity_type, entity_ids_arr)
    except AttributeError as exc:
        # No Usable() hook at all: treat the module as usable.
        logging.info("entity_type=%s entity_ids_arr=%s module %s has no Usable attribute: Caught %s",
                     entity_type, str(entity_ids_arr), imported_module.__name__, exc)
        return None
    except Exception as exc:
        # A crashing Usable() is logged but also treated as usable.
        logging.error("entity_type=%s entity_ids_arr=%s module %s: Caught %s",
                      entity_type, str(entity_ids_arr), imported_module.__name__, exc)
        return None
    if is_usable:
        return None
    # Not usable: derive the explanation from Usable's docstring.
    error_msg = imported_module.Usable.__doc__
    if error_msg:
        error_msg = error_msg.strip()
        # Just take the first line of __doc__ string.
        error_msg = error_msg.split("\n")[0]
    else:
        # Fall back to the module name when Usable has no docstring.
        error_msg = imported_module.__name__
        if error_msg:
            error_msg += " not usable"
        else:
            error_msg = "No message"
    return error_msg
c317282b465dd7b9a230c07f37bb6e678a3b62c5
429,031
def pitch_to_midi_pitch(step, alter, octave):
    """Convert MusicXML pitch representation to MIDI pitch number.

    :param step: note letter 'A'-'G' (anything else is treated as 'C'=0,
        matching the original fall-through behavior)
    :param alter: chromatic alteration in semitones (e.g. 1 for sharp)
    :param octave: octave number; C4 (middle C) maps to MIDI 60
    """
    # Semitone offset of each natural note within an octave.
    step_to_semitone = {'C': 0, 'D': 2, 'E': 4, 'F': 5, 'G': 7, 'A': 9, 'B': 11}
    pitch_class = step_to_semitone.get(step, 0) + int(alter)
    return (12 + pitch_class) + (int(octave) * 12)
3f506c82523042ad6fcf5f34990ed2f6d4dc9316
399,932
def get_density(tensor):
    """
    Return the ratio of total elements to stored non-zeros of the tensor.

    NOTE(review): despite the name, this is the reciprocal of the usual
    fill fraction (nnz / total); preserved as-is for existing callers —
    confirm intended semantics.
    """
    total_elements = 1.0
    for dim in tensor.shape:
        total_elements *= dim
    return total_elements / tensor.nnz
249f200cd2d2f2c8059c38b6f9e7da1dd595b26a
462,379
def lint(ctx):
    """Run the linter (flake8 over calleee and tests) and return its exit code."""
    result = ctx.run('flake8 calleee tests', pty=True)
    return result.return_code
74c9dc88c9f1a5dc552e4f9cc7e24728c518bf49
537,144
import re def _match_label(pattern, issue): """ :param pattern: :param issue: :return: [True] if issue contains the pattern >>> _match_label('aaa', {'labels': [{'name': 'aaa'}]}) True >>> _match_label('aaa', {'labels': [{'name': 'bbb'}]}) False """ return any(re.search(pattern, label['name'], re.IGNORECASE) for label in issue['labels'])
e87d603d213950f86d9f968a97bdc8e752f1b79f
448,080
def sizeof_fmt(num, suffix='B'):
    """Returns human-readable file size

    Supports:
        * all currently known binary prefixes
        * negative and positive numbers
        * numbers larger than 1000 Yobibytes
        * arbitrary units (maybe you like to count in Gibibits!)

    Example:
    --------
    >>> sizeof_fmt(168963795964)
    '157.36 GiB'

    Parameters:
    -----------
    num : int
        Size of file (or data), in bytes
    suffix : str, optional
        Unit suffix, 'B' = bytes by default

    Returns:
    --------
    str
        <floating number> <prefix><unit>

    Note: the docstring example previously claimed '157.4GiB', which the
    "%3.2f %s%s" format never produced.
    """
    for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
        if abs(num) < 1024.0:
            return "%3.2f %s%s" % (num, unit, suffix)
        num /= 1024.0
    # Anything still >= 1024 Zi is reported in Yi.
    return "%.2f %s%s" % (num, 'Yi', suffix)
ba1b6cc8b7121a87ccee00bc333d77e4b251dd61
645,680
def min_value(uncert_val):
    """Minimum confidence interval for a ufloat quantity
    (nominal value minus one standard deviation)."""
    nominal = uncert_val.nominal_value
    sigma = uncert_val.std_dev
    return nominal - sigma
d8a227c92d89958cc5a4af4991c5f28e80445df7
213,004
def is_valid_port(port):
    """Checks if a given value might be an existing port.

    Must be between 1 and 65535, both included.

    :param port: Port candidate
    :type port: int, str
    :returns: True if is valid, False if not
    :rtype: bool
    """
    try:
        int_port = int(port)
    # BUGFIX: also catch TypeError so non-convertible candidates such as
    # None or a list yield False instead of raising.
    except (TypeError, ValueError):
        return False
    return 0 < int_port < 65536
4a6091f79a71c90ad4535b8008941ff47b89e83c
518,018
def squash_spaces(inp):
    """
    Collapse runs of whitespace in *inp* into single spaces.

    :param inp: (str)
    :return: the string with each whitespace run replaced by one space
        (leading/trailing whitespace removed, since split() discards it).
    """
    return ' '.join(inp.split())
5ba2f56fdc5d4a5d7fbc1d482115015fbfc8c7ee
375,667
def __carta(soup):
    """
    Gets the most read news from the Carta Capital page

    :param soup: the BeautifulSoup object
    :return: a list with the most read news from the Carta Capital Page,
        each entry a dict with 'title' and 'link' keys
    """
    news = []
    # The weekly most-read list lives in this <dd> container;
    # NOTE(review): raises AttributeError if the page layout changes and
    # the container is no longer found.
    container = soup.find('dd', id='fieldset-maisacessadas-semana')
    most_read = container.find_all('li')
    for item in most_read:
        # Each <li> wraps a single <a> with the headline text and URL.
        news.append(dict(title=item.a.string, link=item.a['href']))
    return news
867de5b58333cea3e84c811920b89135f1659f59
627,351
import json


def get_raw_metrics_from_file(filename):
    """
    Parse the given file as JSON and return a dictionary of the given
    metric names to their values.

    :param filename: JSON file name.
    :return: Dictionary of {beanName:metricName -> Value}; empty when the
        file is not valid JSON.
    """
    values = {}
    with open(filename) as mf:
        try:
            parsed = json.load(mf)
        except json.decoder.JSONDecodeError:
            print("WARNING: Failed to decode {} as valid json".format(filename))
            return values
    for bean in parsed['beans']:
        bean_name = bean['name']
        for metric, value in bean.items():
            values[bean_name + ':' + metric] = value
    return values
4ef4f47bc8a1d4f8629e9a6bbc382aa60e74d963
367,991
def replace_negative(l, default_value=0):
    """
    Replaces all negative values with default_value (in place).

    :param l: Original list
    :param default_value: The value to replace negatives values with.
        Default is 0.
    :return: Number of values replaced
    """
    n_replaced = 0
    for index, value in enumerate(l):
        if value < 0:
            l[index] = default_value
            n_replaced += 1
    return n_replaced
431781a48a36a00329537b92b589cf223b945ca4
13,536
from typing import List
import json


def load_train_data(train_data_files: List[str]) -> List:
    """Load jsonl train data as a list, ready to be ingested by spacy model.

    Args:
        train_data_files (List[str]): Paths of the jsonl files to load.

    Returns:
        List: Tuples of (text, {"entities": [...]}) to be used for training.
    """
    train_data = []
    for data_file in train_data_files:
        with open(data_file, "r") as f:
            # One JSON document per line (jsonl).
            for json_str in list(f):
                train_data_dict = json.loads(json_str)
                train_text = train_data_dict["text"]
                # Entities arrive as lists; spacy expects tuples.
                train_entities = {
                    "entities": [
                        tuple(entity_elt)
                        for entity_elt in train_data_dict["entities"]
                    ]
                }
                formatted_train_line = (train_text, train_entities)
                train_data.append(formatted_train_line)
    return train_data
78f0517c12b04a630d788ded3c56fd926b0990ec
616,700
def get_note_ancestor(e):
    """
    Return the first ancestor of `e` with tag "note", or None if there
    is no such ancestor.
    """
    return next((a for a in e.iterancestors() if a.tag == 'note'), None)
fd7f2dd7cefaa10460e609a7bc7972b7ba7c3898
457,622
from pathlib import Path


def read_final(path: Path) -> str:
    """
    Repeatedly read the file until it is non-empty.

    Some notebook editors empty the source file before updating it, so an
    empty read is treated as "try again".
    """
    while True:
        with path.open() as handle:
            text = handle.read()
        if text:
            return text
e9a8951d7fd14ec881a567018277cd3fcbb989f1
607,403
import ast
import uuid


def get_imports(path: str) -> dict:
    """
    Parse the module at *path* and collect its import statements.

    :param path: path to the Python module
    :return: dict keyed by random hex ids, each value describing one
        imported name ("import" or "from-import" entries)
    """
    with open(path, mode="r") as file_object:
        tree = ast.parse(file_object.read())
    imports = {}
    for node in ast.walk(tree):
        if isinstance(node, ast.Import):
            for alias in node.names:
                imports[uuid.uuid4().hex] = {
                    "name": alias.name,
                    "type": "import",
                    "alias": alias.asname,
                }
        elif isinstance(node, ast.ImportFrom):
            for alias in node.names:
                imports[uuid.uuid4().hex] = {
                    "module": node.module,
                    "name": alias.name,
                    "alias": alias.asname,
                    "type": "from-import",
                    "level": node.level,
                }
    return imports
9a1e412c832eec29a71b5ccf2fee925d71223277
411,974
def bfint(self, fname1="", ext1="", fname2="", ext2="", kpos="", clab="",
          kshs="", tolout="", tolhgt="", **kwargs):
    """Activates the body force interpolation operation.

    APDL Command: BFINT

    Reads a node list from ``fname1.ext1`` (written with NWRITE) and writes
    interpolated nodal BF commands (label TEMP only) to ``fname2.ext2``.

    Parameters
    ----------
    fname1, ext1
        Name and extension of the file to read interpolation data from.
    fname2, ext2
        Name and extension of the file the BF command block is written to.
    kpos
        Write position on fname2: 0 overwrites the file, 1 appends.
    clab
        Colon label (8 characters max, including the colon) identifying
        this block of BF commands; defaults to BFn in APDL.
    kshs
        Shell-to-solid submodeling key: 0 for solid-to-solid or
        shell-to-shell, 1 for shell-to-solid.
    tolout
        Extrapolation tolerance about elements, as a fraction of the
        element dimension (APDL default 0.5).
    tolhgt
        Height tolerance above/below shell elements, in length units
        (shell-to-shell only).

    Notes
    -----
    For coincident (or very close) nodes, apply BFINT to each part of the
    model separately so node mapping stays correct. See the APDL BFINT
    documentation for the full description of the generated command block.
    """
    command = f"BFINT,{fname1},{ext1},,{fname2},{ext2},,{kpos},{clab},{kshs},{tolout},{tolhgt}"
    return self.run(command, **kwargs)
0d61efebb0d4f377a70277a68b79a596869a582f
144,949
import math


def effective_priority(value, levels):
    """
    Determine the effective priority given a distinct number of supported
    levels. Returns the lowest priority value that is of equivalent
    priority to the value passed in.
    """
    low_cutoff = 5 - math.ceil(levels / 2.0)
    high_cutoff = 4 + math.floor(levels / 2.0)
    if value <= low_cutoff:
        return 0
    if value >= high_cutoff:
        return high_cutoff
    return value
b8d7764f71a14522999743778649e6ecf81639a6
269,037
def extend_diff_outliers(diff_indices):
    """ Extend difference-based outlier indices `diff_indices` by pairing

    Parameters
    ----------
    diff_indices : array
        Array of indices of differences that have been detected as
        outliers. A difference index of ``i`` refers to the difference
        between volume ``i`` and volume ``i + 1``.

    Returns
    -------
    extended_indices : list
        List where each index ``j`` in `diff_indices` has been replaced by
        two indices, ``j`` and ``j+1``, skipping values already emitted.
        For example, input ``[3, 7, 8, 12, 20]`` gives
        ``[3, 4, 7, 8, 9, 12, 13, 20, 21]``.
    """
    extended = []
    for index in diff_indices:
        for candidate in (index, index + 1):
            if candidate not in extended:
                extended.append(candidate)
    return extended
8803567fdede0ad585dc21af4660dbc66838af07
608,028
def _build_utilization_context(last_week, last_month, this_fy): """Build shared context components of utilization reports""" return { 'last_week_start_date': last_week['start_date'], 'last_week_end_date': last_week['end_date'], 'last_week_totals': last_week['totals'], 'last_month_start_date': last_month['start_date'], 'last_month_end_date': last_month['end_date'], 'last_month_totals': last_month['totals'], 'this_fy_end_date': this_fy['end_date'], 'this_fy_totals': this_fy['totals'], }
3d2a67e8472584faf94320d41fe642bcd1ab0876
421,427
import math


def get_exp_between_levels(level1: int, level2: int):
    """
    Gets the amount of exp between two levels

    :param level1: The low level
    :param level2: The high level
    """
    exp_low = 0
    exp_high = 0
    for level in range(1, level2):
        increment = math.floor(level + 300 * 2 ** (level / 7))
        if level < level1:
            exp_low += increment
        exp_high += increment
    return math.floor(exp_high / 4) - math.floor(exp_low / 4)
e0e1ee32f9e17ed8e4e5b83c807bb648475e3431
210,586
def get_rst_bullet_list_item(text, level=1):
    """
    Return a list item in the RST format, indented two spaces per nesting
    level.

    :param text: item content (converted with str())
    :param level: nesting level; 1 (the default) is a top-level item and
        produces the same output as before.
    """
    # BUGFIX: `level` was accepted but never used; wire it up as RST
    # nesting indentation while keeping the default output unchanged.
    indent = '  ' * (level - 1)
    return indent + '* ' + str(text) + '\n'
9104847b8f4432599c9182836a757755d938b4bc
358,496
def talos_colour(role):
    """Check if a role is a Talos Colour (name prefixed "<TALOS COLOR>")."""
    prefix = "<TALOS COLOR>"
    return role.name.startswith(prefix)
5e6b71cee8ab2dc5a115d28ddf987ecbd28280ba
446,438
def norm_package_version(version):
    """Normalize a version by removing extra spaces and parentheses."""
    if not version:
        return ''
    # Trim whitespace around each comma-separated constraint.
    segments = [segment.strip() for segment in version.split(',')]
    version = ','.join(segments).strip()
    # Drop one level of surrounding parentheses.
    if version.startswith('(') and version.endswith(')'):
        version = version[1:-1]
    # Strip any remaining whitespace characters.
    return ''.join(ch for ch in version if ch.strip())
d6120f9500bb7be879929204938444341d3178e0
632,550
def filter_api_changed(record):
    """Filter out LogRecords for requests that poll for changes."""
    polling_suffix = 'api/changed/ HTTP/1.1" 200 -'
    return not record.msg.endswith(polling_suffix)
caa93f19ce00238786ae0c1687b7e34994b73260
686,775
def reverseString(string: str):
    """Reverses a string."""
    return ''.join(reversed(string))
2e1fcf4be067bd78661b81db8d7d4e58ca31c0d4
481,006
def extract_mac_address(scanlist):
    """
    Extracts MAC address from the scan output.

    :param scanlist: list of tokens from the scan output; the token after
        "Address:" is taken as the MAC address.
    :return: the stripped MAC address, or "Unknown" when absent/malformed.
    """
    # BUGFIX: the old check joined tokens with no separator, so "Address:"
    # could match across token boundaries ("Addr" + "ess:...") and .index()
    # then raised ValueError; "Address:" as the last token raised IndexError.
    try:
        marker = scanlist.index("Address:")
        return scanlist[marker + 1].strip()
    except (ValueError, IndexError):
        return "Unknown"
454aab0f10fb5724053a341fa0affbb5943a84ec
304,349
import string
import random


def get_random_str(size=10, chars=string.ascii_lowercase + string.ascii_uppercase + string.digits):
    """
    Return a random string.

    size: size of the output string
    chars: characters to draw from (default: ASCII letters and digits)
    """
    return ''.join(random.choice(chars) for _ in range(size))
b0d9582829d19bab32d42f3af270d71ccfc6dad4
344,409
def calculate_bearing_difference(bearing1, bearing2):
    """
    Calculate smallest difference from bearing1 -> bearing2.

    :param bearing1: start bearing in degrees (0-360)
    :param bearing2: end bearing in degrees (0-360)
    :return: angle between -180 and +180 degrees.
    """
    difference = bearing2 - bearing1
    # Wrap into the (-180, 180] / [-180, 180) band.
    if difference <= -180:
        difference += 360
    elif difference >= 180:
        difference -= 360
    return difference
438b1c75cd2f6aa77300be17021eff1e796c0135
234,485
def strip_space(string):
    """Remove space characters from string

    :argument string: target string
    :type string: str
    :returns str (other whitespace such as tabs is preserved)
    """
    return ''.join(string.split(' '))
a7a80ec56d4d68b55279804ff9647e881dcca94d
189,038
def size(tensors, axis=0):
    """Measures the size of tensors along an axis.

    Args:
        tensors: Iterator of tensors.
        axis: Optional, axis along which to measure (default 0).

    Returns:
        Size of tensors along `axis`.

    Raises:
        ValueError: If shape of tensors do not match along `axis`.
    """
    observed = {tsr.shape[axis] for tsr in tensors}
    if len(observed) > 1:
        msg = "tensors of uniform size along {} axis required, got shapes {}."
        raise ValueError(msg.format(axis, [tsr.shape for tsr in tensors]))
    # Empty input raises KeyError here, matching the original behavior.
    return observed.pop()
1fad48320b1da16d9a27266509fc33e88c0396d5
189,269
from typing import List


def get_episode_indices(episodes_string: str) -> List[int]:
    """
    Parse a string such as '2' or '1-5' into a list of integers such as
    [2] or [1, 2, 3, 4, 5]. Returns [] for None or an empty string.
    """
    # BUGFIX: the old code used `is not ''` — an identity comparison with a
    # string literal (SyntaxWarning on Python 3.8+, implementation-defined
    # semantics). A plain truthiness test covers both None and ''.
    if not episodes_string:
        return []
    bounds = [int(part) for part in episodes_string.split('-')]
    if len(bounds) == 1:
        return bounds
    start, end = bounds
    return list(range(start, end + 1))
100c21cb438b04587eef635720ca0752a87403ed
152,835
def insertion_sort(array):
    """Sort *array* in place using insertion sort and return it."""
    for position in range(1, len(array)):
        current = array[position]
        hole = position
        # Shift larger elements one slot right to open a hole for current.
        while hole > 0 and array[hole - 1] > current:
            array[hole] = array[hole - 1]
            hole -= 1
        array[hole] = current
    return array
8f32e1907ec748e656239db12bb5d6d77739606e
306,150
import re


def find_identity_in_list(elements, identities):
    """Matches a list of identities to a list of elements.

    Args:
        elements: iterable of strings, arbitrary strings to match on.
        identities: iterable of (string, string), with first string being
            a regular expression, the second string being an identity.

    Returns:
        The identity whose regular expression first matches, scanning
        elements in order; None when nothing matches.
    """
    for candidate in elements:
        for pattern, identity in identities:
            if re.search(pattern, candidate):
                return identity
    return None
597b0e89547046a5ff7746344175ef6c0494b5ad
623,582
def getel(s):
    """Returns the unique element in a singleton set (or list)."""
    assert len(s) == 1
    (element,) = s
    return element
e4af6436237dca46e5267e8d4ba7f55df68c98a6
440,910
import json


def load_file_to_json(filename):
    """
    Loads a file into a JSON object.

    Apart from loading the file into JSON, this function also removes any
    comments (denoted with #) and imports any other JSON objects (denoted
    with __LOAD__filename).

    :param filename: the filename of the file to be loaded.
    :returns: the JSON object that is contained in the file.
    """
    datalines = []
    with open(filename) as infile:
        for line in infile:
            # Strip '#' comments before JSON parsing.
            # NOTE(review): this also truncates lines whose string values
            # legitimately contain '#'.
            comment = line.find('#')
            if comment >= 0:
                line = line[:comment]
            datalines.append(line)
    data = json.loads(''.join(datalines))

    def recurse(tdata):
        # Walk the object tree, replacing "__LOAD__name" string values
        # with the parsed contents of properties/name.json.
        for element in tdata:
            if type(tdata[element]) is dict:
                recurse(tdata[element])
            elif type(tdata[element]) is str and tdata[element].startswith('__LOAD__'):
                # The nested file path is hardcoded to the properties/ dir.
                nestedfilename = "properties/" + tdata[element][8:] + ".json"
                # Round-trip through dumps/loads yields a deep copy of the
                # loaded object.
                tdata[element] = json.loads(json.dumps(load_file_to_json(nestedfilename)))
    recurse(data)
    return data
0930240c3fffc604190938f40faa7d94c7cd5898
471,743
def construct_supervisor_command_line(supervisor_ip, cols, rows, area_size, traffic, road_cells, nodes, apn):
    """Build the shell command that starts the supervisor JVM.

    :param supervisor_ip: (str) private IP address of the supervisor
    :param cols: (int) city columns
    :param rows: (int) city rows
    :param area_size: (int) size of the area
    :param traffic: (float) traffic density
    :param road_cells: (int) cells between intersections
    :param nodes: (int) number of worker nodes
    :param apn: (int) areas per node
    :return: (str) the full command line
    """
    parts = [
        'java',
        f'-Dakka.remote.netty.tcp.hostname={supervisor_ip}',
        '-Dtrafficsimulation.warmup.seconds=20',
        '-Dtrafficsimulation.time.seconds=20',
        f'-Dtrafficsimulation.city.cols={cols}',
        f'-Dtrafficsimulation.city.rows={rows}',
        f'-Dtrafficsimulation.area.size={area_size}',
        f'-Dtrafficsimulation.area.traffic_density={traffic:.2f}',
        f'-Dtrafficsimulation.area.cells_between_intersections={road_cells}',
        f'-Dworker.nodes={nodes}',
        f'-Dworker.areas_per_node={apn}',
        '-Dakka.remote.log-remote-lifecycle-events=off',
        '-Dakka.loglevel=INFO',
        '-jar',
        '/home/ubuntu/supervisor.jar',
    ]
    return ' '.join(parts)
c72a08139efcb08397a9ad2acb4f82af462e123b
476,803
def get_geom(es_place):
    """Return the correct geometry from the elastic response

    A correct geometry means both lat and lon coordinates are required

    >>> get_geom({}) is None
    True
    >>> get_geom({'coord':{"lon": None, "lat": 48.858260156496016}}) is None
    True
    >>> get_geom({'coord':{"lon": 2.2944990157640612, "lat": None}}) is None
    True
    >>> get_geom({'coord':{"lon": 2.2944990157640612, "lat": 48.858260156496016}})
    {'type': 'Point', 'coordinates': [2.2944990157640612, 48.858260156496016], 'center': [2.2944990157640612, 48.858260156496016]}
    """
    geom = None
    coord = es_place.get('coord')
    if coord is not None:
        lon = coord.get('lon')
        lat = coord.get('lat')
        if lon is not None and lat is not None:
            geom = {
                'type': 'Point',
                'coordinates': [lon, lat],
                'center': [lon, lat]
            }
            # Only attach the bbox when a geometry was actually built.
            # Previously a place carrying a bbox but no valid coord
            # crashed with ``TypeError: 'NoneType' object ...``.
            if 'bbox' in es_place:
                geom['bbox'] = es_place.get('bbox')
    return geom
e41f6dad73dd41724bb7aa7fa62fca959f2e5a5b
473,018
def generate_projwfc_node(generate_calc_job_node, fixture_localhost, tmpdir):
    """Fixture to constructure a ``projwfc.x`` calcjob node for a specified test."""

    def _generate_projwfc_node(test_name):
        """Generate a mock ``ProjwfcCalculation`` node for testing the parsing.

        :param test_name: The name of the test folder that contains the output files.
        """
        temporary_files = ['data-file-schema.xml']
        return generate_calc_job_node(
            entry_point_name='quantumespresso.projwfc',
            computer=fixture_localhost,
            test_name=test_name,
            attributes={'retrieve_temporary_list': temporary_files},
            retrieve_temporary=(tmpdir, temporary_files),
        )

    return _generate_projwfc_node
9e167013a320e6b69d0121a820ea62efc9fc9f92
691,185
def Cintegrate(phi, HC, dt):
    """
    One explicit-Euler step for a classical random walker.

    Parameters:
        phi (array): probability distribution at the current time
        HC (sparse matrix): classical Hamiltonian
        dt (float): time step

    Returns:
        array / sparse matrix: probability distribution at the next step
    """
    # phi(t+dt) = phi(t) - HC @ phi(t) * dt  (explicit Euler)
    return phi - HC.dot(phi) * dt
a10e294f7604f7e5733a08296f8a169e5e171077
378,076
def drop_labels(events, min_pct=.05):
    """
    Snippet 3.8 page 54
    Recursively eliminate labels with insufficient examples.

    :param events: (data frame) events with a 'bin' column
    :param min_pct: (float) minimum fraction of observations a label must have
    :return: (data frame) events with rare labels dropped
    """
    # Apply weights, drop labels with insufficient examples
    while True:
        df0 = events['bin'].value_counts(normalize=True)
        if df0.min() > min_pct or df0.shape[0] < 3:
            break
        # ``Series.argmin`` returns a *positional* index in modern
        # pandas; ``idxmin`` returns the label itself, which is what the
        # filter below compares against.
        rare_label = df0.idxmin()
        print('dropped label: ', rare_label, df0.min())
        events = events[events['bin'] != rare_label]
    return events
49a93a0ad4ed81bd86733f275cd199f0f56d9f34
58,898
def stop_word_removal(text_all, cached_stop_words):
    """Return the texts with all stop words removed.

    Keyword arguments:
    text_all -- list of all texts (list of str)
    cached_stop_words -- list of all stopwords (list of str)
    """
    return [
        ' '.join(w for w in document.split() if w not in cached_stop_words)
        for document in text_all
    ]
543e78edb078778d9acb2fd26be90c267a5b6450
696,769
import math


def rms_error(seq1, seq2):
    """Returns the RMS error between two lists of values"""
    assert len(seq1) == len(seq2)
    total = 0.0
    for a, b in zip(seq1, seq2):
        total += (a - b) ** 2
    return math.sqrt(total / len(seq1))
de846eb1b318e24563319ce11013f6a0ca7583c5
623,309
def format_data(tax_order, relAbs):
    """
    Formats data for plotting.

    Arguments:
        tax_order (list) -- tax group names, in order of decreasing
            median relative abundance
        relAbs (dict) -- maps a tax group index (the csv column it came
            from) to a list holding the group name followed by the
            relative abundances per sample
            (e.g.: {1: ['Streptococcus', 0.5, 0.25, 0.25]})

    Returns:
        data_to_plot (list) -- per-taxon relative abundances per sample
        phyla_labels (list) -- tax group names, least to most abundant/common
    """
    # Candidate phyla get an alphanumeric identifier appended.
    candidate_names = [
        ("Saccharibacteria", "Saccharibacteria (TM7)"),
        ("Parcubacteria", "Parcubacteria (OD1)"),
        ("Microgenomates", "Microgenomates (OP11)"),
        ("Armatimonadetes", "Armatimonadetes (OP10)"),
        ("Marinimicrobia", "Marinimicrobia (SAR406)"),
        ("Latescibacteria", "Latescibacteria (WS3)"),
        ("Aminicenantes", "Aminicenantes (OP8)"),
        ("Cloacimonetes", "Cloacimonetes (WWE1)"),
    ]

    def _label(name):
        # "SR1" is matched exactly; the others by substring, as before.
        if name == "SR1":
            return "Absconditabacteria (SR1)"
        for fragment, label in candidate_names:
            if fragment in name:
                return label
        return name

    data_to_plot = []
    phyla_labels = []
    for name in tax_order:
        for key in relAbs:
            if relAbs[key][0] == name:
                # Element 0 is the group name; the rest are abundances.
                data_to_plot.append(relAbs[key][1:])
                phyla_labels.append(_label(name))
    data_to_plot.reverse()
    phyla_labels.reverse()
    return data_to_plot, phyla_labels
cdcec4de49567bac1cfeff64c1492ecd2c1693cc
443,184
import uuid


def uuid_is_valid(test_uuid):
    """Return True if ``test_uuid`` is a valid UUID string.

    :param test_uuid: the string to validate.
    :return: bool
    """
    try:
        # The constructed object was never used; parsing alone validates.
        uuid.UUID(test_uuid)
    except ValueError:
        return False
    return True
fbc171e2aeed0fd3587ffbc839f27cdd5b594619
362,583
def _properties(source, size, require_staging, remote): """Args: source (str): source of the data size (int): number of rows of the dataset require_staging (bool): whether the file requires staging remote (str): remote path Returns: dict: set of importer properties """ return { "source": source, "size": size, "require_staging": require_staging, "remote_path": remote }
f5e365a2ca8587c71afb2a868a8411be08529a1f
424,630
def utc_to_utcf(utc):
    """
    Converts a UTC datetime into UTC string format %Y-%m-%dT%H:%M:%SZ.

    :param utc: python UTC datetime
    :return: a string in UTC string format %Y-%m-%dT%H:%M:%SZ
    """
    return (
        f"{utc.year:04d}-{utc.month:02d}-{utc.day:02d}"
        f"T{utc.hour:02d}:{utc.minute:02d}:{utc.second:02d}Z"
    )
59c3bba879b33a9c73008245fe57b155aa63a3b1
236,482
def constant(value: float = 0):
    """Return ``value`` unchanged — a constant float.

    Parameters
    ----------
    value : float
        The float value (defaults to 0).
    """
    return value
de13e64083765fbd429e66f7c55afa887fcbd34f
597,626
import string
import random


def generate_key(length=50):
    """Generate a random secret-key string.

    Args:
        length: Number of characters in returned string (default = 50)

    Returns:
        Randomized secret key string
    """
    alphabet = string.digits + string.ascii_letters + string.punctuation
    return ''.join(random.choice(alphabet) for _ in range(length))
dd8463da1e7fca2b51d4e1899e5999ea3e6e1f48
180,856
def f(n):
    """Return the n-th Fibonacci number, computed iteratively.

    f(0) = 0, f(1) = 1, f(n) = f(n-1) + f(n-2)
    """
    if n < 2:
        return n
    a, b = 1, 1  # f(1), f(2)
    for _ in range(3, n + 1):
        a, b = b, a + b
    return b
093ae51ab1c74ee7eed2cf43a011431de7191f4f
247,777
def blocks(text):
    """Split the text into blocks delimited by a blank line."""
    separator = "\n\n"
    return text.split(separator)
bda99561d35b729203fb7fe945c23147c62ebc24
25,495
def bisect(list_, x, begin=0, end=None):
    """
    Return the insertion point for x to maintain sorted order, in
    logarithmic time.

    list_ -- the sorted list of items; we assume ascending order
    x -- the item we search for
    begin -- the first index of the range we want to search in
    end -- the index past the end of the range we want to search in

    Search inside list_[begin:end].  The result is the leftmost index i
    such that inserting the item there keeps the list sorted.

    Raises ValueError unless 0 <= begin <= end <= len(list_).

    Time complexity: O(log(N)), where N = end - begin.
    """
    from bisect import bisect_left

    if end is None:
        end = len(list_)
    if not 0 <= begin <= end <= len(list_):
        raise ValueError('0 <= begin <= end <= len(list_) must hold')
    # The hand-rolled binary search used ``x <= list_[mid]``, i.e.
    # leftmost-insertion semantics — exactly the stdlib's bisect_left.
    return bisect_left(list_, x, begin, end)
9d988af61399332d17be7981eaf08f5631aca34f
436,809
def get_file_contents(file_path, method="r"):
    """
    Return the contents of a file.

    Arguments:
        file_path : str
            Path of the file to read.
        method : str
            Mode passed to ``open`` (e.g. "r" or "rb").

    Returns:
        str (or bytes for binary modes)
    """
    # The context manager closes the file on exit; the explicit
    # f.close() inside the ``with`` block was redundant.
    with open(file_path, method) as f:
        return f.read()
dd0735f7ca2fcce54e1234e9aba07cadab8d980f
501,670
def convert_to_list(argument):
    """Convert a comma separated list into a list of stripped values."""
    if argument is None:
        return []
    return [piece.strip() for piece in argument.split(",")]
0bd8ea8c3a0928155dfad9992eb4492b234f3efa
367,609
def get_endpoint(name, array):
    """Return the protocol endpoint for ``name``, or None on any failure."""
    try:
        endpoint = array.get_volume(name, pending=True, protocol_endpoint=True)
    except Exception:
        # Best effort: treat any backend error as "not found".
        return None
    return endpoint
c85a7a38055358dc5c489286a22002a75616f966
539,499
import hashlib


def isrightpwd(pwd, encrypted_pwd):
    """
    Check that ``pwd`` matches the SHA512-hashed password
    ``encrypted_pwd``.

    :param pwd: candidate plain-text password.
    :param encrypted_pwd: hex digest of the expected SHA512 hash.
    :return: True if the password is correct, False otherwise.
    """
    import hmac

    digest = hashlib.sha512(pwd.encode('utf-8')).hexdigest()
    # Constant-time comparison avoids leaking match length through
    # string-comparison timing.
    return hmac.compare_digest(digest, encrypted_pwd)
b747b9dfee6b9f27c192f27af5b69c32556bff39
206,382
def get_pokemon_type(pokemon_types):
    """Asks the user for a type of pokemon and displays the names of all
    pokemon with that type.

    The entered type is implicitly lower-cased.  Invalid entries print a
    warning and re-prompt until a valid type is given.

    Args:
        pokemon_types: a list of pokemon types sorted in alphabetic order

    Returns:
        the pokemon type chosen by the user
    """
    # This is the prompt you'll display to the user to ask for their choice.
    # Don't modify it!
    prompt = ('enter a type from one of the following: \n{0}\n'.format(
        ', '.join(pokemon_types)))

    # This is the prompt you'll display to the user if they enter something
    # invalid. Don't modify it!
    warning = 'Unrecognized type'

    while True:
        choice = input(prompt).lower()
        if choice in pokemon_types:
            return choice
        print(warning)
571dbe5d04c749d83bf2bbcd7e6ee3b3f6bf1a62
19,293
import random


def get_epsilon_greedy_action(epsilon, Q_actions):
    """Choose an action using the epsilon-greedy strategy.

    :param epsilon: exploration probability.
    :param Q_actions: mapping from action to its Q-value.
    :return: the chosen action.
    """
    if random.random() > epsilon:
        # Exploit: pick uniformly among the best-valued actions.
        best_value = max(Q_actions.values())
        best_actions = [a for a, q in Q_actions.items() if q == best_value]
        return random.choice(best_actions)
    # Explore: pick any action uniformly.
    return random.choice(list(Q_actions.keys()))
89edadedbbd532e5635fa8d9467ca1b20fbf45a0
252,436
import torch


def get_padded_tensor(tensor, n_padding, padding_index, special_embedding, front=True):
    """
    Pad a tensor with rows of a special-character embedding.

    Args:
        tensor (torch.Tensor or str): tensor to pad, or the sentinel
            string 'NULL' to produce pure NULL padding
        n_padding (int): number of padding rows
        padding_index (int): which special char to use
            (0-SOS 1-NULL 2-UNK 3-EOS)
        special_embedding (torch.Tensor): embedding tensor of the 4
            special chars
        front (bool): pad at the front (True) or at the end

    Returns:
        padded_tensor (torch.Tensor)
    """
    # Guard with isinstance: comparing a real Tensor against the string
    # sentinel would trigger an elementwise/failing tensor comparison.
    if isinstance(tensor, str) and tensor == 'NULL':
        return special_embedding[1].repeat(n_padding, 1)
    padding_tensor = special_embedding[padding_index].repeat(n_padding, 1)
    if front:
        return torch.cat((padding_tensor, tensor), dim=0)
    return torch.cat((tensor, padding_tensor), dim=0)
e056e190d7e816b8eebac51c77186658397332e6
342,760
def perform_substitution(match, substitution_map):
    """Substitute a C++ identifier with its C# counterpart.

    Only the captured identifier (group 1) is replaced inside the full
    match (group 0), so surrounding characters such as backticks and
    parentheses survive.  For example, `firebase::crash::Initialize()`
    becomes `Firebase.Crash.Initialize()`.

    Args:
        match: The re.Match object representing the match.
        substitution_map: The dict of potential substitutions.

    Returns:
        The new C# code resulting from performing the substitutions.
    """
    whole = match.group(0)
    cpp_name = match.group(1)
    cs_name = substitution_map.get(cpp_name)
    if not cs_name:
        # No known mapping: leave the original text untouched.
        return whole
    return whole.replace(cpp_name, cs_name)
e9fc887fb16a759b76a5e13d8104650d095ce4f9
367,867
from typing import List


def parse_timeseries(
    timeseries_str, time_input_unit="minute", time_output_unit="second"
) -> List[List[float]]:
    """Create a list of 2-lists [timestep, value (mm/hour)] from "t,v" lines.

    Timesteps are converted from ``time_input_unit`` to
    ``time_output_unit``; only the minute -> second case needs scaling.
    """
    if not timeseries_str:
        return [[]]
    output = []
    for line in timeseries_str.split():
        timestep, value = line.split(",")
        timestep = int(timestep.strip())
        # Bug fix: the old input-unit branch was a no-op, and timesteps
        # already given in seconds were *also* multiplied by 60 whenever
        # seconds were requested as output.
        if time_input_unit == "minute" and time_output_unit == "second":
            timestep *= 60
        output.append([timestep, float(value.strip())])
    return output
90c498650adf5764a6e7e4fedc1bcf7da454f192
655,305
def get_user_pw(request):
    """
    Extract the user's credentials from a request object.

    @return (username, password) if credentials are available and
            (None, None) otherwise
    """
    if 'Authorization' not in request.headers:
        return (None, None)
    credentials = request.authorization
    return (credentials.username, credentials.password)
16587db1d82634efede9c6424353a7278332105e
124,884
import json


def load_vocab(f):
    """Load a bidirectional vocab mapping from a file object.

    The file holds a JSON object mapping stringified ids to tokens; the
    result maps both int id -> token and token -> int id.
    """
    raw = json.loads(f.read())
    vocab = {}
    for key, token in raw.items():
        vocab[int(key)] = token
    # Add the reverse direction (materialize first: we mutate vocab).
    vocab.update({token: idx for idx, token in list(vocab.items())})
    return vocab
e8c65382377b1c21f881409d3d71bbd5d9395711
427,000
def get_headers(oauth_token: str) -> dict:
    """Build the common headers used by all requests."""
    headers = {'Authorization': f'OAuth {oauth_token}'}
    return headers
285f88b6268f50209432698a12ba2b4b57ecd1ee
691,446
def generate_description(slug: str) -> str:
    """
    Turn a kebab-case slug into an UPPERCASE description.

    Slugs longer than 30 characters are abbreviated to the acronym of
    their words.

    Examples:
    >>> generate_description("home")
    'HOME'
    >>> generate_description("about")
    'ABOUT'
    >>> generate_description("contact-us")
    'CONTACT US'
    >>> generate_description("sebuah-slug-yang-panjang-sekali")
    'SSYPS'
    """
    words = slug.split("-")
    if len(slug) > 30:
        # Acronym: first letter of each word, upper-cased.
        return "".join(word[0] for word in words).upper()
    return " ".join(words).upper()
9fe1e270d799dc47d8a28d1c4e2eeb4c28525ef3
630,731
import torch


def kl_term(mean, scale):
    """KL divergence between N(mean, scale) and N(0, 1).

    KL = 0.5 * (mean^2 + scale^2 - 1 - 2*log(scale)); in particular it
    is 0 when mean == 0 and scale == 1.
    """
    # Bug fix: the constant term is -1, not +1 — the old formula was off
    # by a constant 1, so KL(N(0,1) || N(0,1)) evaluated to 1 instead
    # of 0.  (Gradients were unaffected; absolute values were not.)
    return .5 * (mean * mean + scale * scale - 1 - 2 * torch.log(scale))
8654619040a6ff138a0f65c2874f136f0759c9c8
127,528
def roll(arr, step):
    """Rotate ``arr`` left by ``step`` positions and return the result."""
    head, tail = arr[:step], arr[step:]
    return tail + head
11a23fc878b8623b8014ed1b5b440d0db133f996
525,488
def sigfig(number, places):
    """
    Round `number` to `places` significant digits.

    Parameters:
        number (int or float): A number to round.
        places (int): The number of places to round to.

    Returns:
        A number
    """
    # round() with a negative ndigits rounds to the left of the decimal
    # point, e.g. round(12345, -2) == 12300 — which gives us sigfigs.
    integer_digits = len(str(abs(number)).split('.')[0])
    return round(number, places - integer_digits)
0aae9fff082b35a18418b2dae8c6b6787e3c947a
683,944
import re


def is_literature(paragraph: str) -> bool:
    """
    Check if a paragraph is a literature entry.

    Parameters
    ----------
    paragraph : str

    Returns
    -------
    is_literature : bool
    """
    doi_regex = re.compile(r"""(10[.][0-9]{4,}(?:[.][0-9]+)*/(?:(?!["&\'<>])\S)+)""")
    issn_regex = re.compile(r"""ISSN \d+""", re.IGNORECASE)
    vol_regex = re.compile(r"""vol\. [IVCXL\d]+""", re.IGNORECASE)
    # Cheap substring checks first, regexes last.
    if "ISBN" in paragraph:
        return True
    if "https://" in paragraph or "http://" in paragraph:
        return True
    return any(
        pattern.search(paragraph) is not None
        for pattern in (doi_regex, issn_regex, vol_regex)
    )
861c6332fb4eea0a696e1705c68dd49c6bab0885
46,708
import math


def atan(x):
    """Return the arc tangent of x, an angle in radians.

    The result lies in [-pi / 2, pi / 2].
    """
    return math.atan(x)
2a7ec09a5c159f08e4265c192de9b1cfc452c668
519,745
from struct import pack, unpack


def zfp_accuracy_opts(accuracy):
    """Create compression options for ZFP in fixed-accuracy mode.

    The float ``accuracy`` parameter is the absolute error tolerance
    (e.g. 0.001); it is packed as an IEEE 754 double and split into two
    32-bit words.
    """
    ZFP_MODE_ACCURACY = 3
    packed = pack('<d', accuracy)  # IEEE 754 double, little-endian
    word_a = unpack('<I', packed[:4])[0]   # first 4 packed bytes
    word_b = unpack('<I', packed[4:])[0]   # last 4 packed bytes
    return ZFP_MODE_ACCURACY, 0, word_a, word_b, 0, 0
3a30c75e1ff25d7aeb685f49d7df1aa20527080a
196,938
def adjust_fit(dst_w, dst_h, img_w, img_h):
    """Fit an image of size (img_w, img_h) inside (dst_w, dst_h).

    Determines the aspect ratios and returns an (x, y, w, h) tuple for
    the fitted image, centred in the destination (x or y may be
    negative).

    >>> adjust_fit(4, 3, 5, 5)
    (0.5, 0, 3.0, 3.0)
    >>> adjust_fit(8, 6, 5, 5)
    (1.0, 0, 6.0, 6.0)
    >>> adjust_fit(4, 3, 5, 2)
    (0, 0.7, 4.0, 1.6)
    >>> adjust_fit(8, 6, 5, 2)
    (0, 1.4, 8.0, 3.2)
    """
    dst_w = float(dst_w)
    dst_h = float(dst_h)
    img_w = float(img_w)
    img_h = float(img_h)
    dst_ratio = dst_w / dst_h
    img_ratio = img_w / img_h
    if dst_ratio > img_ratio:
        # Image is narrower than the destination: use the full height.
        y = 0
        h = dst_h
        w = h * img_ratio
        x = dst_w / 2 - w / 2
    else:
        # Image is wider (or equal): use the full width.
        # (An unused ``scale`` local was removed from this branch.)
        x = 0
        w = dst_w
        h = w / img_ratio
        y = dst_h / 2 - h / 2
    return x, y, w, h
febcfcb1f35af4b54c6f9d0c2da09bb82e1b0664
458,274