content
stringlengths
39
9.28k
sha1
stringlengths
40
40
id
int64
8
710k
def calc_default_colors(p_index):
    """Return the default plot style for the pair at position ``p_index``.

    A fixed palette of five color/linestyle/marker combinations is cycled
    through, so indices beyond 4 reuse earlier styles. Used when the user
    does not specify styles in the input yaml file.

    Parameters
    ----------
    p_index : int
        Index of the pair within the analysis class.

    Returns
    -------
    dict
        Keys ``color``, ``linestyle`` and ``marker`` for plotting.
    """
    palette = [
        dict(color='b', linestyle='--', marker='x'),
        dict(color='g', linestyle='-.', marker='o'),
        dict(color='r', linestyle=':', marker='v'),
        dict(color='c', linestyle='--', marker='^'),
        dict(color='m', linestyle='-.', marker='s'),
    ]
    # Wrap around so any index maps onto the 5-entry palette.
    return palette[p_index % 5]
febc6ce222f57dec8192379855f404efdfb24c7e
483,673
def relationship_types() -> str:
    """Build the Neo4j statement listing every relationship type.

    Returns
    -------
    out: str
        A ``CALL db.relationshipTypes()`` Cypher statement.
    """
    statement = 'CALL db.relationshipTypes()'
    return statement
cebfff5f89e7673f5227ec1a5bf39105d6d2ef0d
345,503
def get_tuplets(gn):
    """Return the tuplet markers ("start"/"continue"/"stop") for a note.

    Args:
        gn (GeneralNote): the music21 general note (note, rest or chord).

    Returns:
        list: one marker per tuplet; ``None`` types become "continue".
    """
    markers = []
    for tup in gn.duration.tuplets:
        markers.append("continue" if tup.type is None else tup.type)
    return markers
21eb1a99abadac746ea3b7363d23b8e920e1b340
422,908
import math


def ToMercDegrees(y, num_tiles):
    """Latitude (degrees) of the southern border of the y-th Mercator tile.

    LOD is log2(num_tiles).

    Args:
        y: (float) Position of tile in qt grid moving from south to north.
            Non-integer values give latitude within tile.
        num_tiles: (integer) Number of tiles in the qt grid.

    Returns:
        Latitude of southern border of tile in degrees.
    """
    # Map the tile row onto the standard Mercator ordinate in [-pi, pi]
    # (roughly -85..85 degrees, the span Google uses for its 2d maps).
    merc_y = math.pi * (2.0 * y / num_tiles - 1.0)
    # Inverse Mercator: lat = 2*atan(exp(y)) - pi/2.
    lat_rad = 2.0 * math.atan(math.exp(merc_y)) - math.pi / 2.0
    return math.degrees(lat_rad)
76b0859a9b3a60878bf64398a43fe92b70af1303
493,466
def read_analogies(filename, word2id):
    """Read an analogy-question file, keeping only fully-known questions.

    Lines starting with ':' are treated as comments. A question is kept
    only if it has exactly four words and every word is in ``word2id``.

    Returns:
        questions: list of retained questions, each a list of four
            lowercased words (questions with unknown words are skipped).
    """
    questions = []
    skipped = 0
    with open(filename, 'r') as analogy_f:
        for line in analogy_f:
            if line.startswith(':'):  # Skip comments.
                continue
            words = line.strip().lower().split()
            if len(words) != 4 or any(w not in word2id for w in words):
                skipped += 1
            else:
                questions.append(words)
    print('Eval analogy file: {}'.format(filename))
    print('Questions: {}'.format(len(questions)))
    print('Skipped: {}'.format(skipped))
    return questions
10ab73c672e8c7614b483a760e3385fd981621a3
401,101
def map_category_id(category_map):
    """Assign a sequential integer ID to each category name.

    Returns:
        tuple: (name -> id dict, id -> name dict).
    """
    category_id = {}
    id_category = {}
    for idx, category in enumerate(category_map):
        name = category['name']
        category_id[name] = idx
        id_category[idx] = name
    return category_id, id_category
9d48427aeb0e8ae2ac53ec02768d3081f641fcb6
670,749
def compute_cell_extents_grid(bounding_rect=(0.03, 0.03, 0.97, 0.97), num_rows=2, num_cols=6, axis_pad=0.01):
    """Compute cell extents for a padded grid laid out inside ``bounding_rect``.

    Cells are emitted row-major from the top row down; each extent is a
    ``(left, bottom, width, height)`` tuple.
    """
    left, bottom, width, height = bounding_rect
    # Padding consumes (n + 1) gaps along each axis.
    cell_h = float((height - axis_pad * (num_rows + 1)) / num_rows)
    cell_w = float((width - axis_pad * (num_cols + 1)) / num_cols)
    step_h = cell_h + axis_pad
    step_w = cell_w + axis_pad
    return [
        (left + col * step_w, bottom + row * step_h, cell_w, cell_h)
        for row in range(num_rows - 1, -1, -1)
        for col in range(num_cols)
    ]
b2560857876440164eff4d9bdb1c2e659eb850dd
340,752
def from_json(json_data, key):
    """Return the value of ``key`` from ``json_data``, or None when absent."""
    if key not in json_data:
        return None
    return json_data[key]
a573c780166530781b7fc495e3c786011f484f39
478,732
def is_msvc(conanfile):
    """Validate if the current compiler in settings is Visual Studio / msvc.

    :param conanfile: ConanFile instance.
    :return: True when the host ``compiler`` setting is 'Visual Studio'
        or 'msvc', otherwise False.
    """
    compiler = conanfile.settings.get_safe("compiler")
    return compiler in ("Visual Studio", "msvc")
dfdbc4a62201c915fdc392a3d62bdd3b3c65db74
185,096
def lat(source):
    """Convert NMEA latitude bytes ``DDMM.MMMM`` to a (deg, min) pair.

    >>> lat(b'2543.7024')
    (25, 43.7024)

    Returns ``(None, None)`` for empty input.
    """
    if not source:
        return None, None
    degrees = int(source[:2])
    minutes = float(source[2:])
    return degrees, minutes
631b5421c38556ca0b71bf26f38d50f02edf641a
229,785
def polygon_area(polygon):
    """Calculate the signed area of an arbitrary polygon (shoelace formula).

    The polygon is translated so its first vertex sits at the origin before
    summing, which improves numerical stability for far-away coordinates.
    Unlike the previous version (`polygon -= polygon[0]`), the caller's
    array is NOT mutated.

    Args:
        polygon: (n, 2) array of vertices (may be empty).

    Returns:
        float: signed area (positive for counter-clockwise winding).
    """
    n = len(polygon)
    if n == 0:
        return 0.0
    # Translate a copy rather than shifting the input in place.
    shifted = polygon - polygon[0]
    area = 0.0
    for j, p1 in enumerate(shifted):
        p2 = shifted[(j + 1) % n]
        area += p1[0] * p2[1] - p2[0] * p1[1]
    return 0.5 * area
2c7f858d5a19edac18f9572d6b469dbd6db67ab4
607,437
def fmt_cycles(cycles):
    """Format (name, value) cycle entries as 'name-value', joined by commas."""
    parts = [entry[0] + '-' + str(entry[1]) for entry in cycles]
    return ','.join(parts)
b6012807f7b50f8f0ff22ae86ef1b7f631ade6bb
351,753
def qs_opt_eq(qs, opt, value):
    """Return True if the query string option ``opt`` equals ``value``.

    Args:
        qs: parsed query string mapping option name -> list of values.
        opt: option name to look up.
        value: value to compare against.

    Returns:
        bool: True when ``value`` is among the option's values. The
        previous version fell off the end and returned None on a miss;
        this returns an explicit False, matching the documented contract
        (still falsy, so truthiness-based callers are unaffected).
    """
    return value in qs.get(opt, [])
68dede185a9b1aa62e32a833db4b395ce6c4b1cd
643,060
def dash_to_underscore(keyword):
    """Change every dash '-' to an underscore '_'.

    :param keyword: string where the replacement has to be done.
    :return: string, e.g. 'one-two' becomes 'one_two'.
    """
    return '_'.join(keyword.split('-'))
870ffddfe0e16d44a52cc2cd6fd7a2f584239250
297,035
def kind(event):
    """Find the type of an event.

    :param event: the event object.
    :return: the event's ``type`` attribute.
    """
    event_type = event.type
    return event_type
68f0170eac9fc06f954542769dcd0d4ef974e725
699,054
def get_conversion_rate(df, total, conversions):
    """Return the row-wise conversion rate conversions / total.

    Args:
        df: Pandas DataFrame.
        total: name of the column containing the total value.
        conversions: name of the column containing the conversions value.

    Returns:
        Series of conversions / total.

    Example:
        df['cr'] = get_conversion_rate(df, 'sessions', 'orders')
    """
    numerator = df[conversions]
    denominator = df[total]
    return numerator / denominator
3dbfd40669e3fc751a1344e4b9d6c467f742e35b
102,619
import json


def load_json(config_path):
    """Load json file content.

    Args:
        config_path (str): Path to json file.

    Returns:
        dict: config dictionary.
    """
    with open(config_path, "r") as handle:
        return json.load(handle)
d4924cd90857ac5b8cf63e942a6914e595132d30
121,669
import typing
import ast


def genConstructAstObj(varName: str, className: str) -> typing.Tuple[ast.Name, ast.Name, ast.Assign]:
    """Build the AST pieces for ``varName = className()``.

    Returns a 3-tuple: the `className` Name node, the `varName` Name node,
    and the Assign node performing the construction.
    """
    cls_node = ast.Name(id=className, ctx=ast.Load())
    var_node = ast.Name(id=varName, ctx=ast.Load())
    assign_node = ast.Assign(
        targets=[ast.Name(id=varName, ctx=ast.Store())],
        value=ast.Call(func=cls_node, args=[], keywords=[]),
        type_comment=None,
    )
    return cls_node, var_node, assign_node
773054faa998d597e88118004473cd4fd33b3609
352,836
import re


def sub_args(args, args_regex, sub):
    """Substitute all matches of ``args_regex`` in ``args`` with ``sub``.

    Works when args is a str, list, tuple, or dict. List/tuple input is
    returned as a tuple; for dicts only the values (not keys) are replaced.

    :args: str, list, tuple, or dict
    :args_regex: r'' expression to replace with
    :sub: string to replace regex with
    :returns: args, but with all instances of the regex replaced
    """
    pattern = re.compile(args_regex)
    if isinstance(args, str):
        return pattern.sub(sub, args)
    if isinstance(args, (tuple, list)):
        return tuple(pattern.sub(sub, item) for item in args)
    if isinstance(args, dict):
        return {key: pattern.sub(sub, val) for key, val in args.items()}
9d72de0266ac8bfe3a4e3dd5ffa17f761938ad29
517,306
def create_instance_template(properties, name_prefix):
    """Create an instance-template resource plus its self-link output.

    Returns:
        tuple: (self-link reference, [resource dict], [output dict]).
    """
    name = '{}-it'.format(name_prefix)
    resource = {
        'type': 'instance_template.py',
        'name': name,
        'properties': properties,
    }
    self_link = '$(ref.{}.selfLink)'.format(name)
    outputs = [{'name': 'instanceTemplateSelfLink', 'value': self_link}]
    return self_link, [resource], outputs
28a1f6403a23565b53ff08f94ee3f13875645c9b
276,118
def get_members(connection, id, include_access=False, offset=0, limit=-1, error_msg=None):
    """Get member information for a specific user group.

    Args:
        connection: MicroStrategy REST API connection object.
        id (string): ID of the user group.
        include_access (bool, optional): whether to return access for members.
        offset (integer, optional): starting point within the returned
            collection; controls paging behavior.
        limit (integer, optional): maximum number of items for a single
            request; -1 means no limit (subject to governing settings).
        error_msg (string, optional): custom error message for error handling.

    Returns:
        Complete HTTP response object.
    """
    url = '{}/api/usergroups/{}/members'.format(connection.base_url, id)
    params = {'includeAccess': include_access, 'offset': offset, 'limit': limit}
    return connection.get(url=url, headers={'X-MSTR-ProjectID': None}, params=params)
1c87036232fafb54cf8d0a3bfe440e2c4f4b5e22
342,250
def accuracy(list1, list2):
    """Compute the element-wise similarity between two equal-length lists.

    INPUTS:
    - list1 [list]: list of arbitrary values
    - list2 [list]: list of arbitrary values

    RETURNS:
    - n_correct [int]: total num. of elementwise matches
    - n_total [int]: total num. elements compared
    """
    assert len(list1) == len(list2)
    matches = sum(a == b for a, b in zip(list1, list2))
    return matches, len(list1)
319e0f9baec2e5dac3fb62df0388b6d136c1fa41
474,439
def bundle_tweets(tweets):
    """Bundle all the text in the tweets into a single string.

    Uses str.join instead of the previous repeated ``+=`` concatenation,
    which is quadratic in the worst case; output is identical.

    Args:
        tweets: iterable of dicts each with a 'text' key.

    Returns:
        str: all tweet texts concatenated in order.
    """
    return ''.join(tweet['text'] for tweet in tweets)
8256439f8f0dab2cd9429bc203785c4df8d88f5a
193,637
def extract_hashtags(text, tags_to_append=None):
    """Extract a distinct, sorted hashtag collection from a string.

    Tokens starting with '#' (and shorter than 256 chars) are stripped of
    the characters ``#.,-"'&*^!`` and merged with ``tags_to_append``.

    Args:
        text: string to scan for hashtags.
        tags_to_append: optional extra tags to include. The previous
            mutable default ``[]`` is replaced by the None sentinel
            (same behavior; avoids the shared-default pitfall).

    Returns:
        list: sorted, de-duplicated tags.
    """
    extra = [] if tags_to_append is None else tags_to_append
    tags = {
        token.strip("#.,-\"\'&*^!")
        for token in text.split()
        if token.startswith("#") and len(token) < 256
    }
    return sorted(tags.union(extra))
560f3d77a8e424382874e626333f77681f39674d
488,789
def score_programs(adata_here,program_name='bulk.guide.program', pref=None,copy=False):
    """Score each program for every cell by averaging its genes' expression.

    For each distinct value in ``adata_here.var[program_name]``, the mean
    expression over that program's genes is written to
    ``adata_here.obs[pref + <program>]``.

    Parameters: an AnnData object (assumed — TODO confirm), the .var column
    naming each gene's program, an optional obs-column prefix (defaults to
    ``program_name``), and ``copy`` to work on — and return — a copy
    instead of mutating ``adata_here`` in place.
    """
    if copy: adata_here = adata_here.copy()
    if pref is None: pref=program_name
    # Distinct program labels found in .var.
    programs=list(set(adata_here.var[program_name]))
    for pro in programs:
        print('scoring',pro)
        # Genes annotated as belonging to this program.
        pro_genes=adata_here.var_names[adata_here.var[program_name]==pro]
        # Mean expression across the program's genes, one value per cell.
        adata_here.obs[pref+str(pro)]=adata_here[:,pro_genes].X.mean(axis=1)
    # Only return when working on a copy; in-place mode returns None.
    if copy: return(adata_here)
d023b37f5ad375a6a969ad1a4e7058044c3c97f1
305,389
def get_anntypes(batch):
    """Return the 'anntype' field of every annotation in ``batch``."""
    result = []
    for ann in batch.annotation:
        result.append(ann["anntype"])
    return result
3584c7efb06c266df54cf33963a8078f45974653
304,247
def filter_subjects_by_coverage(args):
    """Check whether the subject passes the coverage-evenness filter.

    Args:
        args: tuple of (subject, cov, SD_MEAN_CUTOFF, STRIM_5, STRIM_3)
            where cov is a 1-D numpy array of per-position coverage and
            STRIM_5/STRIM_3 are the number of positions to trim from the
            5' and 3' ends.

    Returns:
        tuple: (subject, passes_filter) — True when mean coverage is
        positive and std/mean does not exceed the cutoff.
    """
    subject, cov, sd_mean_cutoff, strim_5, strim_3 = args
    # Trim the ends when enough positions remain afterwards.
    if cov.shape[0] >= strim_5 + strim_3 + 10:
        # BUG FIX: `cov[strim_5:-strim_3]` yielded an EMPTY slice when
        # strim_3 == 0 (slice end -0 == 0); use an explicit end index.
        cov = cov[strim_5:cov.shape[0] - strim_3]
    mean = cov.mean()
    # Short-circuit keeps the std/mean division safe when mean <= 0.
    passes_filter = mean > 0 and cov.std() / mean <= sd_mean_cutoff
    return subject, passes_filter
450cb5cdf496a58f0a97d111997c4b57d75038f2
195,456
def definite_crop(img, bbox):
    """Crop an image along its last two axes using ``bbox``.

    NOTE(review): the original doc claimed bbox is ``[x, y, width, height]``,
    but the slice actually performed is
    ``img[:, bbox[0]:bbox[2], bbox[1]:bbox[3]]`` — i.e. bbox behaves as
    ``[start_dim1, start_dim2, end_dim1, end_dim2]``. Confirm with callers.

    Args:
        img: array of shape (channels, H, W).
        bbox: list of four slice bounds.

    Returns:
        the cropped image.
    """
    r0, c0, r1, c1 = bbox[0], bbox[1], bbox[2], bbox[3]
    return img[:, r0:r1, c0:c1]
bdfbaf42022aa06a79c2f2b0fa1f3c0d0ea711d8
141,182
def classify_by_composition(structures):
    """Classify the different IStructures by composition.

    :param structures: List of structures.
    :return: Dictionary mapping composition strings to the list of
        corresponding IStructures.
    """
    comp_dic = {}
    for structure in structures:
        comp_dic.setdefault(str(structure.composition), []).append(structure)
    return comp_dic
d7df6a6d678bec641e3007328bbc7d1f9ca8f323
574,442
def get_median(numbers: list) -> int:
    """Return the middle element of ``numbers`` in sorted order.

    For even-length input this returns the upper of the two middle
    elements (not their average), matching the original behavior.
    Unlike the previous version, the caller's list is no longer sorted
    in place.

    :param numbers: The non-empty list containing the numbers.
    :return: The middle element.
    """
    # sorted() instead of list.sort() so the argument is left untouched.
    ordered = sorted(numbers)
    return ordered[len(ordered) // 2]
dbdf0f0257865fe400cb6f8481a2761cec2202ac
389,384
def _add_thumb(s): """ Modifies a string (filename, URL) containing an image filename, to insert '.thumb.jpg' at the end. """ return s + ".thumb.jpg"
220bd067a7c443853ccdadc553ff6e73eaedaa32
391,513
import cmath  # Can return complex numbers from square roots


def quad_roots(a=1.0, b=2.0, c=0.0):
    """Return the roots of a quadratic equation ax^2 + bx + c = 0.

    INPUTS
    =======
    a: float, optional, default 1 — coefficient of quadratic term.
    b: float, optional, default 2 — coefficient of linear term.
    c: float, optional, default 0 — constant term.

    RETURNS
    ========
    roots: 2-tuple of complex floats (root1, root2); raises ValueError
        when a == 0.

    EXAMPLES
    =========
    >>> quad_roots(1.0, 1.0, -12.0)
    ((3+0j), (-4+0j))
    """
    if a == 0:
        raise ValueError("The quadratic coefficient is zero. This is not a quadratic equation.")
    disc_root = cmath.sqrt(b * b - 4.0 * a * c)
    denom = 2.0 * a
    return ((-b + disc_root) / denom, (-b - disc_root) / denom)
665e79754a37ffca1f7c9396e4b3f0186f80022a
110,041
def parse_bitcoin_conf(fd):
    """Return a dict from a bitcoin.conf-like configuration file (open fd).

    Comment lines (starting with '#') and lines without '=' are skipped;
    values keep everything after the first '='.
    """
    conf = {}
    for raw in fd:
        stripped = raw.strip()
        if stripped.startswith('#') or '=' not in stripped:
            continue
        key, _, value = stripped.partition('=')
        conf[key] = value
    return conf
33ac9d9561c7effeb3e499431fbb708703c1bd00
528,285
def extract_ids(objects_or_ids):
    """Return a list of ids given either objects with ids or a list of ids.

    The previous bare ``except:`` caught everything (including
    KeyboardInterrupt/SystemExit); it is narrowed to the two failure
    modes the fallback is meant for: items without an ``id`` attribute,
    or a non-iterable argument.
    """
    try:
        return [obj.id for obj in objects_or_ids]
    except (AttributeError, TypeError):
        return objects_or_ids
3a70f1f29cf2538d06439254daeff20f1982c85b
481,493
def is_set(cards):
    """Return True when all cards are unique, False otherwise."""
    distinct = set(cards)
    return len(distinct) == len(cards)
faa2585b108d8b970cb01f2bc38b0757bdfd1079
568,692
def forecast_error(y, y_predicted):
    """Calculate the forecast error in a regression model.

    Parameters
    ----------
    y : array-like of shape = number_of_outputs
        Represent the target values.
    y_predicted : array-like of shape = number_of_outputs
        Target values predicted by the model.

    Returns
    -------
    loss : ndarray of floats
        The difference between the true target values and the predicted
        values. The previous ``y - y_predicted`` raised TypeError for plain
        Python lists even though the documented example used lists;
        ``np.subtract`` accepts both (ndarray inputs behave as before).

    References
    ----------
    [1] `Wikipedia entry on the Forecast error
    <https://en.wikipedia.org/wiki/Forecast_error>`_

    Examples
    --------
    >>> forecast_error([3, -0.5, 2, 7], [2.5, 0.0, 2, 8])
    array([ 0.5, -0.5,  0. , -1. ])
    """
    # Local import keeps this self-contained snippet runnable on its own.
    import numpy as np
    return np.subtract(y, y_predicted)
4aede163ae2b1affbc533c9ef054ba789c8c53f0
629,643
import time


def retry(func, exc=Exception, tries=3, delay=1):
    """Call ``func()`` up to ``tries`` times with exponential backoff.

    Only exceptions matching ``exc`` (an Exception class or tuple of them)
    trigger a retry; anything else propagates immediately, as does the
    exception from the final attempt. ``delay`` is the wait in seconds
    between attempts and doubles after each retry.
    """
    remaining = tries
    wait = delay
    while True:
        try:
            return func()
        except exc:
            remaining -= 1
            if remaining == 0:
                raise
            time.sleep(wait)
            wait *= 2
5384afd77840b77b2cb278502d8fc64890af6be7
22,227
from typing import Dict
from typing import Any


def source_startswith(cell: Dict[str, Any], key: str) -> bool:
    """Return True if the cell's first source line starts with ``key``."""
    source = cell.get("source", [])
    if not source:
        return False
    return source[0].startswith(key)
a3dab1e72488a5075832432f36c6fc10d9808a7f
29,455
import ast
from typing import List
from typing import Tuple


def get_scr119(node: ast.ClassDef) -> List[Tuple[int, int, str]]:
    """Flag classes that could be plain dataclasses.

    A class is reported when it has no decorators and no bases, defines at
    least one function, defines only the functions a dataclass would
    generate (__init__, __eq__, __hash__, __repr__, __str__), and its
    __init__ contains nothing but simple ``self.attribute = name``
    assignments.

    Returns a list of (lineno, col_offset, message) tuples, e.g. for::

        class Person:
            first_name: str
            last_name: str
            birthdate: date

    decorated with ``@dataclass`` there is nothing to report.
    """
    RULE = "SCR119 Use a dataclass for 'class {classname}'"
    errors: List[Tuple[int, int, str]] = []
    # Decorated or derived classes are never candidates.
    if not (len(node.decorator_list) == 0 and len(node.bases) == 0):
        return errors
    # Methods a dataclass would auto-generate.
    dataclass_functions = [
        "__init__",
        "__eq__",
        "__hash__",
        "__repr__",
        "__str__",
    ]
    has_only_dataclass_functions = True
    has_any_functions = False
    has_complex_statements = False
    for body_el in node.body:
        if isinstance(body_el, (ast.FunctionDef, ast.AsyncFunctionDef)):
            has_any_functions = True
            if body_el.name == "__init__":
                # Ensure constructor only has pure assignments
                # without any calculation.
                for el in body_el.body:
                    if not isinstance(el, ast.Assign):
                        has_complex_statements = True
                        break
                    # It is an assignment, but we only allow
                    # `self.attribute = name`.
                    if any(
                        [
                            not isinstance(target, ast.Attribute)
                            for target in el.targets
                        ]
                    ) or not isinstance(el.value, ast.Name):
                        has_complex_statements = True
                        break
            if body_el.name not in dataclass_functions:
                has_only_dataclass_functions = False
    # Report only classes that actually define methods, all of which a
    # dataclass could generate, with a trivial constructor.
    if (
        has_any_functions
        and has_only_dataclass_functions
        and not has_complex_statements
    ):
        errors.append(
            (node.lineno, node.col_offset, RULE.format(classname=node.name))
        )
    return errors
eef10160a580c4b09451e69dd3054499e1d6ab91
656,690
import hashlib


def md5(string):
    """Compute and return the hex md5 digest of ``string`` (UTF-8 encoded).

    Args:
        string: string to digest.

    Returns:
        hex digest value.
    """
    return hashlib.md5(string.encode('UTF-8')).hexdigest()
c8204de8c173a4277cc4ea3218f7fd9a518a7348
260,929
def shuffle_data(data_df):
    """Return ``data_df`` with its rows shuffled and the index reset."""
    shuffled = data_df.sample(frac=1)
    return shuffled.reset_index(drop=True)
02b512b8198cb3ee4d294f4d67c64f5c57c74306
404,822
def remove_newlines(text):
    # type: (str) -> str
    """Remove newlines.

    The `name` field serves as a displayable title: newlines and leading/
    trailing whitespace are removed and consecutive spaces collapse to
    single spaces.

    :param text: Text for newline removal
    :return: Single line of text
    :rtype: str
    """
    parts = text.split()
    return " ".join(parts)
06a06d2756099fc45ffc1345bf0bef44cffa9f78
658,426
def merge_list_dictionaries(*dictionaries):
    """Merge list-valued dictionaries into the first one (mutated in place).

    Values for keys present in the first dict are extended with ``+=``;
    new keys are added. Calling with no arguments now returns {} instead
    of raising IndexError.

    :return: the first dictionary, updated with all later entries.
    """
    if not dictionaries:
        return {}
    merged = dictionaries[0]
    for addendum in dictionaries[1:]:
        for key, value in addendum.items():
            if key in merged:
                merged[key] += value
            else:
                merged[key] = value
    return merged
91b75c69f3281b3759193949537a03617630b88d
432,016
import torch


def rankdata_pt(b, tie_method='ordinal', dim=0):
    """pytorch equivalent of scipy.stats.rankdata, GPU compatible.

    :param b: torch.Tensor
        The 1-D or 2-D tensor of values to be ranked.
    :param tie_method: str, optional
        How ranks are assigned to tied elements: 'average', 'min', 'max',
        'dense' or 'ordinal' (the default, to match argsort). Only
        'ordinal' supports 2-D input; the other methods require 1-D.
    :param dim: int, optional
        The axis of the observation in the data if the input is 2-D.
        The default is 0.
    :return: torch.Tensor
        Tensor of the same length as `b`, containing rank scores.
    """
    # b = torch.flatten(b)
    if b.dim() > 2:
        raise ValueError('input has more than 2 dimensions')
    if b.dim() < 1:
        raise ValueError('input has less than 1 dimension')
    order = torch.argsort(b, dim=dim)
    if tie_method == 'ordinal':
        # NOTE(review): this is the argsort output + 1, not the inverse
        # permutation scipy's 'ordinal' returns; the docstring's "to match
        # argsort" remark suggests this is intentional — confirm with
        # callers before changing.
        ranks = order + 1
    else:
        if b.dim() != 1:
            raise NotImplementedError('tie_method {} not supported for 2-D tensors'.format(tie_method))
        else:
            n = b.size(0)
            ranks = torch.empty(n).to(b.device)
            dupcount = 0          # length of the current run of tied values
            total_tie_count = 0   # cumulative ties seen (used by 'dense')
            for i in range(n):
                inext = i + 1
                # End of a tie run: last element, or next sorted value differs.
                if i == n - 1 or b[order[i]] != b[order[inext]]:
                    if tie_method == 'average':
                        tie_rank = inext - 0.5 * dupcount
                    elif tie_method == 'min':
                        tie_rank = inext - dupcount
                    elif tie_method == 'max':
                        tie_rank = inext
                    elif tie_method == 'dense':
                        tie_rank = inext - dupcount - total_tie_count
                        total_tie_count += dupcount
                    else:
                        raise ValueError('not a valid tie_method: {}'.format(tie_method))
                    # Assign the shared rank to every member of the run.
                    for j in range(i - dupcount, inext):
                        ranks[order[j]] = tie_rank
                    dupcount = 0
                else:
                    dupcount += 1
    return ranks
88bd901c945d6ca9b3d2e785c6b0ea627116a03f
689,557
import hashlib


def calculate_hashes_from_pre_hashes(prehash_string_list, hashalg="sha256"):
    """Hash all strings in the list with the given algorithm.

    Each result is returned in the NI URI format
    ``ni:///<label>;<hexdigest>?ver=CBV2.0``. Supported algorithms:
    sha256, sha3-256, sha384, sha512.

    Raises:
        ValueError: when ``hashalg`` is unsupported (only if the list is
        non-empty, matching the original control flow).
    """
    # (NI label, hashlib constructor) per supported algorithm.
    algorithms = {
        'sha256': ('sha-256', hashlib.sha256),
        'sha3-256': ('sha3-256', hashlib.sha3_256),
        'sha384': ('sha-384', hashlib.sha384),
        'sha512': ('sha-512', hashlib.sha512),
    }
    hashValueList = []
    for pre_hash_string in prehash_string_list:
        if hashalg not in algorithms:
            raise ValueError("Unsupported Hashing Algorithm: " + hashalg)
        label, constructor = algorithms[hashalg]
        digest = constructor(pre_hash_string.encode('utf-8')).hexdigest()
        hashValueList.append('ni:///' + label + ';' + digest + '?ver=CBV2.0')
    return hashValueList
68ee3bf1ee7ccae3ce6d6f5ce4701b1136f55e8d
314,593
def truncate(source, max_len: int, el: str = "...", align: str = "<") -> str:
    """Return a truncated string of at most ``max_len`` characters.

    :param source: The string to truncate (ints are converted first).
    :param max_len: The total length of the string to be returned.
    :param el: Ellipsis characters appended when the string is truncated.
    :param align: Alignment used in the too-narrow fallback formatting.
    :return: "" for None/empty input; the string unchanged when shorter
        than ``max_len``; otherwise a truncated form. When ``max_len``
        cannot even fit one char plus the ellipsis, the first character
        is padded with '.' to ``max_len``.
    """
    if type(source) is int:
        source = str(source)
    if source is None or len(source) == 0:
        return ""
    if len(source) < max_len:
        return source
    if max_len < len(el) + 1:
        # Too narrow for content + ellipsis: pad the first char with dots.
        return "{s:{c}{a}{n}}".format(s=source[0], c=".", a=align, n=max_len)
    if len(source) > max_len:
        return source[:max_len - len(el)] + el
    return source
fae74fa46f1e3aaf06c9b2d7cf4be6f31fce2596
686,015
def translate_tokens(tokens, d):
    """Produce the set of translated tokens.

    All possible translations of each token found in ``d`` are added.

    Returns:
        tuple: (number of tokens that had a translation, set of all
        translations).
    """
    translated = set()
    n_translated = 0
    for token in tokens:
        if token in d:
            translated.update(d[token])
            n_translated += 1
    return n_translated, translated
79fa07b82c9aa6c180c92840f8cf91bdca3422cd
160,239
from math import fabs
from datetime import datetime


def pretty_date(d):
    """Format the time delta between ``d`` and now human-friendly.

    E.g. 'in 2 hours', '20 seconds ago'.

    :param d: datetime to compare with now.
    :return: human-readable delta string.
    """
    delta = datetime.now() - d
    # Whole seconds (sign preserved); negative means d is in the future.
    total_seconds = delta.seconds + delta.days * 24 * 60 * 60
    sec = int(fabs(total_seconds))
    if sec < 60:
        value, base = sec, 'second'
    elif sec < 60 * 60:
        value, base = sec / 60, 'minute'
    elif sec < 60 * 60 * 24:
        value, base = sec / 60 / 60, 'hour'
    else:
        value, base = sec / 60 / 60 / 24, 'day'
    unit = base + ('s' if value != 1 else '')
    if total_seconds < 0:
        return 'in %i %s' % (value, unit)  # future
    return '%i %s ago' % (value, unit)
153bf459c55d2d69c8d9638be1f91f66adfcac47
357,022
def friends(graph, user):
    """Return the set of the given user's friends in the given graph."""
    neighbor_iter = graph.neighbors(user)
    return set(neighbor_iter)
125c3cc21be4cc29f9ff6f0ff0bb60b35a1074ba
7,710
import hashlib


def get_hash(data):
    """Get an MD5 checksum (hex digest) for the input bytes."""
    return hashlib.md5(data).hexdigest()
9b19f26528bcec8119b9886300634719cf74f059
415,524
from typing import List


def accuracy_score(y_true: List, y_pred: List) -> float:
    """Compute accuracy score.

    Parameters
    ----------
    y_true : list
        True labels
    y_pred : list
        Predicted labels

    Returns
    -------
    float
        Fraction of positions where the labels agree.

    Examples
    --------
    >>> accuracy_score([1, 1, 0, 0], [1, 1, 1, 0])
    0.75
    """
    matches = 0
    for truth, guess in zip(y_true, y_pred):
        if truth == guess:
            matches += 1
    return matches / len(y_true)
552cc0fab91b8dd62b08e512fb2d9f232f5b3606
694,476
def to_list(*args: dict) -> dict:
    """Convert dicts sharing identical keys into one dict of lists.

    Raises:
        ValueError: when called with no dicts, or when the dicts do not
        all have the same keys.
    """
    if not args:
        raise ValueError("Input can't be empty.")
    reference_keys = set(args[0].keys())
    for other in args[1:]:
        if set(other.keys()) != reference_keys:
            raise ValueError("All input dictionaries must have the same keys.")
    return {key: [d[key] for d in args] for key in reference_keys}
fa88a8c60b7255528dea61c193556e532d00c476
545,081
import re def _createNameReplacement(search, replace): """ Return a tuple containing a regex and replacement string. Regexes replace prefixes, suffixes, or middles, as long as the search is separated with '_' from adjacent characters. """ regex = re.compile('(?<![^_]){0}(?=(_|$))'.format(search)) return (regex, replace)
7abfbf06683d887fb2f3b5d7f15bb77cfdd259ff
225,080
def message_warning(msg, *a, **kwargs):
    """Format a warning as just its message plus newline, ignoring
    everything except the message."""
    return '{}\n'.format(msg)
a39440007b6e07c4810e73a9c16edd06316962e4
638,035
def enl_seq(seq_of_seq, otherentries):
    """Extend each sequence in ``seq_of_seq`` by every element of
    ``otherentries``, producing all combinations.

    Note: this is used for building chords.
    """
    return [prefix + [entry] for prefix in seq_of_seq for entry in otherentries]
9c1220c9fa91d70246c808c6574b2ac4b29f34b5
264,797
def _get_statusline(params): """Generate a mock statusline like py.test would output.""" info_elements = [] if 'failed' in params: info_elements.append('{failed} failed') if 'passed' in params: info_elements.append('{passed} passed') if 'error' in params: info_elements.append('{error} error') info_string = ', '.join(info_elements) return '=== {0} in 1.00 seconds ==='.format(info_string)
fafc090c2da72487453ae92e19cdd26808f1b6dc
485,025
def cshock_dissipation_time(shock_vel, initial_dens):
    """Calculate the dissipation time of a C-type shock.

    Use to obtain a useful timescale for C-shock model runs: ion and
    neutral velocities equalize at the dissipation time, and full cooling
    takes a few dissipation times.

    Args:
        shock_vel (float): Velocity of the shock in km/s.
        initial_dens (float): Preshock density of the gas in cm^-3.

    Returns:
        float: The dissipation time of the shock in years.
    """
    PC_CGS = 3.086e18  # parsec in cgs (cm)
    SECONDS_PER_YEAR = 3.15569e7
    # Dissipation length in cm.
    dissipation_length = 12.0 * PC_CGS * shock_vel / initial_dens
    # Crossing time: the 1e-5 factor converts km/s to cm/s in the divisor.
    return (dissipation_length * 1.0e-5 / shock_vel) / SECONDS_PER_YEAR
b97a184a60ccc1587eee40bad6325131a1348c98
90,432
def convert_to_aws_ecr_compatible_format(string):
    """Make a string compatible with AWS ECR repository naming.

    Arguments:
        string {string} -- Desired ECR repository name.

    Returns:
        string -- Valid ECR repository name (spaces -> dashes, lowercased).
    """
    dashed = string.replace(" ", "-")
    return dashed.lower()
badde81dc7a6b6e53f52e60733b6623370a8f9e7
487,164
def rotate_left(a):
    """Return the matrix ``a`` rotated 90 degrees left (counter-clockwise).

    Rows of the result are tuples.
    """
    reversed_rows = [list(reversed(row)) for row in a]
    return list(zip(*reversed_rows))
6453b944b770cb6b18d039518bf1d10dff816b08
284,972
def cols_change_list() -> list:
    """Return the list of column names that need to be processed."""
    return [
        '12th_grade',
        '11th_grade',
        '10th_grade',
        'Indicador de Sucesso',
        'pct_stud_in_need_12th_grade',
        'pct_profs_on_the_board',
        'average_moms',
        'average_dads',
        'Média',
    ]
e797caba670db0e85ff15fafbca553aebe838bc2
282,463
def _IsSpecified(args, name): """Returns true if an arg is defined and specified, false otherwise.""" return hasattr(args, name) and args.IsSpecified(name)
3730179ad3913d95d9cc4cdbfd168ee92f54aff5
67,800
import re


def format_imports(text):
    """Merge consecutive ``import X`` lines per rule 3 (see docs).

    A run of plain imports terminated by a blank line or a ``from``
    import collapses to a single ``import a, b, c`` line.
    """
    # Rule to find consecutive imports.
    consecutive_imports = r"^import[\s\w]+?(?=from|^\s*$)"

    def _merge(match_obj):
        modules = re.findall(r"import (\w+)", match_obj.group(0))
        return r"import " + ", ".join(modules) + "\n"

    return re.sub(consecutive_imports, _merge, text, count=0, flags=re.MULTILINE)
2b66fded4660396c1b386daa99666250aa5a3f06
669,074
from typing import List


def double(items: List[str]) -> List[str]:
    """Return a new list that is the input list, repeated twice."""
    return [*items, *items]
9e4b6b9e84a80a9f5cbd512ca820274bb8cad924
706,540
def cov(df, groupby_columns: list, value_column: str):
    """Calculate the coefficient of variation for grouped data.

    Args:
        df: pandas dataframe.
        groupby_columns: list of column names to group the data by.
        value_column: name of the column containing the values for which
            cov is calculated.

    Returns:
        result: pandas df with grouped count, mean, population standard
        deviation (``std_p``, ddof=0) and ``cov`` = std_p / mean.
    """
    # Population standard deviation (ddof=0); the def's name becomes the
    # aggregated column label.
    def std_p(x):
        return x.std(ddof=0)

    subset = df[groupby_columns + [value_column]].copy()
    result = subset.groupby(groupby_columns).agg(['count', 'mean', std_p])
    result = result.droplevel(level=0, axis=1)
    result['cov'] = result['std_p'] / result['mean']
    return result
881cce82ff323e0295731901bafeaeac5e47c0bc
81,379
import mimetypes


def is_pdf(file_path):
    """Check whether the file is a pdf, judged by its guessed mimetype.

    Returns True/False when a type could be guessed; None when the type
    is unknown (preserving the original truthiness contract).
    """
    guessed = mimetypes.guess_type(file_path)[0]
    return guessed and guessed == 'application/pdf'
5d34f7c1a6e581a882c92d7dc664afc924de8bbe
445,344
def _organize_parameter(parameter):
    """
    Convert operation parameter message to its dict format.

    Args:
        parameter (OperationParameter): Operation parameter message —
            presumably a protobuf message with map fields; TODO confirm.

    Returns:
        dict, operation parameter with all typed maps flattened into a
        single dict, plus the string-list entries.
    """
    parameter_result = dict()
    # The four scalar-typed map fields are merged into one flat dict.
    parameter_keys = [
        'mapStr',
        'mapBool',
        'mapInt',
        'mapDouble',
    ]
    for parameter_key in parameter_keys:
        base_attr = getattr(parameter, parameter_key)
        parameter_value = dict(base_attr)
        # convert str 'None' (serialized placeholder) back to real None
        for key, value in parameter_value.items():
            if value == 'None':
                parameter_value[key] = None
        parameter_result.update(parameter_value)

    # Flatten `mapStrList` entries: each value wraps a repeated `strValue`
    # field, which is expanded into a plain Python list (empty strings are
    # mapped to None, mirroring the scalar handling above).
    str_list_para = dict(getattr(parameter, 'mapStrList'))
    result_str_list_para = dict()
    for key, value in str_list_para.items():
        str_list_para_list = list()
        for str_ele in getattr(value, 'strValue'):
            str_list_para_list.append(str_ele)
        str_list_para_list = list(map(lambda x: None if x == '' else x,
                                      str_list_para_list))
        result_str_list_para[key] = str_list_para_list
    parameter_result.update(result_str_list_para)
    return parameter_result
8cbd7c863bb244e71266a573ba756647d0ba13ea
708,639
def parse_config_option(line, is_global, dt_keys, global_key_types):
    """Parse one ``key=value`` line from the configuration file.

    Args:
        line: String containing the key=value line from the file.
        is_global: Whether to interpret the line as a global option
            rather than a DT-entry-specific one.
        dt_keys: Tuple containing all valid DT entry and global option
            strings in the configuration file.
        global_key_types: Dict of exclusive global options mapped to
            their types (int-typed values are coerced).

    Returns:
        Tuple of (key, value) for the parsed option.

    Raises:
        ValueError: On a malformed line or an unknown option name.
    """
    if '=' not in line:
        raise ValueError('Invalid line (%s) in configuration file' % line)
    # Unpacking raises ValueError when the line has more than one '=',
    # matching the original behaviour.
    key, value = [piece.strip() for piece in line.split('=')]
    if is_global and key in global_key_types:
        if global_key_types[key] is int:
            value = int(value)
    elif key not in dt_keys:
        raise ValueError('Invalid option (%s) in configuration file' % key)
    return key, value
016ca756bdad85524a9ab0c4a8526762db3ef4c7
148,258
def time2str(t):
    """Convert a number of seconds into an "hh:mm:ss" string.

    Hours are omitted when zero, giving "mm:ss". Each field is
    zero-padded to two digits. The original condition
    ``hours < 10 or hours != 0`` was always true, so every hour count
    (even 12) was prefixed with "0", producing strings like "012:00:00".

    Parameters
    ----------
    t : int
        Time in seconds.

    Returns
    -------
    s : str
        Time in the form "hh:mm:ss", or "mm:ss" when under an hour.
    """
    hours, remainder = divmod(t, 3600)
    mins, sec = divmod(remainder, 60)
    if hours == 0:
        return "%02d:%02d" % (mins, sec)
    return "%02d:%02d:%02d" % (hours, mins, sec)
58d01cc7699c957379d2a6841162664596d07811
163,007
def _eos(veos):
    """
    EOS = DOY for end of season.

    Args:
        veos: object with a datetime-like ``time`` coordinate exposing
            the ``.dt`` accessor (presumably an xarray DataArray —
            TODO confirm against callers).

    Returns:
        Day-of-year values extracted from ``veos.time``.
    """
    return veos.time.dt.dayofyear
1dba9aa2ec775283371e3c6e8c1d8fc1b4b85831
473,299
import string


def expand_vars(template, **variables):
    """Expand $-style placeholders in *template* from keyword arguments.

    Placeholders with no matching variable are left untouched
    (safe substitution).
    """
    return string.Template(template).safe_substitute(**variables)
fd31f64994b51088eccf03fdbd4a81757c6dd6c5
545,728
def get_resource_and_action(action, pluralized=None):
    """Extract resource and action (write, read) from an api operation.

    The operation name is expected to look like ``verb_resource[:...]``;
    a ``get`` verb counts as a read, anything else as a write. The
    resource name is pluralized with a trailing 's' unless *pluralized*
    is supplied.
    """
    prefix = action.split(':', 1)[0]
    pieces = prefix.split('_', 1)
    if pluralized:
        resource = pluralized
    else:
        resource = "%ss" % pieces[-1]
    is_write = pieces[0] != 'get'
    return (resource, is_write)
3ba6b9fc8e2b6d246500bb4864e8222b00f79209
458,921
def get_p7_seq(x):
    """
    Return the sequence of P7: TruSeq, Nextera, smallRNA.

    Lookup is case-insensitive; unknown names return None.
    """
    key = x.lower()
    if key == 'truseq':
        return ['AGATCGGAAGAGCACACGTCTGAACTCCAGTCAC', 6,
                'ATCTCGTATGCCGTCTTCTGCTTG']
    if key == 'nextera':
        return ['CTGTCTCTTATACACATCTCCGAGCCCACGAGAC', 8,
                'ATCTCGTATGCCGTCTTCTGCTTG']
    if key == 'smallrna':
        return []
    return None
700cad512b0019f37a66cb33665a8d994ebd63e9
605,843
import logging


def reconstruct_position(pos, deleted_pos):
    """Compute the prior position of one column in an MSA.

    Given the index of a column in an MSA that has undergone deletion of
    several columns, and the (pre-deletion) indexes of the deleted
    columns, return the column's position in the original matrix.

    Args:
        pos: index of the current position in the MSA.
        deleted_pos: list with column indexes that were removed.
    """
    logging.info("Reconstructing original positions of the matrix.")
    offset = 0
    for removed in sorted(deleted_pos):
        if removed > pos + offset:
            break
        offset += 1
    return pos + offset
07cad43b603c83140afaa40a82caeaf75918a019
331,309
def _get_output_file_idx(build_args): """ Get the index of output file from build args :param build_args: arguments of gcc :return: index in the build args where output file is. """ i = 0 while i < len(build_args): if build_args[i] == "-o": return i + 1 i += 1 return -1
1d8ff0b30525c1074feb88cf2646b910fe7f7db8
492,507
def frequencies(colorings):
    """
    Compute the frequency of each colour in a given coloring.

    :param colorings: Iterable of non-negative colour indices.
    :return: List where entry i is the number of occurrences of colour i
        (length max(colorings) + 1); empty when `colorings` is empty.
    """
    if not colorings:
        return []
    # Size by the largest colour value: the original sized the array by
    # len(colorings), which raised IndexError for sparse colourings where
    # a colour index exceeds the number of elements (e.g. [5]).
    frequency = [0] * (max(colorings) + 1)
    for colour in colorings:
        frequency[colour] += 1
    return frequency
f9509e4c6ff253656a5e3d734e3b913c4428ec1c
668,071
def get_duration(s):
    """Calculate trip duration in seconds.

    Args:
        s: Mapping with 'tpep_pickup_datetime' and
           'tpep_dropoff_datetime' datetime values.

    Returns:
        int duration in seconds, or None when either timestamp is
        missing/falsy.
    """
    pickup = s['tpep_pickup_datetime']
    dropoff = s['tpep_dropoff_datetime']
    if not all((pickup, dropoff)):
        return None
    # timedelta.seconds only holds the sub-day component, silently
    # dropping whole days; total_seconds() gives the true duration.
    return int((dropoff - pickup).total_seconds())
b92e09ecf666d338f8b004cfdf8d3b80d6d19e57
277,946
def getVarTimeRange(dataArray, rangeMin, rangeMax, timeStrt, timeEnd):
    """Select a radar variable over a time window and range interval.

    Arguments
    ---------
    dataArray : xarray DataArray of the variable
    rangeMin : minimum range in m
    rangeMax : maximum range in m
    timeStrt : starting time (pandas datetime)
    timeEnd : ending time (pandas datetime)

    Returns
    -------
    dataArray : xarray DataArray
        The DataArray restricted to the requested time and range slices.
    """
    time_window = slice(timeStrt, timeEnd)
    range_window = slice(rangeMin, rangeMax)
    return dataArray.sel(time=time_window).sel(range=range_window)
3baa45abe77a81786c34ace3e2e8c6ad67dfd6ba
447,054
import configparser


def get_aws_config(path="~/.aws/credentials"):
    """Read AWS credentials from an ini-style file.

    Note: the path is used verbatim ('~' is not expanded), and a missing
    file yields an empty parser since ConfigParser.read ignores
    unreadable paths.
    """
    parser = configparser.ConfigParser()
    parser.read(path)
    return parser
76426db61b986b0e0c749caa39078a902e68c76a
445,708
import torch


def build_mse_loss(loss_coef, correspondence_keys=None):
    """
    Build the mean squared error loss function.

    Args:
        loss_coef (dict): dictionary containing the weight coefficients
            for each property being predicted.
            Example: `loss_coef = {'energy': rho, 'force': 1}`
        correspondence_keys (dict): a dictionary that links an output key
            to a different key in the dataset.
            Example: correspondence_keys = {"autopology_energy_grad":
            "energy_grad"}. This tells us that if we see
            "autopology_energy_grad" show up in the loss coefficient,
            then the loss should be calculated between the network's
            output "autopology_energy_grad" and the data in the dataset
            given by "energy_grad". This is useful if we're only
            outputting one quantity, such as the energy gradient, but we
            want two different outputs (such as "energy_grad" and
            "autopology_energy_grad") to be compared to it.

    Returns:
        mean squared error loss function (a closure over loss_coef and
        correspondence_keys).
    """
    correspondence_keys = {} if (correspondence_keys is None) else correspondence_keys

    def loss_fn(ground_truth, results):
        """Calculates the weighted MSE between ground_truth and results.

        Args:
            ground_truth (dict): e.g. `{'energy': 2, 'force': [0, 0, 0]}`
            results (dict): e.g. `{'energy': 4, 'force': [1, 2, 2]}`

        Returns:
            loss (torch.Tensor): scalar sum of per-key weighted MSEs
            (0.0 if no key contributes).
        """
        # Every weighted key must be predicted, and must have a target
        # either directly or via a correspondence mapping.
        assert all([k in results.keys() for k in loss_coef.keys()])
        assert all([k in [*ground_truth.keys(), *correspondence_keys.keys()]
                    for k in loss_coef.keys()])

        loss = 0.0
        for key, coef in loss_coef.items():
            # Resolve the dataset key through correspondence_keys when the
            # output key is not stored directly in the ground truth.
            if key not in ground_truth.keys():
                ground_key = correspondence_keys[key]
            else:
                ground_key = key

            targ = ground_truth[ground_key]
            # Reshape the prediction so elementwise subtraction is valid.
            pred = results[key].view(targ.shape)

            # select only properties which are given (NaN marks missing
            # labels and is excluded from the loss)
            valid_idx = torch.bitwise_not(torch.isnan(targ))
            targ = targ[valid_idx]
            pred = pred[valid_idx]

            # Skip keys whose targets are entirely missing for this batch.
            if len(targ) != 0:
                diff = (targ - pred ) ** 2
                err_sq = coef * torch.mean(diff)
                loss += err_sq

        return loss

    return loss_fn
003e8ad9315e3b9e8d49ee5a7fedd0229502cffd
470,438
def number_to_string(n, alphabet):
    """
    Given a non-negative integer ``n``, convert it to a string composed of
    the given ``alphabet`` mapping, where the position of each element in
    ``alphabet`` is its radix value.

    Fixes over the original: the ValueError message is now actually
    formatted (it previously passed ``n`` as a second exception argument
    instead of interpolating it), and ``n == 0`` yields the zero digit
    instead of an empty string.

    Examples::

        >>> number_to_string(12345678, '01')
        '101111000110000101001110'
        >>> number_to_string(12345678, 'ab')
        'babbbbaaabbaaaababaabbba'
        >>> number_to_string(12345678, string.ascii_letters + string.digits)
        'ZXP0'
        >>> number_to_string(0, '01')
        '0'
    """
    current = int(n)
    if current < 0:
        raise ValueError("invalid n (must be non-negative): %s" % n)
    base = len(alphabet)
    if current == 0:
        # Zero still has one digit; the divide loop below would yield ''.
        return alphabet[0]
    result = ''
    while current:
        current, digit = divmod(current, base)
        result = alphabet[digit] + result
    return result
6be6b4e725331dbaa10ded7bdfa6daffd7a967d1
315,881
import math


def solar_declination(doy):
    """
    Solar declination for a given day of the year (Allen et al. 1998).

    Parameters
    ----------
    doy : int
        Julian day of the year (-).

    Returns
    -------
    float
        Solar declination (rad).
    """
    day_angle = ((2 * math.pi) / 365) * doy - 1.39
    return 0.409 * math.sin(day_angle)
db1b6bbf38c69ffbb0b5f5ba47283cdf2cd7a4dd
627,173
def GetGTestOutput(args):
    """Extract the value of --gtest_output from args.

    Returns the full text after the first '=', so values that themselves
    contain '=' (the original ``split('=')[1]`` truncated them) are kept
    intact. Returns None when the flag is not present.
    """
    for arg in args:
        if '--gtest_output=' in arg:
            # maxsplit=1 keeps everything after the first '='.
            return arg.split('=', 1)[1]
    return None
d2dfd96c30184209120d656a2c797d66e96bde1b
481,640
def get_table_number_of_rows(cursor, table_name):
    """ Return the number of rows of a table """
    # NOTE(review): table_name is interpolated directly into the SQL
    # statement; only call this with trusted table names.
    sql_command = f"SELECT COUNT(*) FROM '{table_name}';"
    rows = cursor.execute(sql_command).fetchall()
    # COUNT(*) always yields exactly one row with one column.
    assert len(rows) == 1
    assert len(rows[0]) == 1
    return rows[0][0]
310d64d0b25c8729a838a5c5335b80f3a0bb8e6a
71,219
def to_numpy(var):
    """Tensor --> numpy.

    Parameters:
        var (tensor): tensor (may require grad and/or live on a
            non-CPU device)

    Returns:
        var (ndarray): ndarray
    """
    # .data is legacy autograd API; detach() is the supported way to drop
    # the graph, and .cpu() makes this work for GPU tensors too (the
    # original raised for CUDA tensors).
    return var.detach().cpu().numpy()
6e5ae6ad239306cbd1d0f8f04e339bd08398e693
405,465
def area_triangle(b, h):
    """
    Area of a triangle given its base *b* and height *h*.
    """
    return b * h / 2
a9bdd783f1a43fe73bcd7338120ffaef4863fac6
627,500
def int_bytes_to_programmatic_units(byte_value):
    """Convert a byte count into OVF-style bytes + multiplier.

    Inverse operation of :func:`programmatic_bytes_to_int`.

    Args:
        byte_value (int): Number of bytes.

    Returns:
        tuple: ``(base_value, programmatic_units)``

    Examples:
        ::

            >>> int_bytes_to_programmatic_units(2147483648)
            ('2', 'byte * 2^30')
            >>> int_bytes_to_programmatic_units(2147483647)
            ('2147483647', 'byte')
            >>> int_bytes_to_programmatic_units(134217728)
            ('128', 'byte * 2^20')
            >>> int_bytes_to_programmatic_units(0)
            ('0', 'byte')
    """
    byte_value = int(byte_value)
    shift = 0
    # Guard byte_value != 0: since 0 % 1024 == 0, the original loop spun
    # forever on zero. Integer division (//) also avoids the float
    # round-trip of the original /=.
    while byte_value and byte_value % 1024 == 0:
        shift += 10
        byte_value //= 1024
    byte_str = str(byte_value)
    if shift == 0:
        return (byte_str, "byte")
    return (byte_str, "byte * 2^{0}".format(shift))
b88bf07a3d60427ffbc964384aa2fdb38fa20694
289,602
def polyfill_integers(generator, low, high=None, size=None, dtype="int32",
                      endpoint=False):
    """Sample integers from either numpy RNG API across versions.

    Dispatches to ``RandomState.randint`` when the generator is a legacy
    ``RandomState`` (detected by the presence of ``randint``), otherwise
    to ``Generator.integers``. The parameters mirror
    :func:`numpy.random.Generator.integers`.

    Returns
    -------
    int or ndarray of ints
        See :func:`numpy.random.Generator.integers`.
    """
    is_legacy = hasattr(generator, "randint")
    if not is_legacy:
        return generator.integers(low=low, high=high, size=size,
                                  dtype=dtype, endpoint=endpoint)
    # randint has no endpoint flag; emulate it by shifting the bounds.
    if endpoint:
        if high is None:
            low, high = 0, low + 1
        else:
            high = high + 1
    return generator.randint(low=low, high=high, size=size, dtype=dtype)
b4061e8ec7cb9927bbe4fcce1c847aecdc10052b
9,544
def get_appropriated_part_size(file_size):
    """
    Get the appropriate part size when uploading or downloading files,
    given an initial file size in bytes.

    Raises:
        ValueError: when the file exceeds the largest supported size.
    """
    # (max file size in bytes, part size) — checked smallest first.
    thresholds = (
        (104857600, 128),    # <= 100MB
        (786432000, 256),    # <= 750MB
        (2097152000, 512),   # <= 2000MB
    )
    for limit, part_size in thresholds:
        if file_size <= limit:
            return part_size
    raise ValueError('File size too large')
ac1f9aa0dbfd63984879df49abeeca1f0af6ead8
340,345
import click


def success_style(success_string: str) -> str:
    """
    Styling function to emphasise bullet names.

    Wraps the string in click's ANSI styling with a green foreground.

    :param success_string: The string to style
    :type success_string: str
    :return: Styled success string
    :rtype: str
    """
    return click.style(success_string, fg="green")
ce658ec661193c4381c458f84e2e6ee2f300d281
300,131
from typing import Union
from typing import List
from typing import Tuple

import torch


def get_qubit_indices(
        index: Union[int, List[int], Tuple[int, ...], torch.Tensor],
        state_tensor: torch.Tensor,
        num_qubits: int
):
    """Convert qubit indices 0, ..., n-1 to correct PyTorch tensor indices.

    The trailing ``num_qubits`` dimensions of ``state_tensor`` hold the
    qubits; any leading dimensions are batch dimensions. Non-negative
    qubit indices are therefore shifted right by the number of batch
    dimensions, while negative indices (which already count from the end)
    are returned unchanged.

    Examples:
        >>> state = torch.rand(2, 2)
        >>> get_qubit_indices(0, state, num_qubits=2)
        0
        >>> state = torch.rand(500, 17, 2, 2, 2)
        >>> get_qubit_indices([1, 0], state, num_qubits=3)
        [3, 2]
        >>> get_qubit_indices([-1, 0], state, num_qubits=3)
        [-1, 2]

    Raises:
        ValueError: when an index lies outside
            {-num_qubits, ..., num_qubits - 1}.
    """
    batch_dims = state_tensor.dim() - num_qubits
    # Build the message before converting, so the error shows the
    # caller's original index representation.
    range_message = 'Expected index in {-num_qubits, ..., num_qubits - 1}.\n'
    range_message += f'Num_qubits: {num_qubits}, index: {index}.'

    was_tensor = isinstance(index, torch.Tensor)
    idx = index if was_tensor else torch.tensor(index)

    if (idx >= num_qubits).any() or (idx < -num_qubits).any():
        raise ValueError(range_message)

    # Shift only non-negative indices past the batch dimensions.
    shifted = idx + (idx >= 0) * batch_dims
    return shifted if was_tensor else shifted.tolist()
0f78ed23417d447450622feb370eeb399650e870
500,875
def pad(data, bs=16):
    """PKCS#7-pad *data* to a multiple of *bs* bytes.

    Args:
        data: bytes-like input.
        bs: block size (default 16).

    Returns:
        data with 1..bs padding bytes appended, each equal to the padding
        length (an already-aligned input gains a full extra block, as
        PKCS#7 requires).
    """
    length = bs - (len(data) % bs)
    # bytes([length]) replaces six.int2byte — the six dependency is not
    # needed on Python 3.
    return data + bytes([length]) * length
e5a1c422f4021da2e7afc1af6483c97e99e6b6a6
79,219
from typing import Optional


def validate_discharge_efficiency(discharge_efficiency: Optional[float]) -> Optional[float]:
    """
    Validate the discharge efficiency of an object.

    Discharge efficiency is always optional.

    :param discharge_efficiency: The discharge efficiency of the object.
    :return: The validated discharge efficiency.
    :raises ValueError: if the value lies outside [0, 1].
    """
    if discharge_efficiency is None:
        return None
    if not 0 <= discharge_efficiency <= 1:
        raise ValueError("Discharge efficiency must be between 0 and 1.")
    return discharge_efficiency
689e951673954f2112112d2d6e1c358f9ef7678b
615,852
def read_network_from_file(file_name, delimeter=','):
    """
    Read a network of cities and pairwise distances from a file.

    Each line has the form ``city1<delimeter>city2<delimeter>distance``.

    Args:
        file_name: file to read from
        delimeter: delimiter that separates fields

    Returns:
        cities: list of unique city names, in order of first appearance
        distances: dict mapping a city index to a dict of
            {other city index: distance} (one direction per input line)
    """
    cities = list()
    distances = dict()
    # `with` guarantees the handle is closed (the original leaked it).
    with open(file_name, 'r') as f:
        for line in f:
            fields = line.rstrip().split(delimeter)
            city_1 = fields[0].strip(' ')
            city_2 = fields[1].strip(' ')
            distance = float(fields[2])

            # build the list of cities
            if city_1 not in cities:
                cities.append(city_1)
            if city_2 not in cities:
                cities.append(city_2)

            # Cache the index lookups instead of repeating .index().
            idx_1 = cities.index(city_1)
            idx_2 = cities.index(city_2)

            # build the dictionary based on city distances
            if idx_1 not in distances:
                distances[idx_1] = {idx_2: distance}
            elif idx_2 not in distances[idx_1]:
                distances[idx_1][idx_2] = distance
    return cities, distances
c2f3562c7cf314a45a4449f0e23ddab0f1bcaec9
183,810
from pathlib import Path


def get_last_file(base_dir, pattern):
    """Return the most recently created file in *base_dir* that matches
    *pattern*.

    Parameters
    ----------
    base_dir: str or Path
        Base directory to search for a matching file.
    pattern: str
        Glob pattern used to find files.

    Returns
    -------
    fname: Path of the matching file with the newest creation time.
    """
    newest_first = sorted(Path(base_dir).glob(pattern),
                          key=lambda candidate: candidate.stat().st_ctime,
                          reverse=True)
    return newest_first[0]
a1f0f9fdea03f8271c2e0320496c9d11a976c4a2
253,509
from typing import Callable


def runge_kutta_method_o4(f: Callable[[float, float], float], y0: float,
                          x: list[float], h: float,
                          precission: int = 5) -> float:
    """Finds the value of y(x) for some ODE using Runge Kutta method (order 4).

    Note: intermediate values (yi and k1..k4 per step) are printed to
    stdout as a side effect, rounded to `precission` decimal places.

    Params
    ------
    f: Callable[[float, float], float]
        function representing dy/dx, i.e., f(x, y)
    y0: float
        value of y at x0
    x: list[float]
        list of values at which y has to be computed (assumed equally
        spaced by h — TODO confirm with callers)
    h: float
        interval size
    precission: int
        decimal places used when printing each step

    Returns
    -------
    float
        value of y(x)
    """
    yi = y0
    # Step from each grid point except the last; k1..k4 are the four
    # classical RK4 slope estimates and yi is their weighted update.
    for i, xi in enumerate(x[:-1], 1):
        k1 = h*f(xi, yi)
        k2 = h*f(xi + h/2, yi + k1/2)
        k3 = h*f(xi + h/2, yi + k2/2)
        k4 = h*f(xi + h, yi + k3)
        yi = yi + (k1 + 2*k2 + 2*k3 + k4)/6
        print(f"y{i}={yi:.{precission}f}\n\t{k1=:.{precission}f}\n\t{k2=:.{precission}f}\n\t{k3=:.{precission}f}\n\t{k4=:.{precission}f}")
    return yi
5f8f19bdf6e565b63d350888bd7b08f36f7d4ee7
635,996
def get_file_names_on_anaconda_channel(username, anaconda_cli, channel='main'):
    """Get the names of **all** the files on anaconda.org/username

    Parameters
    ----------
    username : str
    anaconda_cli : return value from binstar_client.utils.get_binstar()
    channel : str, optional
        The channel on anaconda.org/username to upload to.
        Defaults to 'main'

    Returns
    -------
    set
        The file names of all files on an anaconda channel.
        Something like 'linux-64/album-0.0.2.post0-0_g6b05c00_py27.tar.bz2'
    """
    # show_channel() returns channel metadata; each entry in its 'files'
    # list carries a 'basename' — presumably the platform-qualified file
    # name shown in the example above (TODO confirm against the API).
    return set(
        [f['basename'] for f in
         anaconda_cli.show_channel(channel, username)['files']])
4f547249fabf7a199976a6207820df46833588dd
612,742
def index_of(array, value, from_index=0):
    """Find the index of the first occurrence of *value* in *array*.

    Args:
        array (list): List to search.
        value (mixed): Value to search for.
        from_index (int, optional): Index to start searching from.

    Returns:
        int: Index of the found item, or ``-1`` when absent.

    Example:

        >>> index_of([1, 2, 3, 4], 2)
        1
        >>> index_of([2, 1, 2, 3], 2, from_index=1)
        2

    .. versionadded:: 1.0.0
    """
    # EAFP: list.index raises ValueError when the value is missing.
    try:
        found_at = array.index(value, from_index)
    except ValueError:
        found_at = -1
    return found_at
57b7bfee4f74bd448b1fb95e3a30a1df85362345
217,869
def knot_insertion_alpha(u, knotvector, span, idx, leg):
    """ Computes :math:`\\alpha` coefficient for knot insertion algorithm.

    :param u: knot
    :type u: float
    :param knotvector: knot vector
    :type knotvector: tuple
    :param span: knot span
    :type span: int
    :param idx: index value (degree-dependent)
    :type idx: int
    :param leg: i-th leg of the control points polygon
    :type leg: int
    :return: coefficient value
    :rtype: float
    """
    lower_knot = knotvector[leg + idx]
    upper_knot = knotvector[idx + span + 1]
    return (u - lower_knot) / (upper_knot - lower_knot)
51e3e61eae5e562c47b67904873e4ffe327bb842
674,601
def get_forecast_metadata_variables(ds):
    """
    Return the names of variables representing forecast reference time
    metadata.

    :param netCDF4.Dataset ds: An open netCDF4 Dataset.
    :rtype: list
    """
    # CF standard names that mark forecast metadata variables.
    metadata_names = {"forecast_period", "forecast_reference_time"}
    # Variables lacking a standard_name attribute default to None and
    # never match.
    return [
        varname
        for varname in ds.variables
        if getattr(ds.variables[varname], "standard_name", None) in metadata_names
    ]
83b8fe0eb785c1a3129ec19df680ce135cd3fa82
7,062
def max_factor(num):
    """Return the largest prime factor of *num*.

    Fixes over the original: it returned the loop's next trial factor
    when the value reduced to 1 (e.g. max_factor(4) gave 3 instead of 2),
    and could return a float because it divided with ``/=``. Both are
    fixed by tracking the last successful divisor and using integer
    division.

    Args:
        num: Integer >= 2 (for num < 2 there is no prime factor and 1 is
            returned as a neutral value).

    Returns:
        int: The maximum prime factor of num.
    """
    largest = 1
    factor = 2
    while factor * factor <= num:
        while num % factor == 0:
            largest = factor
            num //= factor  # // keeps everything an int
        factor += 1
    if num > 1:
        # Whatever remains after removing all smaller factors is prime.
        largest = num
    return largest
3b29d27d77454c1a52f483936b6a6bd94b98e306
386,640