content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
def myst_extensions(no_md=False):
    """Return the file extensions recognized as the myst format.

    :param no_md: when True, exclude the plain ``.md`` extension.
    :return: list of extension strings, each with a leading dot.
    """
    extensions = [".md", ".myst", ".mystnb", ".mnb"]
    if no_md:
        extensions = extensions[1:]
    return extensions
4a71ae74b57ed8a94d751489b6a1ed92818d4edc
101,125
from pathlib import Path


def category_files(category: str) -> list:
    """Return the module stems in ``pandas_ta/<category>/``.

    The package marker ``__init__`` is excluded from the result.
    """
    directory = Path(f"pandas_ta/{category}/")
    return [path.stem for path in directory.glob("*.py") if path.stem != "__init__"]
37a06c5b2a6c36347666166f4ae86d49d750452e
101,126
import pickle


def unit(name, attribute='gcost'):
    """Return a unit's resource cost from the pickled unit database.

    :param name: Unit name (key in the pickled mapping).
    :param attribute: Attribute to read: 'gcost' (default) or 'rcost'.
    :return: The decoded attribute value.
    """
    # Context manager guarantees the file handle is closed; the previous
    # version leaked the open file object.
    # NOTE(review): pickle.load on untrusted data is unsafe — this file is
    # presumably shipped with the application; confirm its provenance.
    with open('./battlefortune/data/units', 'rb') as file:
        return pickle.load(file)[name][attribute]
ef979053e5e839a0457b94feb5b033d5b324a5d5
101,130
from typing import Iterable
import itertools


def all_equal(seq: Iterable) -> bool:
    """Return True if every element of ``seq`` compares equal.

    An empty sequence counts as all-equal (returns True).
    """
    # groupby collapses runs of equal elements: at most one group means
    # every element was equal.
    groups = itertools.groupby(seq)
    head = next(groups, (None, groups))
    return bool(head) and next(groups, None) is None
262ac0d5f13d838ddec136a5d3b7982d7499b6a7
101,132
def lcm(num1, num2):
    """Return the lowest common multiple of two positive integers.

    :param num1: first positive integer
    :param num2: second positive integer
    :return: the smallest positive integer divisible by both inputs
    """
    from math import gcd  # local import keeps the module dependency-free

    # num1 * num2 == gcd(num1, num2) * lcm(num1, num2); this runs in
    # O(log min(num1, num2)) instead of the previous O(lcm / num1)
    # trial-multiplication loop.
    return num1 * num2 // gcd(num1, num2)
bdb1c66c2e155fe930ffb84d2657e1536d2387dc
101,134
def get_branch_name(ref: str, ref_prefix='refs/heads/', clean=True, max_len=15):
    """Extract and normalize the branch name from a CodeCommit event reference.

    :param ref: Full reference as obtained from the CodeCommit event.
    :param ref_prefix: Expected prefix of the reference.
    :param clean: When True, lowercase the name and replace "/" with "-".
    :param max_len: Maximal length of the resulting branch name. If None, the
        returned string is not shortened.
    :return: The (optionally cleaned and truncated) branch name.
    :raises Exception: if ``ref`` does not start with ``ref_prefix``.
    """
    if not ref.startswith(ref_prefix):
        # BUG FIX: the old message mixed shell-style "${...}" into an
        # f-string, rendering a stray literal "$" before the prefix.
        raise Exception(f"Expected reference {ref} to start with {ref_prefix}")
    branch_name = ref[len(ref_prefix):]
    if clean:
        branch_name = branch_name.replace('/', '-').lower()
    if max_len is not None:
        branch_name = branch_name[:max_len]
    return branch_name
48a12bacbdb2b7d51d61cfd941fa343c04073b97
101,136
import torch


def least_squares(a, b, rcond=None):
    """
    A PyTorch implementation of NumPy's "linalg.lstsq" function

    Parameters
    ----------
    a : torch.Tensor (m,n)
        "Coefficient" matrix
    b : torch.Tensor (m,) or (m,k)
        "dependent variable" values
    rcond : float
        Cut-off ratio for small singular values of a. For the purposes of
        rank determination, singular values are treated as zero if they are
        smaller than rcond times the largest singular value of a. If `None`,
        the default will use the machine precision times max(m, n)

    Returns
    -------
    x : torch.Tensor (n,) or (n,k)
        least-squares solution
    residuals: torch.Tensor (1,) or (k,) or (0,)
        sums of residuals; squared Euclidean 2-norm for each column in
        b - a*x. If rank(a) < n or m <= n, this is an empty array. If b is
        1-dimensional, this is a (1,) shape array. Otherwise the shape is (k,).
    rank : int
        rank of matrix a
    s : torch.Tensor (min(m,n),)
        singular values of matrix a
    """
    m,n = a.shape
    if rcond is None:
        # Same default as numpy.linalg.lstsq: machine eps scaled by max(m, n).
        rcond = max(a.shape)*torch.finfo(a.dtype).eps
    # NOTE(review): torch.svd assumes s[0] is the largest singular value
    # (torch.svd returns them in descending order).
    U, s, V = torch.svd(a)
    rank = torch.sum(s > rcond*s[0]).item()
    # Pseudo-inverse of the singular values: invert those above the cutoff,
    # zero out the rest (avoids division by ~0).
    s_inv = torch.where(s > rcond*s[0], s.reciprocal(), torch.zeros_like(s))
    # x = V @ diag(s_inv) @ U^T @ b, computed without materializing diag().
    x = torch.matmul(V, s_inv.unsqueeze(1) * torch.matmul(U.T, b))
    if rank < n or m <= n:
        # Matches NumPy: residuals are only defined for overdetermined,
        # full-rank systems; otherwise return an empty tensor.
        residuals = torch.tensor([])
    else:
        # keepdim trick yields a (1,) tensor for 1-D b, (k,) for 2-D b.
        residuals = torch.sum((a@x - b)**2, 0, keepdim=len(b.shape)==1)
    return x, residuals, rank, s
676703ff9e9407efe5a11476009dc90dba70db63
101,137
def _get_mobility_factor_counts_for_reasonable_r(results, method, lower_R=0.5, upper_R=1.5): """ Returns the count of mobility factors for which method resulted in an R value between `lower_R` and `upper_R`. Args: results (pd.DataFrame): Dataframe with rows as methods and corresponding simulation metrics. method (str): name of the intervention method for which range is to be found lower_R (float): a valid lower value of R upper_R (float): a valid upper value of R Returns: (pd.Series): A series where each row is a mobility factor and corresponding number of simulations that resulted in valid R. """ selector = results['method'] == method correct_R = (lower_R <= results[selector]['r']) & (results[selector]['r'] <= upper_R) return results[selector][correct_R][['mobility_factor']].value_counts()
d7f71cb9383967aae6720a23c4d25daf396487fe
101,139
def create_clip_in_selected_slot(creator, song, clip_length=None):
    """Create a clip in the song's highlighted slot if that slot is empty.

    Uses ``creator`` to make the clip (launched legato), shows it in the
    detail view, and returns the slot's clip.
    """
    slot = song.view.highlighted_clip_slot
    if creator and slot and not slot.has_clip:
        creator.create(slot, clip_length, legato_launch=True)
        song.view.detail_clip = slot.clip
    # NOTE(review): raises AttributeError when no slot is highlighted —
    # presumably callers guarantee one exists.
    return slot.clip
675b793b346aa40744b36ea30f6828be06bd49be
101,143
def rows_to_dicts(cur, rows):
    """Convert a cursor fetch result into a list of column-name dicts.

    Args:
        cur: sqlite3 cursor whose ``description`` names the columns.
        rows (list<tuple>): rows as returned by a cursor fetch.

    Returns:
        List of dicts where each key is a column name.
    """
    names = [entry[0] for entry in cur.description]
    result = []
    for row in rows:
        result.append(dict(zip(names, row)))
    return result
8d6b7f4abbb1b72e3a4bd3340ce35ae5b1f5b4d7
101,147
def generate_access(metadata):
    """Generate the access metadata section (fully restricted, no owners).

    https://oarepo.github.io/publications-api/schemas/publication-dataset-v1.0.0.html#allOf_i0_allOf_i1_access
    """
    # `metadata` is currently unused; access is always fully restricted.
    access = {}
    access['record'] = 'restricted'
    access['files'] = 'restricted'
    access['owned_by'] = []
    return access
d34615e28f752ad77f5519e0f09c299523a60efb
101,148
def with_metaclass(meta, *bases):
    """
    Create a base class with a metaclass.

    For example, if you have the metaclass

    >>> class Meta(type):
    ...     pass

    Use this as the metaclass by doing

    >>> from sympy.core.compatibility import with_metaclass
    >>> class MyClass(with_metaclass(Meta, object)):
    ...     pass

    This is equivalent to the Python 2::

        class MyClass(object):
            __metaclass__ = Meta

    or Python 3::

        class MyClass(object, metaclass=Meta):
            pass

    That is, the first argument is the metaclass, and the remaining arguments
    are the base classes. Note that if the base class is just ``object``, you
    may omit it.

    >>> MyClass.__mro__
    (<class 'MyClass'>, <... 'object'>)
    >>> type(MyClass)
    <class 'Meta'>
    """
    # This trick (borrowed from the `six` library) creates a temporary
    # throwaway metaclass: instantiating "NewBase" builds a dummy base class,
    # but when the *user's* class statement runs, metaclass.__new__ receives
    # the real class dict and delegates to `meta` with the intended bases.
    class metaclass(meta):
        __call__ = type.__call__
        __init__ = type.__init__
        def __new__(cls, name, this_bases, d):
            if this_bases is None:
                # First call (below): create the dummy NewBase placeholder.
                return type.__new__(cls, name, (), d)
            # Second call (user's class statement): build the real class
            # with the requested metaclass and bases.
            return meta(name, bases, d)
    return metaclass("NewBase", None, {})
a4a9f7d096312f97c7c5af0e5f520bb80313524c
101,150
def camel_to_snake(name):
    """Convert a camelCase identifier to snake_case.

    Hyphens are dropped first. Used in the Fleur parser to convert attribute
    names from the xml files.
    """
    pieces = []
    for char in name.replace('-', ''):
        if char.isupper():
            pieces.append('_' + char.lower())
        else:
            pieces.append(char)
    return ''.join(pieces).lstrip('_')
4ce20f914fefb33a41ca0a0a5fcba27f0b3a84ba
101,153
def get_all_sell_orders(market_datas):
    """Collect every sell order across all markets.

    :param market_datas: the market data dictionary (market id -> order list)
    :return: flat list of orders whose 'is_buy_order' flag is falsy
    """
    return [
        order
        for orders in market_datas.values()
        for order in orders
        if not order['is_buy_order']
    ]
47fe504b72d9028d4d2e999ee4b748d9dae7f6ee
101,156
def update_infer_params(
        model_configs,
        beam_size=None,
        maximum_labels_length=None,
        length_penalty=None):
    """Reset inference-specific parameters.

    Args:
        model_configs: A dictionary of all model configurations.
        beam_size: The beam width; if provided, stored under "model_params".
        maximum_labels_length: The maximum generated sequence length; if
            provided, stored under "model_params".
        length_penalty: The length penalty; if provided, stored under
            "model_params".

    Returns:
        The updated dict (same object, mutated in place).
    """
    overrides = (
        ("inference.beam_size", beam_size),
        ("inference.maximum_labels_length", maximum_labels_length),
        ("inference.length_penalty", length_penalty),
    )
    for key, value in overrides:
        if value is not None:
            model_configs["model_params"][key] = value
    return model_configs
85c96f32160bc5900f7442f5c1949cd6039bec6c
101,159
def _evaluate_features(features_name: list, features_index: list, config_features_available: bool): """ This ensures that (1) if feature names are provided in the config file, one and only one of the arguments, `features_name` or `features_index`, might be given, and (2) if feature names are NOT provided in the config file, one and only one of the arguments MUST be given. :param features_name: See the same argument in `do_extraction`. :param features_index: See the same argument in `do_extraction`. :param config_features_available: `True` if the key `STATISTICAL_FEATURES` in the config file, is associated with a list of features, and `False` otherwise. :return: True, if no exception was raised. """ if features_index is None: features_index = [] if features_name is None: features_name = [] given_by_list, given_by_index = False, False if len(features_name) > 0: given_by_list = True if len(features_index) > 0: given_by_index = True if not config_features_available: # if features in config file are not provided if given_by_list and not given_by_index: return True else: # if (1) both args are given, or (2) if none of them are provided, or (3) if # params_index is given raise ValueError( """ If a list of feature names is not provided by the config file, the arg `features_name` and only that MUST be given. """ ) else: if given_by_list + given_by_index > 1: # if both args are provided raise ValueError( """ Both of the arguments, `features_name` and `features_index`, cannot be given at the same time. """ ) return True
d2fc9589e007866ea7e5809aceb9998c16d41b81
101,161
def dataToComponent(data, component):
    """Convert ``data`` to an instance of ``component``.

    Parameters
    ----------
    data : `dict`, `object`
        Either keyword arguments for ``component`` (an exact match, or a
        superset handled via ``component.fromDictSubset``), or an existing
        ``component`` instance.
    component : `cls`
        Class the data will convert to.

    Returns
    -------
    object or None
        The converted value, or None when ``data`` is neither a dict nor an
        instance of ``component``.
    """
    if isinstance(data, dict):
        try:
            return component(**data)
        except KeyError:
            # Superset of keys: let the class pick out what it needs.
            return component.fromDictSubset(data)
    if isinstance(data, component):
        return data
    return None
dd5f58d5ee5a4cb2c81a71940ebd0a8f5af6dd92
101,164
def _get_certificate(client, vault_base_url, certificate_name): """ Download a certificate from a KeyVault. """ cert = client.get_certificate(vault_base_url, certificate_name, '') return cert
2367367cb0bb7131934d13731dab15ddcdce525b
101,165
def layer_architecture(model):
    """Get a Keras model config stripped of non-essential details.

    Parameters
    ----------
    model : dict or keras.engine.training.Model
        A Keras model, or a dict representing its config
        (result of `model.get_config()`).

    Returns
    -------
    dict
        The layer configuration with layer names removed.
    """
    if isinstance(model, dict):
        config = model
    else:
        config = model._updated_config()
    layers = config['config']['layers']
    # Layer names carry no architectural information; drop them in place.
    for layer in layers:
        layer.pop('name', None)
        layer.get('config', {}).pop('name', None)
    return layers
59bea7504b7a84e68eae051d0963f9d8cd2ed088
101,168
import math


def deg2rad(degrees):
    """Convert degrees to radians.

    Args:
        degrees: A degree value.

    Returns:
        The equivalent value in radians.
    """
    return degrees * math.pi / 180.0
28b4088f48b02f979c97e9ee3aae52bab793e73a
101,171
import re


def from_camelcase(inStr):
    """Convert a string from camelCase to snake_case.

    >>> from_camelcase('convertToPythonicCase')
    'convert_to_pythonic_case'

    Args:
        inStr (str): String to convert

    Returns:
        String formatted as snake_case
    """
    def lowered(match):
        return '_' + match.group(0).lower()

    return re.sub('[A-Z]', lowered, inStr)
73b876e2eab1361c97acd5b4828db2de384a30c1
101,174
def flatten_nested_dict(x, join_str = '/', prefix = ''):
    """Transform a nested dictionary into a flat dictionary.

    Keys are joined with ``join_str``; with the default empty prefix every
    resulting key therefore starts with ``join_str``.
    """
    assert isinstance(x, dict)
    flat = {}
    for key, value in x.items():
        path = prefix + join_str + key
        if isinstance(value, dict):
            flat.update(flatten_nested_dict(value, join_str, path))
        else:
            flat[path] = value
    return flat
44ebd4f88b5c0024ea37aa76de2a065cefb293c4
101,177
def flip_keypoints(keypoints):
    """Flip keypoints horizontally in place (x = -x) and return them.

    Expects array-style indexing (``keypoints[i, 0]``), e.g. a numpy array.
    """
    for idx in range(len(keypoints)):
        keypoints[idx, 0] = -keypoints[idx, 0]
    return keypoints
2da5e0b3fc2fde35a49b8615fd3fbe8f63fec3f3
101,178
import fnmatch


def refineGitignore(contents, files):
    """Keep only the gitignore patterns that match a file in the repository.

    :param contents: full .gitignore text
    :param files: iterable of file paths present in the repository
    :return: newline-terminated text containing only the matching patterns
    """
    kept = []
    for pattern in contents.split('\n'):
        if any(fnmatch.fnmatch(name, pattern) for name in files):
            kept.append(pattern + '\n')
    return ''.join(kept)
14af785ab70d9970baa6d3ca5c9e578a612389ba
101,179
def dictadd(dict_a, dict_b):
    """Merge two dicts; on a shared key the value from ``dict_b`` wins.

    >>> dictadd({1: 0, 2: 0}, {2: 1, 3: 1})
    {1: 0, 2: 1, 3: 1}
    """
    return {**dict_a, **dict_b}
59935c5a79280678818926789663f21e13ee4728
101,180
import json


def json_points(points):
    """Serialize points [(lat, lng)...] as a JSON list of "lat,lng" strings.

    >>> json_points([(1,2), (3,4)])
    '["1,2", "3,4"]'
    """
    encoded = [f"{point[0]},{point[1]}" for point in points]
    return json.dumps(encoded)
812de51e1e593d7f15054a389cd823fe32a96174
101,181
import math


def haversine_distance(lat1, lat2, long1, long2):
    """Return the haversine (great-circle) distance in meters between two points.

    :param lat1: Point 1's latitude (degrees)
    :param lat2: Point 2's latitude (degrees)
    :param long1: Point 1's longitude (degrees)
    :param long2: Point 2's longitude (degrees)

    Elevation change is not taken into account.
    """
    radius = 6371000  # mean radius of the Earth (meters)
    lat1r = math.radians(lat1)
    lat2r = math.radians(lat2)
    delta_lat = lat2r - lat1r
    delta_long = math.radians(long2) - math.radians(long1)
    # BUG FIX: the longitude term must use sin(delta/2)**2, matching the
    # latitude term of the haversine formula; the old code used
    # sin(delta)**2, which e.g. reported zero distance for two points
    # 180 degrees of longitude apart.
    a = (math.sin(delta_lat / 2) ** 2
         + math.cos(lat1r) * math.cos(lat2r) * math.sin(delta_long / 2) ** 2)
    c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
    return radius * c
1c6a6345aad7a5404cf3534c5d16f4574de68d03
101,184
def iterable(arg):
    """Make an argument iterable.

    :param arg: an argument to make iterable
    :return: ``arg`` unchanged if it is a list or tuple, else ``[arg]``
    """
    return arg if isinstance(arg, (list, tuple)) else [arg]
96bb2433f8c577ede6b2460829f3ba006f9515ef
101,187
def path_replace(path, path_replacements):
    """Apply each key -> value substitution in ``path_replacements`` to ``path``."""
    for old, new in path_replacements.items():
        path = path.replace(old, new)
    return path
bceecaf9e8e48fdbcd948d784c9a7e521fdbc31c
101,195
def transfer_result(fut, errors_only=False, process=None):
    """Return a ``done_callback`` that transfers the outcome of a future.

    The returned callback copies the source future's result, exception or
    cancellation onto ``fut``. If ``errors_only`` is True, a successful
    result is NOT transferred (only exceptions and cancellation are).

    .. code-block:: python

        from photons_app import helpers as hp

        fut = hp.create_future()
        task = hp.async_as_background(my_coroutine())
        task.add_done_callback(hp.transfer_result(fut))
        assert (await fut) == 2

    If ``process`` is provided, it is called with the finished source future
    and ``fut`` after a successful (non-error) completion.
    """
    def transfer(source):
        # Cancellation propagates unconditionally.
        if source.cancelled():
            fut.cancel()
            return

        error = source.exception()

        # Never overwrite a destination that is already resolved.
        if fut.done():
            return

        if error is not None:
            fut.set_exception(error)
            return

        if not errors_only:
            fut.set_result(source.result())

        if process:
            process(source, fut)

    return transfer
f8d22a71cd66e1486c2e5e4d8ca93d7047fe1e26
101,198
def calculate_coverage(filename):
    """Calculate translation coverage for a .po file.

    :param filename: path to the .po file
    :return: tuple ``(msgid_count, translated_count, untranslated_count)``
    """
    total = covered = uncovered = 0
    # Context manager fixes the previous version's leaked file handle;
    # iterating the handle also avoids loading the whole file into memory.
    with open(filename, 'r') as handle:
        for line in handle:
            if line.startswith("msgid "):
                total += 1
            elif line.startswith("msgstr"):
                # An empty msgstr ("" or '') marks an untranslated entry.
                if line.startswith('msgstr ""') or line.startswith("msgstr ''"):
                    uncovered += 1
                else:
                    covered += 1
    # Return stats for the file
    return (total, covered, uncovered)
706db8b1e65e4f218cd97d98897ce99cbac7b207
101,203
def create_spreadsheet(service, title):
    """Create a new spreadsheet.

    :param service: Google service object
    :param title: Spreadsheet title
    :return: Newly created spreadsheet ID
    """
    body = {'properties': {'title': title}}
    request = service.spreadsheets().create(body=body, fields='spreadsheetId')
    response = request.execute()
    return response.get('spreadsheetId')
91dd784ffe4cb37087c1f69085824cb616c264bd
101,205
def normalize_baseuri(baseuri: str) -> str:
    """Normalize a baseuri.

    If it doesn't end in a slash, add one.
    """
    return baseuri if baseuri[-1] == "/" else baseuri + "/"
9e3938b84e99b49512d85f54bef6f26b3e8796e9
101,208
import time


def print_test(method):
    """Decorator for verbose test suites.

    Prints the function name before running it and the elapsed time with an
    "[OK]" status afterwards.

    The wrapped function's return value is propagated — the previous version
    silently discarded it. ``functools.wraps`` preserves the wrapped
    function's metadata.
    """
    from functools import wraps

    @wraps(method)
    def run(*args, **kw):
        ts = time.time()
        print('\ttesting function %r' % method.__name__)
        result = method(*args, **kw)
        te = time.time()
        print('\t[OK] in %r %2.2f sec' % (method.__name__, te - ts))
        return result

    return run
d92e81c22f7aab9a1227381c9e7f4064ed1ce9cd
101,210
def _make_length_filter_fn(length_name, max_length): """Returns a predicate function which takes in data sample and returns a bool indicating whether to filter by length. """ def _filter_fn(data): return data[length_name] <= max_length return _filter_fn
6e16f62f2b0001930321f94e6ac4c316b8bebeed
101,213
def is_disabled(context, name):
    """Whether a specific pattern is disabled.

    The context object might define an inclusion list (includes) or an
    exclusion list (excludes). A pattern is considered disabled if it's found
    in the exclusion list, or if it's not found in a non-empty inclusion list.

    :param context: dict-like configuration (may be falsy)
    :param name: pattern name to test
    :return: True if the pattern is disabled, False otherwise
    """
    if not context:
        return False
    excludes = context.get('excludes')
    if excludes and name in excludes:
        return True
    includes = context.get('includes')
    # BUG FIX: coerce to a real bool — the old version leaked None (missing
    # 'includes') or [] (empty list) to callers despite the boolean contract.
    # Truthiness is unchanged, so existing callers behave identically.
    return bool(includes) and name not in includes
a1842b6addf0759ca408718af5584494a5ed6637
101,216
import heapq


def subtree_nodes_with_edge_length(tree, leaf_y, n):
    """
    Returns list of length n of leaves closest to sister taxon (minimizing edge weights)

    Parameters
    ----------
    tree : treeswift tree object
    leaf_y : treeswift node for closest sister taxon
    n = number of taxa contained in subtree

    Returns
    -------
    list of taxon labels corresponding to leaves in the subtree
    """
    # Dijkstra-style expansion outward from leaf_y: the heap is keyed on
    # accumulated edge length from leaf_y to the candidate node.
    # NOTE(review): if two entries tie on length, heapq falls back to
    # comparing the node objects themselves — verify treeswift nodes are
    # orderable, otherwise ties raise TypeError.
    queue = [(leaf_y.get_edge_length(), leaf_y.get_parent())]
    leaves = [leaf_y]
    visited = {leaf_y}
    while len(leaves) < n:
        try:
            (length, node) = heapq.heappop(queue)
        except IndexError:
            # Heap exhausted before collecting n leaves: return what we have.
            break
        visited.add(node)
        if node.is_leaf():
            leaves.append(node)
        # Explore all neighbors: children plus (for non-root nodes) the parent.
        adjacent_nodes = node.child_nodes()
        if not node.is_root():
            adjacent_nodes.append(node.get_parent())
        for neighbor in adjacent_nodes:
            if neighbor not in visited:
                if neighbor == node.get_parent():
                    # Moving upward: the connecting edge belongs to `node`.
                    heapq.heappush(queue, (length+node.get_edge_length(), neighbor))
                else:
                    # Moving downward: the connecting edge belongs to the child.
                    heapq.heappush(queue, (length+neighbor.get_edge_length(), neighbor))
    result = []
    for item in leaves:
        result.append(item.get_label())
    return result
131445433a8254e6acecd72561ee1abca8fc3286
101,221
import codecs


def serializer_with_encoder_constructor(serialization_func, encoder_type='utf-8', encoder_error_mode='strict'):
    """Wrap a serialization function with string encoding.

    This is important for JSON, as it serializes objects into strings
    (potentially unicode), NOT bytestreams; an extra encoding step is needed
    to get to a bytestream.

    :param serialization_func: The base serialization function.
    :param encoder_type: The encoder type. Default: 'utf-8'
    :param encoder_error_mode: The encode error mode. Default: 'strict'.
    :return: The serializer function wrapped with the specified encoder.
    :rtype: T -> bytes | bytearray | str
    """
    encode = codecs.getencoder(encoder_type)

    def serialize(payload):
        text = serialization_func(payload)
        encoded, _length = encode(text, encoder_error_mode)
        return encoded

    return serialize
602962b13837dcd11255a731cac48ba0e6dd520f
101,224
def get_resolved_fact_keys(conversation):
    """Return the names of all resolved facts for a conversation.

    :param conversation: The current conversation
    :return: List of all resolved fact names as strings
    """
    names = []
    for row in conversation.fact_entities:
        names.append(row.fact.name)
    return names
e29cc1178cf72d5168411e036449a3cf0f2c3451
101,226
def rensure(items, count):
    """Left-pad ``items`` with None until it holds ``count`` elements.

    Returns a new list when padding is needed; otherwise returns the
    original list unchanged.
    """
    missing = count - len(items)
    if missing < 1:
        return items
    return [None] * missing + items
d1e9784019f65934227fc657ee99f567ac65e776
101,230
def num_ancestors(p):
    """
    Returns the number of known ancestors of p

    Does not include p, so the answer might be 0.

    Parameter p: The initial family member
    Precondition: p is a Person (and not None)
    """
    total = 0
    # Each known parent contributes themselves plus their own ancestors.
    if p.mom is not None:
        total += 1 + num_ancestors(p.mom)
    if p.dad is not None:
        total += 1 + num_ancestors(p.dad)
    return total
52f9a9fe32caa03391eae374351b2888edbdbc70
101,234
def get_types(mol):
    """Return the atomic number of every atom in an rdkit mol, in index order."""
    atom_count = mol.GetNumAtoms()
    return [mol.GetAtomWithIdx(idx).GetAtomicNum() for idx in range(atom_count)]
27f379c4d54ff0d2bb9d9f59de274fb4bfb412c4
101,235
def as_list(tup_list):
    """Turn a tuple-list into a list of the first element of each tuple.

    @param tup_list is the tuple-list you are converting
    @returns the created list
    """
    return [pair[0] for pair in tup_list]
770f5f4e302e1796f6945507c7f314cf157cc77a
101,236
import torch


def infer(model, inputs):
    """Run ``model`` on ``inputs`` in eval mode without tracking gradients.

    Args:
        model (Model): a PyTorch model.
        inputs (Tensor): inputs to the model.

    Returns:
        outputs (Tensor): results inferred by the model from inputs.
    """
    # Eval mode disables training-only behavior (e.g. dropout).
    model.eval()
    with torch.no_grad():
        return model(inputs)
883bcd3cc4ef9167203afe5d34d08286ea6d8cbe
101,239
def union(list1, list2):
    """Return the union of two lists, preserving order.

    :param list1: first list
    :param list2: second list
    :return: list1's elements followed by list2's elements not already present
    """
    merged = list(list1)
    for item in list2:
        if item not in merged:
            merged.append(item)
    return merged
14e6dffee90f51f9fb17560b914a0bbcb760a87f
101,240
def column_to_list(data, index):
    """Return one column of a list of lists as a flat list.

    Args:
        data: list of lists (rows).
        index: position (column) to access and return.

    Returns:
        A list with the values of the column selected by ``index``.
    """
    column = []
    for row in data:
        column.append(row[index])
    return column
6fee91ff0130c6c3c1e8f90fb024537a04cce36c
101,242
def reg_tap_cim_to_gld(step, step_voltage_increment):
    """Convert a CIM regulator step to a GridLAB-D tap position.

    :param step: CIM setting of voltage regulator, a multiplier of nominal
        voltage, e.g. 1.0125.
    :param step_voltage_increment: voltage step as a percentage of nominal
        voltage, e.g. 0.625.
    :return: tap position in GridLAB-D speak, e.g. 10 or -2.
    """
    percent_deviation = (step - 1) * 100
    return round(percent_deviation / step_voltage_increment)
b0cef47b6f10d7930587a531d06221e126a63304
101,246
import timeit


def measure_time(func):
    """Decorator that prints how long each call to ``func`` takes."""
    def timer(*args, **kwargs):
        began = timeit.default_timer()
        value = func(*args, **kwargs)
        elapsed = timeit.default_timer() - began
        print("Time[{}] : {}".format(func.__name__, elapsed))
        return value
    return timer
38506dddc4e37f7d68756f12dda860936e53c131
101,249
def comma_sep(value, precision=0):
    """Format a number in #,###.## notation as a `str`.

    https://stackoverflow.com/questions/36626017/format-a-number-with-comma-separators-and-round-to-2-decimal-places-in-python-2
    """
    spec = ",." + str(precision) + "f"
    return format(value, spec)
766821b2a735c923e32ce5cff6a4b01353e864e2
101,252
def get_region_id(region):
    """Return the Pure region ID from a region code.

    Example:
    - Hong Kong -> #1
    - Singapore -> #2
    - Shanghai/CN -> #4

    :raises ValueError: if the region code is unknown. The previous
        ``assert`` disappeared under ``python -O``, letting bad codes
        surface as a bare KeyError.
    """
    regions = {
        'HK': 1,
        'SG': 2,
        'CN': 4,
    }
    try:
        return regions[region]
    except KeyError:
        raise ValueError('Region "%s" does not exist.' % region) from None
cfd993b7105e863f8e98d2930dae518d53d4d653
101,257
import torch


def response_preprocessing(responses: torch.Tensor) -> torch.Tensor:
    """Preprocess responses.

    Currently a pass-through: standard-deviation normalization is
    intentionally disabled.

    Args:
        responses (Tensor): Response tensor.

    Returns:
        Tensor: The responses, unchanged.
    """
    return responses
7266b76a406a558ecaf6489a3006e60678e05716
101,258
from typing import Any def calendars_config_entity( calendars_config_track: bool, calendars_config_ignore_availability: bool | None ) -> dict[str, Any]: """Fixture that creates an entity within the yaml configuration.""" entity = { "device_id": "backyard_light", "name": "Backyard Light", "search": "#Backyard", "track": calendars_config_track, } if calendars_config_ignore_availability is not None: entity["ignore_availability"] = calendars_config_ignore_availability return entity
b6872ab94695bd6e7f08fcdea5f9811bc65846d2
101,267
def zip_codes_to_go(target: list, zip_code: list) -> list:
    """Find the zip codes that haven't been used.

    Args:
        target (list): the list of zip codes that you want
        zip_code (list): the list of zip codes that you already have

    Returns:
        list: sorted list of the remaining codes — everything from either
        list except those target codes that occur more than once in the
        combined list.
    """
    pool = target + zip_code
    # A target code appearing more than once overall is dropped entirely.
    duplicated = {code for code in target if pool.count(code) > 1}
    return sorted(code for code in pool if code not in duplicated)
f35ab50d841012d40c1678faa735f63e5f9915f1
101,269
from sympy.printing.fortran import FCodePrinter


def fcode(expr, assign_to=None, **settings):
    """Convert a SymPy expression to a string of Fortran code.

    Parameters
    ==========

    expr : Expr
        A SymPy expression to be converted.
    assign_to : optional
        When given, used as the name of the variable to which the expression
        is assigned. Can be a string, ``Symbol``, ``MatrixSymbol``, or
        ``Indexed`` type. Helpful for line-wrapping and for expressions that
        generate multi-line statements (``Piecewise``, matrices, loops).
    precision : integer, optional
        DEPRECATED. Use type_mappings instead. The precision for numbers such
        as pi [default=17].
    user_functions : dict, optional
        A dictionary where keys are ``FunctionClass`` instances and values are
        their string representations, or a list of tuples
        ``[(argument_test, cfunction_string)]``.
    human : bool, optional
        If True, the result is a single string that may contain some constant
        declarations for the number symbols. If False, the same information is
        returned in a tuple of (symbols_to_declare, not_supported_functions,
        code_text). [default=True].
    contract : bool, optional
        If True, ``Indexed`` instances are assumed to obey tensor contraction
        rules and the corresponding nested loops over indices are generated.
        Setting contract=False will not generate loops; the user is then
        responsible for providing index values in the code. [default=True].
    source_format : optional
        Either 'fixed' or 'free'. [default='fixed']
    standard : integer, optional
        The Fortran standard to follow: 66, 77, 90, 95, 2003, or 2008.
        Default is 77. Currently the only internal distinction is between
        standards before 95 and those 95 and after.
    name_mangling : bool, optional
        If True, variables that would become identical in case-insensitive
        Fortran are mangled by appending different numbers of ``_`` at the
        end. If False, SymPy will not interfere with variable naming.
        [default=True]

    Examples
    ========

    >>> from sympy import fcode, symbols, Rational, sin
    >>> x, tau = symbols("x, tau")
    >>> fcode((2*tau)**Rational(7, 2))
    '      8*sqrt(2.0d0)*tau**(7.0d0/2.0d0)'
    >>> fcode(sin(x), assign_to="s")
    '      s = sin(x)'

    ``Piecewise`` expressions are converted into conditionals: an if
    statement when ``assign_to`` is provided, the ternary operator otherwise.
    A ``Piecewise`` lacking a default ``(expr, True)`` term raises an error.
    ``Indexed`` types generate loops when ``contract=True``. Matrices are
    supported when a ``MatrixSymbol`` of matching dimensions is passed as
    ``assign_to``.
    """
    printer = FCodePrinter(settings)
    return printer.doprint(expr, assign_to)
8f504707ecc5c7a4951529fa2c45f118be6814db
101,271
def getChunk(the_list: list, num: int):
    """Return every consecutive ``num``-word chunk of ``the_list``, space-joined.

    Example: if the_list is ['a', 'b', 'c', 'd', 'e', 'f'] and num is 3:
    ['a', 'b', 'c', 'd', 'e', 'f'] -> ['a b c', 'b c d', 'c d e', 'd e f']

    :param the_list: the list of words, e.g. ['a', 'b', 'c', 'd', 'e', 'f'].
    :param num: the number of words in a chunk.
    :return: list of space-joined chunks (a fresh copy of the words when
        num == 1; empty when the list is shorter than num).
    """
    if num <= 1:
        # One-word chunks are the words themselves (copy, matching the
        # original's behavior of returning a new list).
        return list(the_list)
    # Direct sliding window. This replaces the previous construction, which
    # built `num` pad-shifted copies of the list and zipped them back
    # together with functools.reduce — same outputs, far simpler.
    return [
        ' '.join(the_list[start:start + num])
        for start in range(len(the_list) - num + 1)
    ]
5f4f80e5b2a3dcf17adaa3ff05b63c7eaab16edf
101,273
def import_dfg_from_rows(rows, parameters=None):
    """
    Import a DFG (along with the start and end activities) from the rows
    of a .dfg file.

    Parameters
    --------------
    rows
        Rows of the DFG file
    parameters
        Possible parameters of the algorithm

    Returns
    --------------
    dfg
        DFG
    start_activities
        Start activities
    end_activities
        End activities
    """
    if parameters is None:
        parameters = {}

    cursor = iter(rows)

    # Header: a count followed by that many activity names, one per row.
    activity_count = int(next(cursor))
    activities = [next(cursor).strip() for _ in range(activity_count)]

    def read_counts(entry_count):
        # Each entry row looks like "<activity index>x<count>".
        counts = {}
        for _ in range(entry_count):
            index, frequency = next(cursor).strip().split("x")
            counts[activities[int(index)]] = int(frequency)
        return counts

    start_activities = read_counts(int(next(cursor)))
    end_activities = read_counts(int(next(cursor)))

    # Every remaining row encodes an edge: "<source>><target>x<count>".
    dfg = {}
    for row in cursor:
        edge, frequency = row.strip().split("x")
        source, target = edge.split(">")
        dfg[(activities[int(source)], activities[int(target)])] = int(frequency)

    return dfg, start_activities, end_activities
62216287a866ce4426cb1ba6b64ddbeab860d3d0
101,274
def get_opcode(instruction_bytes):
    """Return the 6-bit MIPS opcode (bits 31-26) of a 4-byte instruction."""
    # Mask the top six bits first, then shift them down into place.
    opcode_bits = instruction_bytes & 0xFC000000
    return opcode_bits >> 26
85dab0a35b0e31a3f2f2b5c1f3d5323b1ccf3dae
101,276
def slice_data(data, sub, block, subcond=None):
    """
    Pull a symmetric matrix from a data block (4D or 5D).

    Parameters
    ----------
    data : numpy array
        4D array (block, sub, nnode, nnode)
        5D array (subcond, block, sub, nnode, nnode)
    sub : int
        subject index
    block : int
        block index
    subcond : int, optional
        optional subcondition index for a 5D array

    Returns
    -------
    adjacency_matrix : numpy array
        symmetric numpy array (nnode, nnode)
    """
    index = (block, sub) if subcond is None else (subcond, block, sub)
    return data[index]
294d10c2d6c681dbb801ccfe6d5e5a4dd259adf1
101,277
def A000290(n: int) -> int:
    """Square numbers: a(n) = n^2 (OEIS A000290)."""
    return n * n
97849016dd6ec2f24dc173e059a9e69f25652d4a
101,278
def pad_gtin(app_identifier, value):
    """
    Pad the value of any GTIN [ AI (01) or (02) ] to 14 digits in the
    element string representation.

    :param app_identifier: Application identifier.
    :param value: The GTIN string - can be 8, 12 or 13 digits.
    :return: GTIN string zero-padded on the left to 14 digits;
        non-GTIN AIs and other lengths are returned unchanged.
    """
    if app_identifier not in ("01", "(01)", "02", "(02)"):
        return value
    # Only the three standard short GTIN lengths get padded.
    zeros = {8: "000000", 12: "00", 13: "0"}.get(len(value), "")
    return zeros + value
5463e9ffd1b974b3d0376f481b7baf5862d431ed
101,280
def create_url(event_id: int) -> str:
    """
    Return the olimpiada.ru link for an event given its id.

    :param event_id: id of the event
    :return: link to the event
    """
    return 'https://olimpiada.ru/activity/' + str(event_id)
afa3443a158d00b9480adb358a16fc37125ec9de
101,282
def oposto(a):
    """
    Negate a literal: strip a leading '~' if present, otherwise prepend one.

    :param a: literal
    :return: the negated literal
    """
    return a[1:] if a[0] == '~' else '~' + a
d32a6b428ea276d11f85d6df5a17f7bef280327e
101,290
def read_requirements_file(filename):
    """Read pip-formatted requirements from a file.

    Blank lines and comment lines (those whose first non-whitespace
    character is '#') are skipped; the original kept empty strings and
    indented comments in the result.

    :param filename: path to the requirements file.
    :return: list of requirement specifier strings.
    """
    requirements = []
    with open(filename, 'r') as f:
        for line in f:
            stripped = line.strip()
            if stripped and not stripped.startswith('#'):
                requirements.append(stripped)
    return requirements
3e72cadf3dd2aa61de023861e4d7b6ec448b90b0
101,292
import base64


def uuid_to_b32(uuid_obj):
    """
    Convert a UUID object to a lowercase, unpadded base32 string.

    :param uuid_obj: UUID object
    :return: b32 string
    """
    encoded = base64.b32encode(uuid_obj.bytes).decode()
    # Base32 padding ('=') only ever appears at the end, so rstrip is
    # equivalent to removing every '='.
    return encoded.rstrip("=").lower()
8f4ae816b1cb08967470b2913cf5ee34e2a8e440
101,295
def compare(name, first, second, bfr):
    """Differentiate 'repeat..until' from '*..end' brackets.

    True exactly when one side (but not both) matches its keyword.
    """
    opening = bfr[first.begin:first.end]
    closing = bfr[second.begin:second.end]
    # '!=' on booleans is exclusive-or.
    return (opening == "repeat") != (closing == "end")
277aaf22e452c7d5c7292ccf233d08b21d3bb90e
101,301
def _wrap_close_transport(channel, transport):
    """
    Wrap a Channel or SFTPFile object so its close() method
    also closes the underlying Transport.

    The original ``close`` is stashed on the object as ``_orig_close``
    and replaced with a closure that closes both the channel and the
    transport.  Returns the (mutated) ``channel`` for convenience.
    """
    def close():
        """ Close this object and the underlying Transport """
        channel._orig_close()  # pylint: disable=protected-access
        transport.close()
    # Wrap only once: '_orig_close' marks an already-wrapped object,
    # so repeated calls are no-ops and close() never recurses.
    if not hasattr(channel, '_orig_close'):
        setattr(channel, '_orig_close', channel.close)
        setattr(channel, 'close', close)
    return channel
064c570e329fa2841cbdeffa941e83b9225a9fd3
101,306
def conv_output_length(input_length, filter_size, stride, pad=0):
    """Helper function to compute the output size of a convolution operation

    This function computes the length along a single axis, which corresponds
    to a 1D convolution. It can also be used for convolutions with higher
    dimensionalities by using it individually for each axis.

    Parameters
    ----------
    input_length : int or None
        The size of the input.

    filter_size : int
        The size of the filter.

    stride : int
        The stride of the convolution operation.

    pad : int, 'valid', 'full' or 'same' (default: 0)
        By default, the convolution is only computed where the input and the
        filter fully overlap (a valid convolution). When ``stride=1``, this
        yields an output that is smaller than the input by
        ``filter_size - 1``. The `pad` argument allows you to implicitly pad
        the input with zeros, extending the output size.

        A single integer results in symmetric zero-padding of the given size
        on both borders.

        ``'valid'`` is equivalent to ``pad=0``: no implicit padding.

        ``'full'`` pads with one less than the filter size on both sides.
        This is equivalent to computing the convolution wherever the input
        and the filter overlap by at least one position.

        ``'same'`` pads with half the filter size on both sides (one less on
        the second side for an even filter size). When ``stride=1``, this
        results in an output size equal to the input size.

    Returns
    -------
    int or None
        The output size corresponding to the given convolution parameters,
        or ``None`` if `input_length` is ``None``.

    Raises
    ------
    ValueError
        When an invalid padding is specified, a `ValueError` is raised.
    """
    if input_length is None:
        return None
    if pad == 'valid':
        output_length = input_length - filter_size + 1
    elif pad == 'full':
        output_length = input_length + filter_size - 1
    elif pad == 'same':
        output_length = input_length
    elif isinstance(pad, int):
        output_length = input_length + 2 * pad - filter_size + 1
    else:
        raise ValueError('Invalid pad: {0}'.format(pad))
    # This is the integer arithmetic equivalent to
    # np.ceil(output_length / stride)
    output_length = (output_length + stride - 1) // stride
    return output_length
43a563cf422e6ba4f7989f9b55a4d17ca9c5b215
101,308
import inspect


def get_functions(mod):
    """
    Get the names of the plain Python functions defined on a module.

    :param mod: The module to inspect for functions.
    :type mod: types.ModuleType
    :return: The sorted list of function names (``inspect.getmembers``
        returns members sorted by name).
    :rtype: list[str]
    """
    members = inspect.getmembers(mod, inspect.isfunction)
    return [name for name, _func in members]
78a7807760086b65aefdc45be7c7c39d95e5ba9d
101,316
def flatten_card(ast):
    """Flatten a card AST node into a single string.

    Concatenates, in order: front demarcator, front text,
    back demarcator, back text.
    """
    return "".join([
        ast.front_demarcator_token.text,
        ast.front_text,
        ast.back_demarcator_token.text,
        ast.back_text,
    ])
627d17de37313f12a7e65e8694ebfbcac83c8f0a
101,317
import requests


def api_get_courses(srcdb):
    """
    Return the API response for a search for all CU Boulder courses.

    Posts an empty-criteria search to the public class-search endpoint
    for the given term database (``srcdb``) and returns the decoded
    JSON payload.

    :param srcdb: term database identifier understood by the endpoint.
    :raises requests.HTTPError: if the endpoint responds with an error
        status (via ``raise_for_status``).
    """
    url = "https://classes.colorado.edu/api/?page=fose&route=search"
    # Empty "criteria" means "match everything" for this endpoint.
    data = {
        "other": {
            "srcdb": srcdb,
        },
        "criteria": []
    }
    resp = requests.post(url, json=data)
    resp.raise_for_status()
    return resp.json()
461899123c0f03674b66fef39063b7df9081e3c9
101,320
import glob


def find_files(pattern):
    """Lazily yield filesystem paths matching the glob *pattern*.

    Returns an iterator (not a list), mirroring ``glob.iglob``.
    """
    matches = glob.iglob(pattern)
    return matches
3cd1ffedad56c98bdafd4aa1bdde4fd3a3f9a53b
101,323
from typing import TextIO
from typing import List


def parse_tsv(file: TextIO) -> List[List[str]]:
    """Parse a TSV file into a list of rows, each a list of field strings.

    Only the trailing line terminator is removed before splitting on
    tabs, so empty leading/trailing fields are preserved.  The original
    used ``str.strip()``, which silently dropped empty fields (and any
    surrounding whitespace) at either end of a row.
    """
    return [line.rstrip('\r\n').split('\t') for line in file]
ddfab5ab3fff3318f316f161855ee28d93d53c2a
101,327
import random


def choose(argv):
    """Randomly choose one of multiple things. Usage: choose <thing1> <thing2> [thing3] ..."""
    # argv[0] is the command itself; with fewer than two things to pick
    # from there is nothing to do (implicitly None in the original).
    if len(argv) <= 2:
        return None
    return 'I choose ' + random.choice(argv[1:])
14c82ff4b740bdb1016f484aa32d46444b41484c
101,333
import re


def normalize_hostname_to_rfc(mystr):
    """
    Given a hostname, normalize to Nextdoor hostname standard

    Args:
        mystr (str): hostname to normalize

    Returns:
        normalized hostname

    Details:
        * lower everything
        * delete anything which is not alphanumeric
        * compress multiple '.' or '-'
        * strip leading '-'s
    """
    # Same transformations as the original nested re.sub calls, applied
    # in the same innermost-to-outermost order.
    normalized = mystr.lower()
    normalized = re.sub('[^a-z0-9-._]', '', normalized)
    normalized = re.sub('[-]{2,}', '-', normalized)
    normalized = re.sub('[.]{2,}', '.', normalized)
    return re.sub('^[-]+', '', normalized)
ce6d6fd07ba12ac28f0d96bb4ee3dc0345e7bcd4
101,340
def is_aba(aba_str):
    """Return True if the 3-character string is of the form ABA.

    ABA means first and third characters match while the middle one
    differs (e.g. 'xyx' but not 'xxx').

    Raises:
        ValueError: if the input is not exactly 3 characters long.
        (The original raised a bare ``Exception`` with no message;
        ``ValueError`` is a subclass-compatible narrowing for callers
        catching ``Exception``.)
    """
    if len(aba_str) != 3:
        raise ValueError("is_aba expects a string of exactly 3 characters")
    return aba_str[0] == aba_str[2] and aba_str[0] != aba_str[1]
cbd5b463ae5af9816047811f1ebaae59c4e7cbe7
101,341
def _transitions(state, policies): """Returns a list of (action, prob) pairs from the specified state.""" if state.is_chance_node(): return state.chance_outcomes() else: pl = state.current_player() return list(policies[pl].action_probabilities(state).items())
cdbba6b2e7ea6d25e89d0132c326d9b7147d9788
101,344
import json


def load_json_file(filepath):
    """Return the parsed content of the JSON file at `filepath`."""
    with open(filepath, 'r') as handle:
        parsed = json.load(handle)
    return parsed
578575a6398c3e803613b8b4277043bcab08191c
101,348
def echo(s):
    """Return the string repeated twice."""
    return s * 2
534fbf0464261a35e929518c245dc3e32e09977a
101,351
def build_transcript(transcript, build='37'):
    """Build a transcript object

    These represents the transcripts that are parsed from the VCF, not the
    transcript definitions that are collected from ensembl.

    Args:
        transcript(dict): Parsed transcript information
        build(str): Genome build; currently unused by this function.

    Returns:
        transcript_obj(dict)
    """
    # Mandatory fields: a transcript must have an id and belong to a gene.
    transcript_obj = dict(
        transcript_id=transcript['transcript_id'],
        hgnc_id=transcript['hgnc_id'],
    )

    # Optional fields are copied only when present and truthy — same
    # behavior as the original 15-branch if-chain, without the
    # copy-paste duplication.  The tuple preserves the original
    # insertion order.
    optional_keys = (
        'protein_id', 'sift_prediction', 'polyphen_prediction', 'swiss_prot',
        'pfam_domain', 'prosite_profile', 'smart_domain', 'biotype',
        'functional_annotations', 'region_annotations', 'exon', 'intron',
        'strand', 'coding_sequence_name', 'protein_sequence_name',
    )
    for key in optional_keys:
        value = transcript.get(key)
        if value:
            transcript_obj[key] = value

    transcript_obj['is_canonical'] = transcript.get('is_canonical', False)

    return transcript_obj
485a8f3bee6030c7a66f50be2fbd1a4c1acf424b
101,352
def join_ensembles(ensemble_list):
    """Join several ensembles using a set theory union.

    Parameters
    ----------
    ensemble_list : list of :class:`.Ensemble`
        list of ensembles to join

    Returns
    -------
    :class:`.Ensemble`
        union of all given ensembles (``None`` for an empty list)
    """
    union = None
    for member in ensemble_list:
        union = member if union is None else (union | member)
    return union
04ec37bcb1c3013b86c25a379f91b88692600361
101,354
def fixStringLength(s, n, ctd='...', alignRight=True):
    """
    Force a string into a space of size `n`.

    Strings longer than `n` are truncated and suffixed with the
    continuation marker `ctd`; shorter strings are padded to width `n`
    (right-aligned by default).  Non-string input raises AssertionError.
    """
    try:
        if len(s) > n:
            return s[:n - len(ctd)] + ctd
        return s.rjust(n) if alignRight else s.ljust(n)
    except (AttributeError, TypeError, ValueError):
        raise AssertionError('Input should be a string')
dfb927f5b5861a5153279817fb14a4496bab2bbb
101,356
def _base_model_from_kwargs(cls, kwargs):
    """Helper for BaseModel.__reduce__, expanding kwargs.

    Pickle can only invoke a top-level callable with positional
    arguments, so ``__reduce__`` points at this function; it re-invokes
    the class constructor with the saved keyword arguments.
    """
    return cls(**kwargs)
bd7a991c891172cac334541b9ccee82c5489f4ad
101,362
def get_requires(filename):
    """
    Read the required dependencies (e.g. from 'requirements.txt'),
    one entry per line with trailing whitespace removed.
    """
    with open(filename, "rt") as req_file:
        return [line.rstrip() for line in req_file]
4c4df470846a2812e79fe5349371c287b3a5ea95
101,363
def _build_synset_lookup(imagenet_metadata_file): """Build lookup for synset to human-readable label. Args: imagenet_metadata_file: string, path to file containing mapping from synset to human-readable label. Assumes each line of the file looks like: n02119247 black fox n02119359 silver fox n02119477 red fox, Vulpes fulva where each line corresponds to a unique mapping. Note that each line is formatted as <synset>\t<human readable label>. Returns: Dictionary of synset to human labels, such as: 'n02119022' --> 'red fox, Vulpes vulpes' """ lines = open(imagenet_metadata_file, 'r').readlines() synset_to_human = {} for l in lines: if l: parts = l.strip().split('\t') assert len(parts) == 2 synset = parts[0] human = parts[1] synset_to_human[synset] = human return synset_to_human
afa342c78cb0db96e3c8eafdda6ee12b19b9ed1f
101,366
def get_seeds_from_cache(testdir, salt):
    """Run ``pytest --cache-show`` and parse the outcome to get cached seeds."""
    result = testdir.runpytest("--cache-show")

    def seed_for(param):
        # The seed value is printed on the line following its header.
        header = "test_seed[{param}]:{salt} contains:".format(
            param=param, salt=salt)
        position = result.outlines.index(header)
        return int(result.outlines[position + 1].strip())

    return seed_for("a"), seed_for("b")
0dbbfdeb5bc3b279b7b587cbd84c035047159c7f
101,370
def pk_verify(hashed, signature, public_key):
    """
    Verify `hashed` based on `signature` and `public_key`.

    hashed: string
        A hash for the data that is signed.

    signature: tuple
        Value returned by :func:`pk_sign`.

    public_key: :class:`Crypto.PublicKey.RSA`
        Public portion of key pair.

    Returns the result of ``public_key.verify`` (truthy when the
    signature matches).

    NOTE(review): ``RSA.verify`` is the legacy PyCrypto API and was
    removed in modern PyCryptodome — confirm the pinned dependency.
    """
    return public_key.verify(hashed, signature)
95d0288d5ed85cb3501f87b4ab83650535c818b0
101,375
import textwrap


def shorten(text, width=70, placeholder='...'):
    """Shortens text to a max length.

    Thin wrapper around :func:`textwrap.shorten`.  The previous
    ``textwrap.indent`` probe and manual-truncation fallback existed
    only for Python < 3.3, which is long past end-of-life; on every
    supported Python the original already took this path.

    :param text: The text to shorten
    :type text: str
    :param width: The max width, defaults to 70
    :type width: number, optional
    :param placeholder: The placeholder to truncate with, defaults to '...'
    :type placeholder: str, optional
    :returns: The shortened text with placeholder
    :rtype: {str}
    """
    return textwrap.shorten(text, width=width, placeholder=placeholder)
359ce44ece03e1eca9ac46ef71f77d6cdc241247
101,377
import yaml


def show(data):
    """
    Works like yaml.dump(), but with output suited for doctests.
    Flow style is always off, and there is no blank line at the end.
    """
    dumped = yaml.dump(data, default_flow_style=False)
    return dumped.strip()
f8f7211c9721a7e030e2b8d934e92b4425735af6
101,380
def encontrar_estrenos(p1: dict, p2: dict, p3: dict, p4: dict, p5: dict,
                       anio: int) -> str:
    """Find which movies premiered strictly after the given year.

    Parameters:
        p1 (dict): Info dict for movie 1 (keys 'nombre' and 'anio').
        p2 (dict): Info dict for movie 2.
        p3 (dict): Info dict for movie 3.
        p4 (dict): Info dict for movie 4.
        p5 (dict): Info dict for movie 5.
        anio (int): Cutoff year.

    Returns:
        str: The names of the movies released after `anio`, separated by
        ", " in p1..p5 order.  If no movie matches, returns "Ninguna".
    """
    # The original repeated the same check/concatenate/trim logic five
    # times and then stripped a trailing ", "; collecting the names and
    # joining them produces the identical string.
    estrenos = [pelicula["nombre"]
                for pelicula in (p1, p2, p3, p4, p5)
                if pelicula["anio"] > anio]
    return ", ".join(estrenos) if estrenos else "Ninguna"
26d300db18776d9e6fd12fce46e48296b5333186
101,386
def is_typed_dict(obj, typ):
    """Return True if obj is a plain dict whose 'type' field equals typ.

    Note: deliberately checks the exact ``dict`` type (not subclasses),
    matching the original ``type(obj) is dict`` test.
    """
    if type(obj) is not dict:
        return False
    return obj.get('type', None) == typ
f62c2707899f5e11549fbc10dbbf8f95e27bc8da
101,390
import uuid


def CreateNewSessionID() -> str:
    """Create a new session ID.

    Returns:
        str: The first 6 hex digits of a random UUID4 (lowercase).

    NOTE(review): 6 hex characters give only ~16.7M distinct IDs, so
    collisions are plausible at scale — confirm this is acceptable.
    """
    random_hex = uuid.uuid4().hex
    return random_hex[:6]
c39654d4aa2c2cafbf25ff5b3afcb5ccdc314ac2
101,393
def loadavg():
    """
    Get the one-minute load average from /proc/loadavg.

    The first three fields of that file are the 1-, 5- and 15-minute
    load averages — jobs in the run queue (state R) or waiting for disk
    I/O (state D) — the same numbers reported by uptime(1).  Only the
    one-minute figure is returned.
    """
    with open('/proc/loadavg') as f:
        first_line = f.readline()
    return float(first_line.split()[0])
fdc3e0a8e1b6d8b711b7b81e147700502b527a3a
101,395
def _login_manager_user_loader(user_id): """ setup None user loader. Without this, it will throw an error if it doesn't exist """ return None
e94f7dd2c86f42b575456bf1cfe7970861578cc3
101,397
import optparse


def CreateTestRunnerOptionParser(usage=None, default_timeout=60):
    """Returns a new OptionParser with arguments applicable to all tests.

    Args:
        usage: optional usage string shown by --help.
        default_timeout: default per-test timeout in seconds.
    """
    parser = optparse.OptionParser(usage=usage)
    parser.add_option('-t', dest='timeout', type='int',
                      default=default_timeout,
                      help='Timeout to wait for each test')
    parser.add_option('-c', dest='cleanup_test_files', action='store_true',
                      default=False,
                      help='Cleanup test files on the device after run')
    parser.add_option('-v', '--verbose', dest='verbose_count', default=0,
                      action='count',
                      help='Verbose level (multiple times for more)')
    parser.add_option('--tool', dest='tool',
                      help='Run the test under a tool '
                           '(use --tool help to list them)')
    return parser
a98d8e3997df2604432dd051256399bea367fd30
101,400
def cudnn_lstm_parameter_size(input_size, hidden_size):
    """Number of parameters in a single CuDNN LSTM cell.

    Four gates, each with input-to-hidden and hidden-to-hidden weight
    matrices plus two bias vectors (CuDNN keeps separate input and
    recurrent biases, hence 8 * hidden_size biases in total).
    """
    gate_count = 4
    weights = gate_count * hidden_size * (input_size + hidden_size)
    biases = 2 * gate_count * hidden_size
    return weights + biases
1548e9ba8358939edb6d5fb1e7dcf5f4a3b2302e
101,402
def get_indefinite_article(noun: str) -> str:
    """
    >>> get_indefinite_article('Elephant')
    'an'
    >>> get_indefinite_article('lion')
    'a'
    >>> get_indefinite_article(' ant')
    'an'
    """
    cleaned = noun.lower().strip()
    # Empty input defaults to 'a', matching the original early return.
    return 'an' if cleaned and cleaned[0] in 'aeiou' else 'a'
bb77a18bb5e60e31a0d68eae921ea8aefb0ebf1b
101,410
def clean_consecutive_duplicates(
    move_data, subset=None, keep="first", inplace=False
):
    """
    Remove consecutive duplicate rows of the DataFrame; optionally only
    certain columns are considered.

    Parameters
    ----------
    move_data : dataframe
        The input trajectory data
    subset : Array of Strings, optional(None by default)
        Column label or sequence of labels considered for identifying
        duplicates. By default all columns are used.
    keep : String, optional('first' by default)
        Determines which duplicate will be removed. With 'first', all
        duplicates except the first occurrence of a run are dropped;
        otherwise all except the last occurrence are dropped.
    inplace : boolean, optional(False by default)
        If True the original dataframe is altered in place; otherwise a
        filtered copy is returned.

    Returns
    -------
    dataframe or None
        The trajectory points without consecutive duplicates.
    """
    # keep='first' compares each row to its predecessor (shift 1);
    # anything else compares to its successor (shift -1).
    shift_step = 1 if keep == "first" else -1
    frame = move_data if subset is None else move_data[subset]
    changed = (frame.shift(shift_step) != frame).any(axis=1)
    duplicated_index = move_data[~changed].index
    return move_data.drop(index=duplicated_index, inplace=inplace)
7f1d4258810c7c5bc117d2b5b2d098abffd4b613
101,414
def is_number(number):
    """
    Return True if an object is a number - can be converted into a float.

    Parameters
    ----------
    number : any

    Returns
    -------
    bool
        True if the input is float-convertible, False otherwise.
        (A TypeError — e.g. float(None) — propagates, as in the original.)
    """
    try:
        float(number)
    except ValueError:
        return False
    return True
94116ce4f005f747222ced537168d2138944e325
101,417
def rotate_word(s, n):
    """
    Rotate each char in a string by the given amount, wrapping within
    the lowercase alphabet (assumes lowercase a-z input).
    """
    base = ord('a')
    return ''.join(chr((ord(ch) - base + n) % 26 + base) for ch in s)
d2fd72d289b96693eb9e7a2697ab4a8a0a28a0be
101,419
def sigma_eaton(es_norm, v_ratio, n):
    """
    Effective pressure from the ratio of velocity to normal velocity.

    Notes
    -----
    .. math:: {\\sigma}={\\sigma}_{n}\\left(\\frac{V}{V_{n}}\\right)^{n}
    """
    scaling = v_ratio ** n
    return es_norm * scaling
708b59edebd6ea14dffbdb59d91d05ebfae0d8b2
101,420