content (string, 39 – 14.9k chars) · sha1 (string, 40 chars) · id (int64, 0 – 710k)
def mesh_split_face(mesh, fkey, u, v):
    """Split a face by inserting an edge between two specified vertices.

    Parameters
    ----------
    mesh : :class:`~compas.datastructures.Mesh`
        Instance of a mesh
    fkey : :obj:`str`
        The face key.
    u : hashable
        The key of the first split vertex.
    v : hashable
        The key of the second split vertex.

    Returns
    -------
    :obj:`tuple` of :obj:`int`
        Keys of the created faces.

    Raises
    ------
    :exc:`ValueError`
        If the split vertices do not belong to the split face or if the
        split vertices are neighbors.

    Examples
    --------
    >>> import compas
    >>> from compas.datastructures import Mesh
    >>> mesh = Mesh.from_obj(compas.get("faces.obj"))
    >>> fkey = mesh.get_any_face()
    >>> # u and v define the new edge after splitting
    >>> u = mesh.get_any_face_vertex(fkey)
    >>> v = mesh.face_vertex_descendant(fkey, u, n=2)
    >>> mesh.number_of_faces()  # faces before split
    25
    >>> mesh_split_face(mesh, fkey, u, v)
    (25, 26)
    >>> mesh.number_of_faces()  # faces after split
    26
    """
    if u not in mesh.face[fkey] or v not in mesh.face[fkey]:
        raise ValueError('The split vertices do not belong to the split face.')
    face = mesh.face[fkey]
    i = face.index(u)
    j = face.index(v)
    if i + 1 == j:
        raise ValueError('The split vertices are neighbors.')
    if j > i:
        f = face[i:j + 1]
        g = face[j:] + face[:i + 1]
    else:
        f = face[i:] + face[:j + 1]
        g = face[j:i + 1]
    f = mesh.add_face(f)
    g = mesh.add_face(g)
    del mesh.face[fkey]
    return f, g
d6f0dfddaf000374e0a7eb7e70100d35a3db1c82
58,259
import pathlib
import hashlib


def create_file_hash(file_path: pathlib.Path) -> str:
    """
    Create a hash of a file. Replicates the function of the ``sha256sum`` linux command.

    Args:
        file_path: Path of file to hash.

    Returns:
        Hex digest of file SHA.
    """
    this_hash = hashlib.sha256()
    file_size = file_path.stat().st_size
    with file_path.open(mode="rb") as f:
        while f.tell() != file_size:
            this_hash.update(f.read(0x40000))
    return this_hash.hexdigest()
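A minimal usage sketch for the entry above; the file name is a hypothetical placeholder and is assumed to exist:

# hash a local file in 256 KiB chunks; prints a 64-character hex digest
print(create_file_hash(pathlib.Path("example.bin")))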
fb147c20fa971e7b8f6d3fb40c5b09963fabc022
58,260
def check_if_modem_enabled(out):
    """Given an output, checks if the modem is in the correct mode"""
    for line in out:
        if '12d1:1446' in line:
            return 0
        if '12d1:1001' in line:
            return 1
    return -1
1d7a0bbdd038240a9e5ca5f1bfa9a9555867f855
58,263
def pentagonal(n: int) -> int:
    """Find the number of dots in nth pentagonal number."""
    # Find the pentagonal number to nth degree.
    pentagonal_number = (n * ((3 * n) - 1) // 2)
    # Find the total number of dots.
    dots = ((n - 1) ** 2)
    dots += pentagonal_number
    return dots
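For reference, the first few outputs of the entry above are 1, 6, 16, 31, ..., which match the centered pentagonal numbers (5*n**2 - 5*n + 2) // 2:

# spot-check the closed form against the function
assert pentagonal(1) == 1
assert pentagonal(2) == 6
assert pentagonal(3) == 16  # 12 pentagonal dots plus 4 interior dots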
8e26ca47281f39af565a6817cc960235c09c6878
58,270
def save_analytics_action_doc_template_values(url_root):
    """
    Show documentation about saveAnalyticsAction
    """
    required_query_parameter_list = [
        {
            'name': 'voter_device_id',
            'value': 'string',  # boolean, integer, long, string
            'description': 'An 88 character unique identifier linked to a voter record on the server',
        },
        {
            'name': 'api_key',
            'value': 'string (from post, cookie, or get (in that order))',  # boolean, integer, long, string
            'description': 'The unique key provided to any organization using the WeVoteServer APIs',
        },
        {
            'name': 'action_constant',
            'value': 'integer',  # boolean, integer, long, string
            'description': 'There is a constant for each kind of action:\n'
                           'ACTION_VOTER_GUIDE_VISIT = 1\n'
                           'ACTION_VOTER_GUIDE_ENTRY = 2\n'
                           'ACTION_ORGANIZATION_FOLLOW = 3\n'
                           'ACTION_ORGANIZATION_AUTO_FOLLOW = 4\n'
                           'ACTION_ISSUE_FOLLOW = 5\n'
                           'ACTION_BALLOT_VISIT = 6\n'
                           'ACTION_POSITION_TAKEN = 7\n'
                           'ACTION_VOTER_TWITTER_AUTH = 8\n'
                           'ACTION_VOTER_FACEBOOK_AUTH = 9\n'
                           'ACTION_WELCOME_ENTRY = 10\n'
                           'ACTION_FRIEND_ENTRY = 11',
        },
    ]
    optional_query_parameter_list = [
        {
            'name': 'organization_we_vote_id',
            'value': 'string',  # boolean, integer, long, string
            'description': 'The unique identifier for this organization across all networks ',
        },
        {
            'name': 'google_civic_election_id',
            'value': 'integer',  # boolean, integer, long, string
            'description': 'The unique identifier for a particular election.',
        },
        {
            'name': 'ballot_item_we_vote_id',
            'value': 'string',  # boolean, integer, long, string
            'description': 'The we_vote_id for the ballot item we are storing analytics for. ',
        },
    ]
    potential_status_codes_list = [
        {
            'code': 'VALID_VOTER_DEVICE_ID_MISSING',
            'description': 'Cannot proceed. A valid voter_device_id parameter was not included.',
        },
        {
            'code': 'VALID_VOTER_ID_MISSING',
            'description': 'Cannot proceed. A valid voter_id was not found.',
        },
    ]
    try_now_link_variables_dict = {
        # 'organization_we_vote_id': 'wv85org1',
    }
    api_response = '{\n' \
                   '  "status": string,\n' \
                   '  "success": boolean,\n' \
                   '  "voter_device_id": string (88 characters long),\n' \
                   '  "action_constant": integer,\n' \
                   '  "state_code": string,\n' \
                   '  "is_signed_in": boolean,\n' \
                   '  "google_civic_election_id": integer,\n' \
                   '  "organization_we_vote_id": string,\n' \
                   '  "organization_id": integer,\n' \
                   '  "ballot_item_we_vote_id": string,\n' \
                   '  "date_as_integer": integer,\n' \
                   '}'
    template_values = {
        'api_name': 'saveAnalyticsAction',
        'api_slug': 'saveAnalyticsAction',
        'api_introduction': "",
        'try_now_link': 'apis_v1:saveAnalyticsActionView',
        'try_now_link_variables_dict': try_now_link_variables_dict,
        'url_root': url_root,
        'get_or_post': 'GET',
        'required_query_parameter_list': required_query_parameter_list,
        'optional_query_parameter_list': optional_query_parameter_list,
        'api_response': api_response,
        'api_response_notes': "",
        'potential_status_codes_list': potential_status_codes_list,
    }
    return template_values
8f7e1c256df5f94acb4b9fb227d646d4e5252557
58,274
def labeller(landmarkables, group_label, label_func):
    """
    Takes a list of landmarkable objects and a group label indicating which
    set of landmarks should have semantic meaning attached to them. The
    labelling function will add a new landmark group to each object that has
    been semantically annotated.

    Parameters
    ----------
    landmarkables: list of :class:`menpo.landmark.base.Landmarkable`
        List of landmarkable objects
    group_label: string
        The group label of the landmark group to apply semantic labels to.
    label_func: func
        A labelling function taken from this module. ``func`` should take a
        :class:`menpo.landmark.base.LandmarkGroup` and return a new
        LandmarkGroup with semantic labels applied.

    Returns
    -------
    landmarkables : list of :class:`menpo.landmark.base.Landmarkable`
        The list of modified landmarkables (this is just for convenience,
        the list will actually be modified in place)
    """
    landmark_groups = [label_func(landmarkable.landmarks[group_label])
                       for landmarkable in landmarkables]
    for (landmarkable, group) in zip(landmarkables, landmark_groups):
        landmarkable.landmarks[group.group_label] = group
    return landmarkables
5889e9212fe9974afe6e19e18e8e586a1348d043
58,275
def hum_ratio_from_p_w_and_p(p_w, p):
    """
    Calculate humidity ratio from water vapor pressure and atmospheric pressure.
    Eq(22) in "CHAPTER 6 - PSYCHROMETRICS" in "2001 ASHRAE Fundamentals Handbook (SI)"

    :param p_w: water vapor pressure [Pa]
    :type p_w: double
    :param p: atmospheric pressure [Pa]
    :type p: double
    :return: humidity ratio [kg water / kg dry air]
    :rtype: double
    """
    return 0.62198 * p_w / (p - p_w)
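A worked example for the entry above at standard sea-level pressure (values rounded):

# p_w = 1000 Pa vapor pressure, p = 101325 Pa total pressure
w = hum_ratio_from_p_w_and_p(1000.0, 101325.0)
print(round(w, 4))  # 0.0062 kg water per kg dry air (about 6.2 g/kg)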
d2126ee4dcb7ebb81a4918cb9939abc2473f4c18
58,276
def in_d8(idx0, idx_ds, ncol):
    """Returns True if inside 3x3 (current and 8 neighboring) cells."""
    cond1 = abs(idx_ds % ncol - idx0 % ncol) <= 1    # west - east
    cond2 = abs(idx_ds // ncol - idx0 // ncol) <= 1  # south - north
    return cond1 and cond2
69e2f522db8f7c5cf57167a82d41226e6d7b35af
58,278
from typing import Dict
from typing import Any


def clear_per_step_extra_state(extra_state: Dict[str, Any]) -> Dict[str, Any]:
    """
    Clear values in extra_state that are technically only true for a
    specific step (ex: the eval tune loss calculated after 5 train steps is
    no longer accurate after 7 train steps, but might not get updated since
    we might not be doing eval after every step).
    """
    extra_state["tune_eval"]["loss"] = None
    extra_state["tune_eval"]["perplexity"] = None
    extra_state["tune_bleu"]["current"] = None
    return extra_state
d7f561c612a8d52e1cf8a215b414e5c8297c6a24
58,279
import re


def RemoveTestCounts(output):
    """Removes test counts from a Google Test program's output."""
    output = re.sub(r'\d+ tests, listed below', '? tests, listed below', output)
    output = re.sub(r'\d+ FAILED TESTS', '? FAILED TESTS', output)
    output = re.sub(r'\d+ tests from \d+ test cases',
                    '? tests from ? test cases', output)
    output = re.sub(r'\d+ tests from ([a-zA-Z_])', r'? tests from \1', output)
    return re.sub(r'\d+ tests\.', '? tests.', output)
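A quick check of the substitutions above on a made-up output line:

sample = "3 tests from 1 test cases ran. 1 FAILED TESTS"
print(RemoveTestCounts(sample))
# ? tests from ? test cases ran. ? FAILED TESTS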
d687e46dda00b1730685db2853ff5b3052b99fd1
58,286
def horizontal_speed(distance, seconds):
    """compute integer speed for a distance traveled in some number of seconds"""
    return int(3600.0 * distance / seconds)
4a3098d54c768301e1d6f66c8b69e1df53724aa4
58,289
def _downsample(raw, frequency):
    """Downsample data using MNE's built-in resample function"""
    raw_downsampled = raw.copy().resample(sfreq=frequency, verbose=True)
    return raw_downsampled
65034c4331bceae109aca52c713a5d11146fc987
58,293
def num_valid_segments(lower_bound, upper_bound, clip_length):
    """calculate the number of valid video clips in the video

    Args:
    - lower_bound (int): denotes the earliest frame in video that can be segmented
    - upper_bound (int): denotes the latest frame + 1 in video that can be segmented
    - clip_length (int): length of each clip

    Returns:
    - int: number of valid clips in the video
    """
    return upper_bound - clip_length - lower_bound
c030175af2ba20d90a142eae2b03eaa08012c211
58,298
def remove_comment_from_disasm(disasm):
    """Remove comment from disassembly.

    Args:
        disasm: (str) disassembly of the current instruction.

    Returns:
        New disassembly after removing comment.
    """
    if ";" in disasm:
        return disasm.split(";")[0]
    return disasm
d8b491f6820fb387638403d8fd3f58751a397396
58,304
import io
import base64


def decode_b64_img(img_code: str) -> io.BytesIO:
    """Returns a decoded image from base64"""
    img = img_code
    img_output = io.BytesIO(base64.b64decode(img))
    return img_output
2ee19e604999c217197db9a319c6ea86940d8fe3
58,308
import warnings


def scale_to_bounds(x, lower_bound, upper_bound):
    """
    DEPRECATED: Use :obj:`~gpytorch.utils.grid.ScaleToBounds` instead.

    :param x: the input data
    :type x: torch.Tensor (... x n x d)
    :param float lower_bound: lower bound of scaled data
    :param float upper_bound: upper bound of scaled data
    :return: scaled data
    :rtype: torch.Tensor (... x n x d)
    """
    warnings.warn(
        "The `scale_to_bounds` method is deprecated. Use the `gpytorch.utils.grid.ScaleToBounds` module instead.",
        DeprecationWarning,
    )
    # Scale features so they fit inside grid bounds
    min_val = x.min()
    max_val = x.max()
    diff = max_val - min_val
    x = (x - min_val) * (0.95 * (upper_bound - lower_bound) / diff) + 0.95 * lower_bound
    return x
41b77fdf1399c1ea9d55b9ff7535e92953dd165c
58,312
def is_number(value):
    """This function checks to see if the value can be converted to a number"""
    try:
        float(value)
        return True
    except (TypeError, ValueError):
        return False
92481e2fbdb3a2826f522f021eaab2961bc8da91
58,314
def rankPerNode(ranking):
    """
    Parameters
    ----------
    ranking: ordered list of tuples (node, score)

    Returns
    -------
    for each node (sorted by node ID), the ranking of the node
    """
    n_nodes = len(ranking)
    ranking_id = [0] * n_nodes
    for index, pair in enumerate(ranking):
        ranking_id[pair[0]] = index
    # we assign to all nodes the ranking of the first node with the same score
    for index, pair in enumerate(ranking):
        if index == 0:
            continue
        if pair[1] == ranking[index - 1][1]:
            prev_node = ranking[index - 1][0]
            ranking_id[pair[0]] = ranking_id[prev_node]
    return ranking_id
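A small illustration of the tie handling in the entry above (node ids must be valid list indices):

# nodes 1 and 2 share a score, so node 2 inherits node 1's rank
print(rankPerNode([(0, 0.9), (1, 0.5), (2, 0.5)]))  # [0, 1, 1]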
e82fd5e0367441da842ae4d7cfe7761287e40119
58,315
def clean_lesson_content(content):
    """Remove items such as new lines and multiple spaces."""
    content = content.replace("\n", " ")
    while "  " in content:
        content = content.replace("  ", " ")
    return content.strip()
ed609a9b6bdf31b7ffd07a185ee5cc64ad5746f8
58,318
def canonical_message_builder(content, fmt):
    """Builds the canonical message to be verified.

    Sorts the fields as a requirement from AWS.

    Args:
        content (dict): Parsed body of the response
        fmt (list): List of the fields that need to go into the message

    Returns (bytes): canonical message, UTF-8 encoded
    """
    m = ""
    for field in sorted(fmt):
        try:
            m += field + "\n" + content[field] + "\n"
        except KeyError:
            # Build with what you have
            pass
    return bytes(m, 'utf-8')
18ac12bdd92d0d41bc06384517705433b427b9ee
58,322
from typing import Optional
from typing import Dict
from typing import List


def find_skip_tags(params: Optional[Dict[str, str]] = None) -> List[str]:
    """Returns a list of tags passed in params that should be excluded from the build."""
    if params is None:
        params = {}
    tags = params.get("skip_tags", [])
    if isinstance(tags, str):
        tags = [t.strip() for t in tags.split(",") if t != ""]
    return tags
6a700b62ef20b30e8e6e27416dbdebd46cd1a08f
58,325
from collections import OrderedDict
import logging


def find_unique_points(explored_parameters):
    """Takes a list of explored parameters and finds unique parameter combinations.

    If parameter ranges are hashable operates in O(N), otherwise O(N**2).

    :param explored_parameters: List of **explored** parameters
    :return: List of tuples, first entry being the parameter values, second entry
        a list containing the run position of the unique combination.
    """
    ranges = [param.f_get_range(copy=False) for param in explored_parameters]
    zipped_tuples = list(zip(*ranges))
    try:
        unique_elements = OrderedDict()
        for idx, val_tuple in enumerate(zipped_tuples):
            if val_tuple not in unique_elements:
                unique_elements[val_tuple] = []
            unique_elements[val_tuple].append(idx)
        return list(unique_elements.items())
    except TypeError:
        logger = logging.getLogger('pypet.find_unique')
        logger.error('Your parameter entries could not be hashed, '
                     'now I am sorting slowly in O(N**2).')
        unique_elements = []
        for idx, val_tuple in enumerate(zipped_tuples):
            matches = False
            for added_tuple, pos_list in unique_elements:
                matches = True
                for idx2, val in enumerate(added_tuple):
                    if not explored_parameters[idx2]._equal_values(val_tuple[idx2], val):
                        matches = False
                        break
                if matches:
                    pos_list.append(idx)
                    break
            if not matches:
                unique_elements.append((val_tuple, [idx]))
        return unique_elements
537ae774d9e1fb2943f5e0b6c72b054642a1b883
58,326
def extract_feature(array, n_samples=1):
    """Extracts the feature column from an array.

    Note that this method expects the first column to be the row identifier
    and the second column to be the feature.

    Args:
        array (np.array): Numpy array to be extracted.
        n_samples (int): Maximum number of samples.

    Returns:
        The extracted feature column out of an array.
    """
    # Parses the data with n_samples and using the last column as the feature column
    feature = array[:n_samples, -1]
    return feature
6dbc786e611e23c6849afb42995c6253b18a1b6c
58,329
import time


def date_suffix(file_type=""):
    """Return the current date suffix, e.g. '180723.csv'.

    Args:
        file_type: file extension, e.g. '.csv'; if "", only the date is returned.

    Returns:
        str: date suffix
    """
    suffix = time.strftime("%y%m%d", time.localtime())
    suffix += file_type
    return suffix
a9b9f6b18357e343eafe80f43296c3a7a4dd483d
58,333
import pickle


def load_obj(name):
    """Loads Python object from file. File in web_map/data_obj.

    :param name: string with the name of file to load object from.
    :return: python object from file
    """
    with open('data_obj/' + name + '.pkl', 'rb') as f:
        return pickle.load(f)
2d4b4ec40865f2d99a658728527cf72f197c4bb5
58,341
def strip_from_right(string, suffix):
    """Strip a suffix from end of string."""
    if not string.endswith(suffix):
        return string
    return string[:len(string) - len(suffix)]
85ab275483c5d5697038acb3027f12f1e2b1928f
58,347
def format_action(action):
    """Format request action."""
    return '<name>{0}</name>'.format(action)
71871fa855310f7af5ca03f681917fa3962709e9
58,348
def list_vm_template(client, private_cloud, resource_pool, location):
    """
    Returns the list of VMware virtual machines templates in a resource pool,
    in a private cloud.
    """
    return client.list(private_cloud, location, resource_pool)
9e3f179c92815c68b6345f1b007709c762d5905e
58,353
def time_vst(velocity, displacement):
    """Usage: Calculate time taken using velocity and displacement."""
    return displacement / velocity
11aebe9eb51b58d09153342ee1ad9fc911b2f362
58,356
def is_function(var):
    """
    Test if variable is function (has a __call__ attribute)

    :return: True if var is function, False otherwise.
    :rtype: bool
    """
    return hasattr(var, '__call__')
1cd3d4bb70b1568a60c4f51c04652df12a967292
58,362
def division(a, b):
    """This function returns a simple math division from the first argument by the second argument"""
    return a / b
807b48297549362b0fbf43541ebc95abd733faa8
58,366
import contextlib
import io


def get_help_as_text(function_name: object):
    """Outputs the "help" docstring for the given function as text.

    Examples:
        >>> get_help_as_text(len)\n
        'Help on built-in function len in module builtins:\\n\\nlen(obj, /)\\n    Return the number of items in a container.\\n\\n'

        >>> print( get_help_as_text(len) )\n
        Help on built-in function len in module builtins:

        len(obj, /)
            Return the number of items in a container.

    References:
        https://stackoverflow.com/questions/11265603/how-do-i-export-the-output-of-pythons-built-in-help-function
        https://docs.python.org/3/library/contextlib.html#contextlib.redirect_stdout
    """
    with contextlib.redirect_stdout(io.StringIO()) as f:
        help(function_name)
    return f.getvalue()
8d787d49cd32d7ac1f48b5f5682ba3e1738f7f33
58,367
def eletype(eletype):
    """Assigns number to degrees of freedom

    According to iet assigns number of degrees of freedom, number of
    nodes and minimum required number of integration points.

    Parameters
    ----------
    eletype : int
        Type of element. These are:
        1. 4 node bilinear quadrilateral.
        2. 6 node quadratic triangle.
        3. 3 node linear triangle.
        4. 9 node quadratic quadrilateral.
        5. 2 node spring.
        6. 2 node truss element.
        7. 2 node beam (3 DOF per node).
        8. 2 node beam with axial force (3 DOF per node).

    Returns
    -------
    ndof : int
        Number of degrees of freedom for the selected element.
    nnodes : int
        Number of nodes for the selected element.
    ngpts : int
        Number of Gauss points for the selected element.
    """
    elem_id = {
        1: (8, 4, 4),
        2: (12, 6, 7),
        3: (6, 3, 3),
        4: (18, 9, 9),
        5: (4, 2, 3),
        6: (4, 2, 3),
        7: (6, 2, 3),
        8: (6, 2, 3)}
    try:
        return elem_id[eletype]
    except KeyError:
        raise ValueError("You entered an invalid type of element.")
a1f520429f23ac53835bf0fd1279abecb378b737
58,368
import functools


def only_xhr(method):
    """Decorates a method in a handler to only accept XMLHttpRequest requests."""
    @functools.wraps(method)
    def wrapper(self, *args, **kwargs):
        if "X-Requested-With" in self.request.headers:
            if self.request.headers['X-Requested-With'] == "XMLHttpRequest":
                return method(self, *args, **kwargs)
        else:
            self.set_status(403)
            self.write("This is a XMLHttpRequest request only.")
    return wrapper
02e6430a474ad741ebd16df26580d25741b71d2f
58,369
def replica_diagnostic_server_port(replica_id):
    """Returns a unique port for each replica

    All the replicas run in the same container and therefore share an IP address
    """
    return 27500 + replica_id
e0a2e8a1022f78f720c7c19ac717f0dc93288f5a
58,370
def generate_name(string):
    """Generate the name attribute for the entries at the glossary."""
    return string.lower().replace(' ', '-').replace("'", '')
6dd4775534485509a28171f2f440403f805b3fec
58,380
import torch


def get_neighbor_bonds(edge_index, bond_type):
    """
    Takes the edge indices and bond type and returns dictionary mapping atom
    index to neighbor bond types.

    Note: this only includes atoms with degree > 1
    """
    start, end = edge_index
    idxs, vals = torch.unique(start, return_counts=True)
    vs = torch.split_with_sizes(bond_type, tuple(vals))
    return {k.item(): v for k, v in zip(idxs, vs) if len(v) > 1}
96c56c067a3d436b67de093c0407f86900223302
58,383
def sort_scenes_by_timestamp(scenes_list):
    """Sort list of scenes using timestamp"""
    sorted_list = sorted(scenes_list, key=lambda scene: scene.timestamp)
    return sorted_list
13b426480932fefc7eb75b3084ff0b2807d55be2
58,385
def flesch_kincaid_grade_level(n_syllables, n_words, n_sents):
    """
    Readability score used widely in education, whose value estimates the U.S.
    grade level / number of years of education required to understand a text.
    Higher value => more difficult text.

    References:
        https://en.wikipedia.org/wiki/Flesch%E2%80%93Kincaid_readability_tests#Flesch.E2.80.93Kincaid_grade_level
    """
    return (11.8 * n_syllables / n_words) + (0.39 * n_words / n_sents) - 15.59
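A worked example for the formula above:

# 140 syllables over 100 words and 5 sentences:
# 11.8 * 1.4 + 0.39 * 20 - 15.59 = 8.73, roughly a 9th-grade text
print(flesch_kincaid_grade_level(140, 100, 5))  # ≈ 8.73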
43139d53cb03898d4152bf9c7dcfa1564a2a1128
58,388
def latest(scores):
    """Return the latest scores from the list"""
    return scores[-1]
ec5bb0a18f6a86e154065d4b2e4ae089ec45ffe7
58,389
def components_to_hosts(components):
    """Convert a list of Component namedtuples to a list of their hosts

    :param components: a list of Component namedtuples
    :returns: list of the hosts associated with each Component
    """
    hosts = []
    for component in components:
        hosts.append(component.host)
    return hosts
d41dc31903349eece69f7405ff667cee18e8c982
58,391
def clean(string_value):
    """Standardizes string values for lookup by removing case and special characters and spaces.

    Args:
        string_value (str): The lookup key to be transformed.

    Returns:
        str: The original value, but lowercase and without spaces, underscores or dashes.
    """
    return string_value.lower().strip().replace("_", "").replace("-", "").replace(" ", "")
fc03f3553eaeff18e35bd6a0290a80ddb1909882
58,393
def from_helioviewer_project(meta):
    """
    Test determining if the given metadata contains Helioviewer Project sourced data.

    Parameters
    ----------
    meta : `~astropy.utils.metadata.MetaData`
        The metadata to parse.

    Returns
    -------
    If the data of the map comes from the Helioviewer Project, then True is
    returned. If not, False is returned.
    """
    return 'helioviewer' in meta.keys()
0825ac5e261cf75abbdd919aeaa7d0184f1b3c02
58,394
def f_to_c(val):
    """Converts Fahrenheit to Celsius; accepts numeric or string"""
    try:
        return (float(val) - 32.) * 5. / 9.
    except (TypeError, ValueError):
        return (val - 32.) * 5. / 9.
4a8bdda8069f87e136fa98f7b2983b4b702babc5
58,395
import socket
import errno


def _discovery_multicast(ip: str, sock: socket.socket, attempts: int = 5):
    """
    Send a multicast command on the given network to discover available devices.
    If port is in use, try the next port up to <attempts> times

    Args:
        ip (str): Local IP
        sock (socket.socket): Socket object
        attempts (int, optional): Number of times trying different ports if
            port is in use. Defaults to `5`.

    Returns:
        tuple: (sent: bool, port: int)
            ``sent`` is `True` after sending the command.
            ``port`` is the one used for the connection
    """
    multicast_ip = "239.253.1.1"
    base_port = 50000
    port = base_port
    sent = False
    for i in range(attempts):
        try:
            # advance to the next sequential port on each retry
            port = base_port + i
            sock.bind((ip, port))
            sock.sendto(b"hdiq-discovery-request-py", (multicast_ip, port))
            sent = True
        except OSError as e:
            if e.errno == errno.EADDRINUSE and i < attempts:
                print(f"Socket Error {errno.EADDRINUSE}: socket in use")
                continue
            break
        else:
            break
    return sent, port
0a3759df478998613537859b8452b98e266f41e8
58,396
def getfirst(obj, uri, strip=True):
    """Return string value of first item returned by obj.get_value(uri).

    By default, strips whitespace surrounding string value.

    If collection is empty, return None.
    """
    data = obj.get_value(uri)
    if data:
        data = str(data[0])
        if strip:
            data = data.strip()
        return data
    else:
        return None
0af10bc6ce0746acb32a1f5b1237d44348480744
58,397
from typing import Sequence
from typing import Tuple


def cross(a: Sequence[int], b: Sequence[int]) -> Tuple[int, int, int]:
    """Returns the cross product of the three-dimensional vectors a and b"""
    ax, ay, az = a
    bx, by, bz = b
    return ay * bz - az * by, az * bx - ax * bz, ax * by - ay * bx
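Sanity check for the entry above using the standard basis:

# x × y = z in a right-handed coordinate system
print(cross((1, 0, 0), (0, 1, 0)))  # (0, 0, 1)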
9751c1d7bbc605990190d4172f0945db2090e6b2
58,401
def umf_coeff(dp, mu, rhog, rhos, coeff='wenyu'):
    """
    Determine minimum fluidization velocity using experimental coefficients from
    Wen and Yu, Richardson, Saxena and Vogel, Babu, Grace, and Chitester. This
    approach can be used when bed void fraction and particle sphericity are not
    known. Refer to Equation 25 and Table 4 in Chapter 3 of Kunii and
    Levenspiel [1]_.

    Parameters
    ----------
    dp : float
        Diameter of bed particle [m]
    mu : float
        Viscosity of gas [kg/(m s)]
    rhog : float
        Density of gas [kg/m³]
    rhos : float
        Density of bed particle [kg/m³]
    coeff : string
        Keyword to determine which coefficients to use for umf calculation.
        Valid options are 'wenyu', 'rich', 'sax', 'babu', 'grace', and 'chit'.
        Default coefficients are set to 'wenyu'.

    Returns
    -------
    umf : float
        Minimum fluidization velocity [m/s]

    Example
    -------
    >>> umf_coeff(0.0005, 3.6e-5, 0.44, 2500, 'rich')
    0.1192

    References
    ----------
    .. [1] Daizo Kunii and Octave Levenspiel. Fluidization Engineering.
       Butterworth-Heinemann, 2nd edition, 1991.
    """
    if coeff == 'wenyu':
        # Wen and Yu coefficients [-]
        a = 33.7
        b = 0.0408
    elif coeff == 'rich':
        # Richardson coefficients [-]
        a = 25.7
        b = 0.0365
    elif coeff == 'sax':
        # Saxena and Vogel coefficients [-]
        a = 25.3
        b = 0.0571
    elif coeff == 'babu':
        # Babu coefficients [-]
        a = 25.3
        b = 0.0651
    elif coeff == 'grace':
        # Grace coefficients [-]
        a = 27.2
        b = 0.0408
    elif coeff == 'chit':
        # Chitester coefficients [-]
        a = 28.7
        b = 0.0494
    else:
        raise ValueError('Coefficient is not a valid option.')

    # g is acceleration due to gravity [m/s²], Ar is Archimedes number [-],
    # and Re is Reynolds number [-]
    g = 9.81
    Ar = (dp**3 * rhog * (rhos - rhog) * g) / (mu**2)
    Re = (a**2 + b * Ar)**0.5 - a

    # minimum fluidization velocity [m/s]
    umf = (Re * mu) / (dp * rhog)
    return umf
22812a908f5666d8fe3ad83af716413d1dad24bb
58,406
import re
from datetime import datetime


def parse_date_sentence(sentence):
    """Return the date type + date in this sentence (if one exists)."""
    # Search for month date, year at the end of the sentence
    sentence = sentence.lower().strip()
    date_re = r".*((january|february|march|april|may|june|july|august"
    date_re += r"|september|october|november|december) \d+, \d+)$"
    match = re.match(date_re, sentence)
    if match:
        date = datetime.strptime(match.group(1), "%B %d, %Y")
        if 'comment' in sentence:
            return ('comments', date.strftime("%Y-%m-%d"))
        if 'effective' in sentence:
            return ('effective', date.strftime("%Y-%m-%d"))
        return ('other', date.strftime('%Y-%m-%d'))
1f91829bd2f9e871d51d4a91a05e8ed18742691f
58,414
def select(objects, fn, unlist=True, first=False):
    """Returns a subset of `objects` that matches a range of criteria.

    Parameters
    ----------
    objects : list of obj
        The collection of objects to filter.
    fn : lambda expression
        Filter objects by whether fn(obj) returns True.
    first: bool, optional
        If True, return first entry only (default: False).
    unlist : bool, optional
        If True and the result has length 1 and objects is a list, return the
        object directly, rather than the list (default: True).

    Returns
    -------
    list
        A list of all items in `objects` that match the specified criteria.

    Examples
    --------
    >>> select([{'a': 1, 'b': 2}, {'a': 2, 'b': 2}, {'a': 1, 'b': 1}], lambda o: o['a'] == 1)
    [{'a': 1, 'b': 2}, {'a': 1, 'b': 1}]
    """
    filtered = objects
    if type(objects) is list:
        filtered = [obj for obj in filtered if fn(obj)]
    elif type(objects) is dict:
        filtered = {obj_key: obj for obj_key, obj in filtered.items() if fn(obj)}
    if first:
        if len(filtered) == 0:
            return None
        elif type(filtered) is list:
            return filtered[0]
        elif type(filtered) is dict:
            return filtered[list(filtered.keys())[0]]
    elif unlist and len(filtered) == 1 and \
            type(filtered) is list:
        return filtered[0]
    else:
        return filtered
8a24c960fdbcd8bc01b5a9cbb0b1e7fc4e82b836
58,417
import logging


def get_port_map(dut, asic_index=None):
    """
    @summary: Get the port mapping info from the DUT
    @return: a dictionary containing the port map
    """
    logging.info("Retrieving port mapping from DUT")
    namespace = dut.get_namespace_from_asic_id(asic_index)
    config_facts = dut.config_facts(host=dut.hostname, source="running",
                                    namespace=namespace)['ansible_facts']
    port_mapping = config_facts['port_index_map']
    for k, v in port_mapping.items():
        port_mapping[k] = [v]
    return port_mapping
f5a194c674a2b84d42cba8f53804289e9812b6be
58,418
def byteConversion(amount, btype):
    """
    convert unit of memory size into bytes for comparing different
    unit measures

    :param amount: unit of memory size
    :ptype amount: float
    :param btype: unit type
    :ptype btype: string
    :return: unit in bytes
    :rtype: float
    """
    n = 1
    conversionMap = {"KB": 1, "TB": 4, "GB": 3, "MB": 2}
    _bytes = amount
    if btype.upper() in conversionMap:
        n = conversionMap[btype.upper()]
    for _ in range(n):
        _bytes *= 1024
    return _bytes
97846d13ec7037741d7651a14409dc94f77769dd
58,420
import pickle


def read_pickle(filepath):
    """read a python object with a pickle"""
    with open(filepath, 'rb') as file:
        unpickler = pickle.Unpickler(file)
        return unpickler.load()
cb96cd0286edfd8c73f875b0a39f8887d2fbb2f0
58,422
def read_file(fname):
    """Return the contents of a file."""
    with open(fname) as fd:
        contents = fd.read()
    return contents
680c31b11c2c774ce1ede1d4f615c0ca72001238
58,425
import random
import string


def random_uid() -> str:
    """Generate a random id which can be used e.g. for unique key names."""
    return "".join(random.choices(string.digits, k=10))
79e897af6fb02dda945d6496e03734db6903b511
58,426
def decode_file_size(high: int, low: int) -> int:
    """File size can be encoded as 64 bits or 32 bits values.

    If upper 32 bits are set, it's a 64 bits integer value.
    Otherwise it's a 32 bits value. 0xFFFFFFFF means zero.
    """
    if high != 0xFFFFFFFF:
        return (high << 32) | (low & 0xFFFFFFFF)
    elif low != 0xFFFFFFFF:
        return low
    else:
        return 0
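The three branches of the entry above, exercised:

print(decode_file_size(0x1, 0x0))                # 4294967296 (64-bit value)
print(decode_file_size(0xFFFFFFFF, 100))         # 100 (32-bit value)
print(decode_file_size(0xFFFFFFFF, 0xFFFFFFFF))  # 0 (sentinel for zero)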
3399d92b5524300aadfee3be7bb58bd3a8b54e17
58,427
def get_section_link(header):
    """Add a section link

    Args:
        :header: (str) section header

    Returns:
        :rst: (str) RST documentation
    """
    rst = ""
    rst += f".. _sec_mframwork_{header.lower().replace(' ', '_')}:"
    rst += "\n\n"
    return rst
615bb5b2acdcc7181f090373389414ceb380c807
58,433
def sha256_to_str(obj):
    """Convert a bytearray to its lowercase hexadecimal string representation.

    Args
    ----
    obj (bytearray): bytearray representation of the string.

    Returns
    -------
    (str): Lowercase hexadecimal string.
    """
    return None if obj is None else obj.hex()
16f0fff7103d91f32bec248466e9aaddc818f29e
58,436
def inc(x):
    """Increments its argument"""
    return x + 1
23b0c854dc3307fe6411174e9b9da39b0f0988d3
58,440
import json


def load_index_to_label_dict(path='index_to_class_label.json'):
    """Retrieves and formats the index to class label lookup dictionary
    needed to make sense of the predictions.

    When loaded in, the keys are strings; this also processes those keys
    to integers."""
    with open(path, 'r') as f:
        index_to_class_label_dict = json.load(f)
    index_to_class_label_dict = {
        int(k): v for k, v in index_to_class_label_dict.items()}
    return index_to_class_label_dict
1bc783ed0143420e481c1f328aacfe27bd85b072
58,445
import typing


def convert_value(dtype: typing.Any, value: typing.Any,
                  default: typing.Any = None) -> typing.Union[typing.Any, None]:
    """
    Return the passed value converted to the specified type if it is not None
    and does not raise an Exception while converting. Returns the passed
    default if the conversion failed.

    :param dtype: The datatype the value should be returned as
    :param value: The value that should be converted
    :param default: The default value that should be returned if the conversion failed
    :return: The converted value or the passed default
    """
    try:
        if value is None:
            return default
        return dtype(value)
    except Exception:
        return default
3fd02525daf5db4b8761b3af3746004db9459429
58,451
def fn_col_mapping(df):
    """Return a list of column names and their associated column index number."""
    return [f'{c[0]}:{c[1]}' for c in enumerate(df.columns)]
4f38f6852b186a025f2e44b80ec20235d10c6594
58,452
from pathlib import Path


def get_package_path(package):
    """Compute the file-system path for `package`."""
    if hasattr(package, "__path__"):
        return Path(package.__path__[0])
    elif hasattr(package, "__file__"):
        return Path(package.__file__).parent
    else:
        raise ValueError(f"Cannot determine path for package {package}")
9ee4d63a7249ef7f1717a197346cb164e4337435
58,454
def btc_to_satoshi(btc: float) -> int:
    """Convert a btc value to satoshis

    Args:
        btc: The amount of btc to convert

    Returns:
        The integer of satoshis for this conversion
    """
    return int(btc * 100000000)
5d6e7b2583bd876779e46312bdbd6cdb5303a433
58,459
def _check_limit(num, limits=[0, 100]):
    """
    Ensures that `num` is within provided `limits`

    Parameters
    ----------
    num : float
        Number to assess
    limits : list, optional
        Lower and upper bounds that `num` must be between to be considered valid
    """
    lo, hi = limits
    num = float(num)
    if num < lo or num > hi:
        raise ValueError('Provided value {} is outside expected limits {}.'
                         .format(num, limits))
    return num
24318025e91dfe66dcd73dc6e000b16f818352c5
58,469
def validator(targetFile):
    """
    Function to validate a password using a password policy.

    :param targetFile: path to input file.
    """
    with open(targetFile, 'r') as inputFile:
        lines = inputFile.readlines()

    targetCount = 0
    targetCountP2 = 0

    # policyNumbers = the numbers to be min and max
    # target = the letter to be counted
    # passwd = the password string to be checked
    for line in lines:
        splitPolicy = line.split(" ")
        policy = splitPolicy[0]
        policyNumbers = policy.split("-")
        target = splitPolicy[1].replace(':', '')
        passwd = splitPolicy[2]
        if int(policyNumbers[0]) <= passwd.count(target) <= int(policyNumbers[1]):
            targetCount += 1
        if (passwd[int(policyNumbers[0]) - 1] == str(target)) is not (passwd[int(policyNumbers[1]) - 1] == str(target)):
            targetCountP2 += 1

    return targetCount, targetCountP2
49fea7572ff1c9bae4f3b30aa671a133eeea5992
58,470
def add_compartment(model, compartment, **keywords):
    """
    Helper function to add a compartment to the SBML model.

    :param model: a valid SBML model
    :param compartment: a Compartment object
    :return: SBML compartment object
    """
    sbml_compartment = model.createCompartment()
    compartment_id = compartment.name
    sbml_compartment.setId(compartment_id)
    sbml_compartment.setName(compartment.name)
    sbml_compartment.setConstant(True)  # keep compartment size constant
    sbml_compartment.setSpatialDimensions(compartment.spatial_dimensions)  # For example, 3 dimensional compartment
    sbml_compartment.setSize(compartment.size)  # For example, 1e-6 liter
    if compartment.unit is not None:
        sbml_compartment.setUnits(compartment.unit)
    return sbml_compartment
c338b2a4a3196dc0ccc2a6e0072df38ee6832c48
58,471
from datetime import datetime


def _format_time(when):
    """
    Format a time to ISO8601 format.
    Or if there is no time, just return ``None``.
    """
    if when is None:
        return None
    return datetime.isoformat(when)
a1ed9c10a0868c2a64d90837d672e82fec44c294
58,475
def _parse_zeopp(filecontent: str) -> dict:
    """Parse the results line of a network call to zeopp

    Args:
        filecontent (str): results file

    Returns:
        dict: largest included sphere, largest free sphere,
            largest included sphere along free sphere path
    """
    first_line = filecontent.split("\n")[0]
    parts = first_line.split()

    results = {
        "lis": float(parts[1]),    # largest included sphere
        "lifs": float(parts[2]),   # largest free sphere
        "lifsp": float(parts[3]),  # largest included sphere along free sphere path
    }

    return results
436e7579883a8c6700d3725756dd03dc7de3cf51
58,476
import torch


def merge_heads(x: torch.Tensor) -> torch.Tensor:
    """Merge multiple attention heads into output tensor

    (Batch size, Heads, Length, Attention size / Heads)
        => (Batch size, Length, Attention size)

    Args:
        x (torch.Tensor): [B, H, L, A/H] multi-head tensor

    Returns:
        torch.Tensor: [B, L, A] merged / reshaped tensor
    """
    batch_size, _, max_length, _ = x.size()
    # x => (B, L, H, A/H)
    x = x.permute(0, 2, 1, 3).contiguous()
    return x.view(batch_size, max_length, -1)
4250e298b81e0acb10624446a4674cd6782a6882
58,479
import math


def perceived_brightness(rgb_color):
    """Compute perceived brightness.

    cf http://www.nbdtech.com/Blog/archive/2008/04/27/Calculating-the-Perceived-Brightness-of-a-Color.aspx

    Args:
        rgb_color (str): example 'rgb(215,200,80)'
    """
    rgb_color = rgb_color[4:-1]
    r, g, b = [int(i) for i in rgb_color.split(',')]
    return math.sqrt(r**2 * 0.241 + g**2 * 0.691 + b**2 * 0.068)
7226eb5611bd17b80077d1beac448b630c05704d
58,489
def words_normalize(words):
    """
    Do a normalization process on words. In this case is just a tolower(),
    but you can add accents stripping, convert to singular and so on...
    """
    normalized_words = []
    for index, word in words:
        wnormalized = word.lower()
        normalized_words.append((index, wnormalized))
    return normalized_words
cfad1720898519bc6f9688445766103b9ca16bbe
58,490
def required_for_output(inputs: set, outputs: set, connections: dict):
    """
    Determine which nodes and connections are needed to compute the final output.

    It is considered that only paths starting at the inputs and ending at the
    outputs are relevant. This decision is made since a node bias can substitute
    for a 'floating' node (i.e. node with no input and constant output).

    This algorithm works in two steps:
     * A growing-phase; adding nodes that are connected (albeit indirectly) to
       the outputs of the networks
     * A pruning-phase; removing dead hidden nodes that do not contain both an
       ingoing and an outgoing connection

    :note: It is assumed that the input identifier set and the node identifier
        set are disjoint. By convention, the output node ids are always the same
        as the output index.

    :param inputs: Set of the used input identifiers
    :param outputs: Set of all the output node identifiers
    :param connections: Dictionary of genome connections
    :return: Sets of: used inputs, used hidden nodes, used output nodes,
        remaining connections
    """
    # Only consider the enabled connections
    enabled_conn = {k: c for k, c in connections.items() if c.enabled}
    non_recur_enabled_conn = {(a, b): c for (a, b), c in enabled_conn.items() if a != b}

    # Check if both in and outputs are used
    used_inputs = {a for (a, _) in enabled_conn.keys() if a < 0}
    if not used_inputs:
        return set(), set(), outputs, dict()
    used_outputs = {b for (_, b) in enabled_conn.keys() if b in outputs}
    if not used_outputs:
        return set(), set(), outputs, dict()

    # Growing-phase
    added_nodes = {i for i in outputs}
    used_nodes = {i for i in outputs}
    while added_nodes:
        # Find all nodes sending to one of the used nodes
        nodes = {a for (a, b) in non_recur_enabled_conn.keys() if b in used_nodes}

        # Update the two sets
        added_nodes = nodes - used_nodes
        used_nodes.update(nodes)

    # Get all the connections that are used
    used_conn = {(a, b): c for (a, b), c in enabled_conn.items()
                 if (a in used_nodes) and (b in used_nodes)}
    non_recur_used_conn = {(a, b): c for (a, b), c in used_conn.items() if a != b}

    # Pruning-phase
    removed_nodes = {True}  # dummy
    while removed_nodes:
        # Find all nodes that do both send and receive, or are input or output nodes
        sending = {a for (a, _) in non_recur_used_conn.keys() if a in used_nodes}
        receiving = {b for (_, b) in non_recur_used_conn.keys() if b in used_nodes}
        nodes = {n for n in used_nodes
                 if (n in sending & receiving) or (n in inputs | outputs)}

        # Check if any nodes are removed and update the current sets
        removed_nodes = used_nodes - nodes
        used_nodes = nodes

        # Update the used connections
        used_conn = {(a, b): c for (a, b), c in enabled_conn.items()
                     if (a in used_nodes) and (b in used_nodes)}
        non_recur_used_conn = {(a, b): c for (a, b), c in used_conn.items() if a != b}

    # Test if there are used connection remaining
    if not used_conn:
        return set(), set(), outputs, dict()

    # Connected network, return all the used nodes and connections
    used_inp = {n for n in used_nodes if n < 0}
    used_out = outputs  # All outputs are always considered used
    used_hid = {n for n in used_nodes if n not in inputs | outputs}

    # Check (again) if both in and outputs are used
    used_inputs = {a for (a, _) in enabled_conn.keys() if a < 0}
    if not used_inputs:
        return set(), set(), outputs, dict()
    used_outputs = {b for (_, b) in enabled_conn.keys() if b in outputs}
    if not used_outputs:
        return set(), set(), outputs, dict()

    # Valid genome, return
    return used_inp, used_hid, used_out, used_conn
4561f6951a87c1dfc845fc60817ed80d270e904a
58,492
def _FormatChar(ch):
    """Convert a character into its C source description."""
    code = ord(ch)
    if code < 32 or code > 127:
        return "'\\%d'" % code
    else:
        return "'%s'" % ch
c0b245731d612fb72c2e2049be429a236df4f69a
58,493
def mem_rm_payload(mem_default_payload):
    """Provide a membership payload for removing a member."""
    rm_payload = mem_default_payload
    rm_payload["action"] = "removed"
    return rm_payload
bda0d818789e8197cc4508694ead88fc0fc5c9d7
58,494
def median(iterable):
    """Obtain the central value of a series.

    Sorts the iterable and returns the middle value if there is an odd
    number of elements, or the arithmetic mean of the middle two elements
    if there is an even number of elements.

    :param iterable: a series of orderable items
    :return: the median value
    """
    items = sorted(iterable)
    if len(items) == 0:
        raise ValueError("median() arg is an empty sequence")
    median_index = (len(items) - 1) // 2
    if len(items) % 2 != 0:
        return items[median_index]
    return (items[median_index] + items[median_index + 1]) / 2.0
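Both branches of the entry above:

print(median([3, 1, 2]))     # 2 (odd count: middle element)
print(median([4, 1, 3, 2]))  # 2.5 (even count: mean of 2 and 3)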
fc3725a58686fba68610c3fb3295ba72242c64c3
58,495
import re


def find_url(string):
    """
    Search URL in text.

    Parameters
    ----------
    string: str
        Text selected to apply transformation

    Examples:
    ---------
    ```python
    sentence = "I love spending time at https://www.kaggle.com/"
    find_url(sentence)
    ```
    """
    text = re.findall(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', string)
    return "".join(text)
9ae11f02bc1eccbce30c436cf2b02495fb18d338
58,502
def get_elapsed_time_string(elapsed_time, rounding=3):
    """Format elapsed time

    Parameters
    ----------
    elapsed_time : float
        Elapsed time in seconds
    rounding : int
        Number of decimal places to round

    Returns
    -------
    processing_time : float
        Scaled amount of elapsed time
    processing_time_unit : str
        Time unit, either seconds, minutes, or hours
    """
    if elapsed_time < 60:
        processing_time = elapsed_time
        processing_time_unit = "seconds"
    elif 60 <= elapsed_time < 60**2:
        processing_time = elapsed_time / 60
        processing_time_unit = "minutes"
    else:
        processing_time = elapsed_time / (60**2)
        processing_time_unit = "hours"

    processing_time = round(processing_time, rounding)
    return processing_time, processing_time_unit
25d269236d24824fab9e9a39a1532ca98b6ff009
58,503
import inspect


def non_string_iterable(obj):
    """Check whether object is iterable but not string."""
    isclass = inspect.isclass(obj)
    if isclass:
        condition = issubclass(obj, str)
    else:
        condition = isinstance(obj, str)
    return hasattr(obj, '__iter__') and not condition
a118ff750e4255c3083e8da54d52f6bf88dfdb24
58,505
def format_currency(flt):
    """Return a formatted UK currency string from a float"""
    return '£{:,.2f}'.format(flt)
534f30069c3faf5f6fa5b53e3ea4b6c7fa2d6af9
58,506
def feature_extraction(dframe):
    """Function to extract features."""
    cols = ['concavity_worst', 'compactness_worst', 'compactness_mean',
            'compactness_se', 'perimeter_se', 'concavity_mean']
    deff = dframe.loc[:, cols]
    return deff
c5ef68c613407203643d33ae281b62d5d68bd999
58,507
def _generate_stack_status_path(stack_path):
    """
    Given a path to the stack configuration template JSON file, generates a
    path to where the deployment status JSON will be stored after successful
    deployment of the stack.

    :param stack_path: Path to the stack config template JSON file
    :return: The path to the stack status file.

    >>> _generate_stack_status_path('./stack.json')
    './stack.deployed.json'
    """
    stack_status_insert = 'deployed'
    stack_path_split = stack_path.split('.')
    stack_path_split.insert(-1, stack_status_insert)
    return '.'.join(stack_path_split)
b0573148565e8cf338d4bc6877151cbe4186ebf5
58,509
import time


def _to_time_in_iso8601(_time):
    """Convert int or float to time in iso8601 format."""
    return time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime(_time))
12acaf8c021d4b98ec6c0708d18092e82fe382a7
58,510
def clean_email(raw_email):
    """Clean a raw email address from the log file.

    Arguments:
        raw_email [string] -- from=<user@domain.com>

    Returns:
        [string] -- user@domain.com
    """
    temp1 = raw_email.split('from=<')[1]
    return temp1.rstrip('>')
a5231656bf83ca431752bbb59f41472198e04515
58,514
def jaccard_similarity(
        set_a, set_b, element_to_weight=None, max_intersections=None):
    """Calculates Jaccard similarity, a measure of set overlap.

    Args:
        set_a: First set.
        set_b: Second set.
        element_to_weight: Optional, a dict of set elements to numeric weights.
            This results in a weighted Jaccard similarity. Default weight is 1.
        max_intersections: Optional integer, ignore intersections beyond this
            threshold. If elements are weighted, the strongest intersections
            are the ones counted.

    Returns:
        Float, 0 (no overlap) - 1 (complete overlap). Two empty sets return 0.
    """
    if element_to_weight is None:
        element_to_weight = {}
    intersection_weights = sorted([
        max(element_to_weight.get(e, 1), 1)
        for e in set_a.intersection(set_b)], reverse=True)
    if max_intersections is not None:
        intersection_weights = intersection_weights[:max_intersections]
    intersection = sum(intersection_weights)
    union = intersection + len(set_a.union(set_b)) - len(intersection_weights)
    if not union:
        return 0.0
    return intersection / float(union)
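An unweighted example for the entry above:

# intersection {2, 3} has weight 2; union weight is 2 + 4 - 2 = 4
print(jaccard_similarity({1, 2, 3}, {2, 3, 4}))  # 0.5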
a1421e211e3a6e56bc884d0282b76c1d43d80ec2
58,518
def _pretty_name_list(names):
    """
    Given a list of (a, b) pairs, return aligned columns with the items of the
    second column parenthesised. Used for pretty-printing e.g. name (crsid)
    or socname (socid) lists.
    """
    # might be given an iterator, need a list, might as well sort it
    nameList = sorted(names)
    try:
        maxlen = max(len(col1) for (col1, col2) in nameList)
    except ValueError:
        # empty sequence
        return ''
    return ['  %-*s (%s)' % (maxlen, col1, col2) for (col1, col2) in nameList]
6c09ad303a72207fe58e356bbfc959afea27750b
58,521
def _guess_max_lengths(info):
    """Compute the maximum length for keys and lines."""
    max_key_len = 0
    max_line_len = 0
    for sec in info:
        for k in info[sec]:
            l_k = len(k)
            l_data = len(info[sec][k])
            line_len = l_k + 2 + l_data
            if line_len > 80:
                # skip lines which wrap
                continue
            if line_len > max_line_len:
                # found a bigger line length
                max_line_len = line_len
            if l_k > max_key_len:
                # found a bigger key length
                max_key_len = l_k
    return (max_key_len, max_line_len)
1e0356f11502d0bbb65d79ce977a32201d4adc0c
58,522
def process_connected_devices(results):
    """Process Connected Devices

    Args:
        results (Element): XML results from firewall

    Returns:
        devices_dict (dict): A dictionary containing hostnames, IP addresses, and models
    """
    xml_list = results.findall('./result/devices/entry')
    devices_dict = {}
    for device in xml_list:
        hostname = device.find('./hostname').text
        serial = device.find('./serial').text
        ip_address = device.find('./ip-address').text
        model = device.find('./model').text
        devices_dict[serial] = {'hostname': hostname,
                                'ip_address': ip_address,
                                'model': model}
    return devices_dict
fe037b24c1924958038b1ec649bcd921b7372e97
58,523
import hashlib


def getHashForFile(f):
    """Returns a hash value for a file

    :param f: File object to hash (opened in binary mode)
    :returns: str
    """
    hashVal = hashlib.sha1()
    while True:
        r = f.read(1024)
        if not r:
            break
        hashVal.update(r)
    f.seek(0)
    return hashVal.hexdigest()
7adc2383a9f555ae64a4bc9fd34715c4c3057039
58,527
def mean_off_diagonal(a):
    """Computes the mean of off-diagonal elements"""
    n = a.shape[0]
    return (a.sum() - a.trace()) / (n * n - n)
84622d750922d0889b5c83e9e1a7742f0c441f4f
58,529
def _filter_tasks_by_completed(tasks, is_completed):
    """Filters tasks based on the completion status.

    Args:
        tasks ([{str:str}]): List of tasks from asana API. At the very least,
            must have the `completed` key.
        is_completed (bool or None): Whether to return tasks that are completed.
            False will return only uncompleted tasks. True will return only
            completed tasks, while None can be used to return tasks regardless
            of completion status.

    Returns:
        filt_tasks ([{str:str}]): The tasks that meet the filter criteria.
            This will be all tasks if `is_completed` is None; or will be tasks
            that match the provided `is_completed` status.
    """
    if is_completed is None:
        return tasks
    return [t for t in tasks if t['completed'] == is_completed]
9ba9b9f66beb18f2435992993be626e98ac9ee84
58,531
import json


def value_for_cypher(value):
    """
    returns the value in cypher form

    Parameters
    ----------
    value : str, list, or boolean

    Returns
    -------
    v : str
        a string formatted for cypher
    """
    if isinstance(value, str):
        return "'{}'".format(value)
    if isinstance(value, list):
        return json.dumps(value)
    else:
        v = "{}".format(value)
        if isinstance(value, bool):
            v = v.lower()
        return v
e349237f678e040371b8f0ca0ba0bb01a97682e6
58,536
def get_output(outputs, key):
    """
    Parse and return values from a CloudFormation outputs list.

    :param list outputs: A list of ``dict`` having items of
        (``OutputKey``, ``OutputValue``).
    :param unicode key: The key for which to retrieve a value from ``outputs``.
    :returns: A ``unicode`` value.
    """
    for output in outputs:
        if output['OutputKey'] == key:
            return output['OutputValue']
1a6d1546a009cab46c8e94c42a10cfc024ad380f
58,537
def allowed_file(filename, extensions):
    """Helper function to check if a file is the correct file extension

    Args:
        filename (string): filename as a string
        extensions (list): The list of accepted file extensions

    Returns:
        bool: True if filename is accepted
    """
    return '.' in filename and \
        filename.rsplit('.', 1)[1].lower() in extensions
d024a81c6701b6d1459c7d600a1e4b43ceaeba0e
58,542
def get_pattern_matches(folder, pattern):
    """Given all the files in the folder, find those that match the pattern.

    If there are groups defined, the groups are returned. Otherwise the path
    to the matches are returned.
    """
    matches = []
    if not folder.exists():
        return matches
    for child in folder.iterdir():
        if not child.is_file():
            continue
        m = pattern.search(child.name)
        if m:
            groups = m.groups()
            if groups:
                matches.append(groups[0])
            else:
                matches.append(child)
    return matches
0d62cbb8b136f465a3056cc55f22c174ca7dced7
58,548
def rotate_180(data):
    """Rotate the data 180 degrees."""
    return data[::-1]
7d3f4b47a3a44849e1eba56c7b1516f09ddf91f6
58,550
def flare_value(flare_class):
    """Convert a string solar flare class [1] into the lower bound in W/m**2
    of the 1-8 Angstrom X-Ray Band for the GOES Spacecraft. An 'X10' flare
    = 0.001 W/m**2.

    This function currently only works on scalars.

    Parameters
    ----------
    flare_class : string
        class of solar flare (e.g. 'X10')

    Returns
    -------
    value : float
        numerical value of the GOES 1-8 Angstrom band X-Ray Flux in W/m**2.

    References
    ----------
    [1] See http://www.spaceweatherlive.com/en/help/the-classification-of-solar-flares

    Example
    -------
    value = flare_value('X10')

    Written by S.Chakraborty, 7th March 2017
    """
    flare_dict = {'A': -8, 'B': -7, 'C': -6, 'M': -5, 'X': -4}
    letter = flare_class[0]
    power = flare_dict[letter.upper()]
    coef = float(flare_class[1:])
    value = coef * 10.**power
    return value
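Spot checks for the entry above:

print(flare_value('X10'))   # 0.001, as stated in the docstring
print(flare_value('M5.2'))  # 5.2e-05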
eeeaa9d4f27647cbeafc3e99109765907df7cb88
58,553
import hashlib


def get_sha256_sums(file_bytes):
    """Hashes bytes with the SHA-256 algorithm"""
    return hashlib.sha256(file_bytes).hexdigest()
c1b019b7ea9db1d35c07dc71d95a4ae5d59254b4
58,556
def get_steps_to_exit(data, strange=False):
    """
    Determine the number of steps to exit the 'maze'

    Starting at the first element, move the number of steps based on the
    value of the current element, and either bump it by one, or if 'strange'
    is True and the value is three or more, decrease it by one. If the new
    position is outside the current list of elements, we can exit.

    Returns the number of steps it takes to exit
    """
    jumps = 0
    idx = 0
    while 0 <= idx < len(data):
        new_idx = idx + data[idx]
        if strange and data[idx] >= 3:
            data[idx] -= 1
        else:
            data[idx] += 1
        jumps += 1
        idx = new_idx
    return jumps
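Note that the entry above mutates `data` in place, so pass a copy if the list is reused. With the example maze [0, 3, 0, 1, -3]:

maze = [0, 3, 0, 1, -3]
print(get_steps_to_exit(maze.copy()))                # 5
print(get_steps_to_exit(maze.copy(), strange=True))  # 10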
a1ba7c9374cbba9d6a0a8ad091a7fbfcbe71744a
58,559
from datetime import datetime


def parse_heart_json(input_json):
    """
    Input JSON from Shortcuts currently contains two keys:
    - hrDates: Contains an array of all timestamps (ascending order)
    - hrValues: Contains an array of all heart rates (sorted by timestamp, ascending)

    This converts the dictionary into a list of tuples: each containing a
    timestamp and the corresponding HR reading.

    If parsing fails, None is returned.
    """
    try:
        hr_dates = [datetime.strptime(x, '%Y-%m-%d %H:%M:%S')
                    for x in input_json['hrDates']]
        hr_values = [float(x) for x in input_json['hrValues']]
        all_data = list(zip(hr_dates, hr_values))
        if len(all_data) == 0:
            print("No health samples found in data. Nothing will be exported.")
        return all_data
    except ValueError as ve:
        print("Error parsing the dates or values returned from the Shortcuts app: {}".format(ve))
        return None
6a37331e778bab5c00a0b6d41388cae31298f005
58,560