content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
import re


def _RunAndNotrunTests(stdio_tests, user_tests):
    """Return lists of run and not-run instances of given user tests.

    The first list holds test instances present in |stdio_tests| (i.e. run
    on the build); the second holds user tests with no instance in
    |stdio_tests|.  A single user test may match several instances when it
    belongs to a test group or was run with multiple data values, so a
    regex is used to collect every matching instance.

    Args:
        stdio_tests: List of test instances run on build.
        user_tests: List of test names specified by user.

    Returns:
        1) run_tests: list of test instances of user tests run on build.
        2) notrun_tests: list of user tests not run on build.
    """
    run_user_tests = []
    notrun_user_tests = []
    for user_test in user_tests:
        # Match optional group prefix and optional "/<data value>" suffix.
        pattern = r'(.*/)?%s(/\d*)?$' % user_test
        matches = [t for t in stdio_tests if re.search(pattern, t)]
        if matches:
            run_user_tests.extend(matches)
        else:
            notrun_user_tests.append(user_test)
    print(' User tests: Instances run: %s' % len(run_user_tests))
    print(' User tests: Not run: %s\n' % len(notrun_user_tests))
    return run_user_tests, notrun_user_tests
abb1377d2a467e9d371e7cd651d81e6dab437743
104,467
def extract_etymology(entry):
    """Extract the etymology (text of the last well-formed [...] group) from an entry string."""
    inside = False   # currently between '[' and ']'
    closed = False   # at least one complete [...] group already seen
    etymology = ""
    for ch in entry:
        if inside:
            if ch == "[":
                raise Exception(f"unexpected [ in: {entry}")
            if ch == "]":
                inside = False
                closed = True
            else:
                etymology += ch
        else:
            if ch == "[":
                inside = True
                if closed:
                    # reset etymology so only last [...] is counted
                    etymology = ""
            elif ch == "]":
                raise Exception(f"unexpected ] in: {entry}")
    if inside:
        # Ended while still inside a bracket group (original state 1).
        raise Exception(f"unexpected end state 1 in: {entry}")
    return etymology
def5c936f72bb937f7dd5ad168f8a595318d40ef
104,468
import functools

import requests


@functools.lru_cache(maxsize=128)
def cached_get(*args, **kwargs):
    """Cached version of requests.get.

    Fix: the function was named ``cached_get`` but performed no caching at
    all.  Results are now memoized with ``functools.lru_cache`` (bounded at
    128 entries so the cache cannot grow without limit).  Arguments must be
    hashable for a call to be cached; failed requests (raise_for_status)
    are not cached.

    :return: text of response
    :rtype: str
    """
    response = requests.get(*args, **kwargs)
    response.raise_for_status()
    return response.text
90ea6064665a623595cc46331e0555e99ca58d9d
104,470
def is_pandigital(value):
    """
    Check that each digit appears once only.

    :param value: integer value (can also be negative)
    :retuns: true when given number is pandigital

    see http://en.wikipedia.org/wiki/Pandigital_number

    >>> is_pandigital(1234567890)
    True
    >>> is_pandigital(12345678900)
    False
    >>> is_pandigital(9876543210)
    True
    >>> is_pandigital(10240)
    False
    """
    # A number has no repeated digit exactly when the count of its digits
    # equals the count of *distinct* digits.
    digits = str(abs(value))
    return len(digits) == len(set(digits))
15b6090ed35799b665e84bbf66f524994df33207
104,476
def get_info(image):
    """ Retrieves image information from an image ID and returns it as a dictionary """
    # Every key mirrors the attribute of the same name on the image object.
    attributes = (
        'architecture', 'creation_time', 'description', 'disk_device_mappings',
        'image_id', 'image_name', 'image_owner_alias', 'image_version',
        'is_copied', 'is_self_shared', 'is_subscribed', 'is_support_cloudinit',
        'is_support_io_optimized', 'platform', 'product_code', 'progress',
        'region', 'size', 'status', 'tags', 'usage', 'osname', 'ostype',
    )
    return {name: getattr(image, name) for name in attributes}
a7e9960bc88c77926a02764e198028e0144e4c77
104,477
from typing import Union
from typing import List


def list_of_strs(
    string: str, delimiter: Union[str, None] = None, strip=True
) -> List[str]:
    """Cast a string to a list of strings.

    Args:
        string: String to be converted to a list of str's.
        delimiter: Delimiter between str's in the string. Default is to
            split with any whitespace string (see str.split() method).
        strip: Whether to strip the substrings (i.e. remove leading and
            trailing whitespaces after the split with a delimiter that is
            not whitespace).
    """
    parts = string.split(sep=delimiter)
    if strip:
        return [part.strip() for part in parts]
    return parts
93df99f1259f420ef109a5aa4dcaff56f1df1dd6
104,478
def normalize_azimuth(azimuth, zero_center=False):
    """Normalize an azimuth in degrees so it falls between 0 and 360.

    If ``zero_center=True``, azimuth will be normalized between -180 and 180.
    """
    # Values already in [0, 360] (inclusive) are left untouched.
    if not 0 <= azimuth <= 360:
        azimuth %= 360
    if zero_center and azimuth > 180:
        azimuth -= 360
    return azimuth
6bc5ece4345620532e5136d60d49b5db91bf826c
104,479
def length_of_last_word(text):
    """
    Given a string s consists of upper/lower-case alphabets and empty
    space characters ' ', return the length of last word in the string.

    Traverses the string once, from the end. If there is one word it is
    the last word.

    Fixes vs. the previous version:
    - a single word with no spaces (e.g. "hello") returned 0 because the
      character at index 0 reset the counter;
    - trailing spaces (e.g. "hello ") returned 0 because the loop broke
      on the first space before any word character was seen.

    Args:
        text (str): long string consisted of word(s)

    Return:
        length (int): length of the last word in the string
    """
    length = 0
    seen_word = False  # becomes True once a word character is reached
    for i in range(len(text) - 1, -1, -1):
        ch = text[i]
        if ch == ' ':
            # Only stop on a space that follows the last word; trailing
            # spaces before any word character are skipped.
            if seen_word:
                break
        else:
            seen_word = True
            length += 1
    return length
0a0aa1590bf5ac8323a53a5d34ebe87921fcd95d
104,480
def tuple_format(model):
    """Convert a model to it's tuple format"""
    pairs = []
    for first, second in zip(*model):
        pairs.append((int(first), int(second)))
    return tuple(pairs)
f63ad19b90073a1c0bd071629ecd964770a87fa7
104,485
def Identity(x):
    """Return the argument unchanged (identity function)."""
    return x
56d73294bb1a01e18040ba42760a861dd68b5b7d
104,486
def bubble_sort(list):
    """Sort list in place using the Bubble Sort algorithm.

    Repeatedly sweeps the list, swapping adjacent out-of-order pairs,
    until a full sweep performs no swap.

    Fixes vs. the previous version: removed the dead ``test`` variable
    and stray semicolon, and replaced the three-statement temp-variable
    swap with a tuple swap.

    Arguments:
        list {list} -- Unsorted list of comparable items.  (The parameter
            name shadows the ``list`` builtin; kept for backward
            compatibility with keyword callers.)

    Returns:
        list -- The same list object, sorted in ascending order.
    """
    swapped = True
    while swapped:
        swapped = False
        for n in range(len(list) - 1):
            if list[n] > list[n + 1]:
                list[n], list[n + 1] = list[n + 1], list[n]
                swapped = True
    return list
2a964717a2aaa54d33f07a9b26a4017ab5e7ee07
104,488
def lin_reg_var_organinc_lasso(X, y, pen_val, coef, intercept=None, sample_weight=None):
    """
    Estimates the linear regression variance using the organic Lasso
    estimate (Yu and Bien, 2019). This requires first fitting a L1 squared
    penalized model.

    Parameters
    ----------
    X: array-like, shape (n_samples, n_features)
        The training covariate data.

    y: array-like, shape (n_samples, ) or (n_samples, n_responses)
        The training response data.

    pen_val: float
        The squared L1 penalty value.

    coef: array-like, shape (n_features, )
        The Lasso estimated coefficient.

    intercept: None, float
        The (optional) estimated intercept.

    sample_weight: None or array-like, shape (n_samples,)
        (Optional) Individual weights for each sample.

    Output
    ------
    sigma_sq: float
        An estimate of the noise variance.

    References
    ----------
    Yu, G. and Bien, J., 2019. Estimating the error variance in a
    high-dimensional linear model. Biometrika, 106(3), pp.533-546.
    """
    if sample_weight is not None:
        raise NotImplementedError

    # Equation (17) of Yu & Bien (2019).
    predictions = X @ coef
    if intercept is not None:
        predictions = predictions + intercept

    residual_ss = ((y - predictions) ** 2).sum()
    l1_norm_sq = abs(coef).sum() ** 2

    return (1 / X.shape[0]) * residual_ss + 2 * pen_val * l1_norm_sq
816354086f0bb33f6525deea05717a1535a97e8c
104,489
import math


def decomposeTwoByTwo(twoByTwo):
    """Decompose a 2x2 transformation matrix into components:
    - rotation (radians)
    - scalex
    - scaley
    - skewx (radians)
    - skewy (radians)
    """
    a, b, c, d = twoByTwo
    # Determinant of the 2x2 matrix; signed area scale factor.
    delta = a * d - b * c

    rotation = 0
    scalex = scaley = 0
    skewx = skewy = 0

    # Apply the QR-like decomposition.
    if a != 0 or b != 0:
        # First column is non-zero: take rotation from it.
        r = math.sqrt(a * a + b * b)
        # Sign of b picks the rotation direction.
        rotation = math.acos(a / r) if b > 0 else -math.acos(a / r)
        scalex, scaley = (r, delta / r)
        skewx, skewy = (math.atan((a * c + b * d) / (r * r)), 0)
    elif c != 0 or d != 0:
        # First column is zero; fall back to the second column.
        s = math.sqrt(c * c + d * d)
        # NOTE(review): the two acos arguments differ in sign (-c/s vs c/s)
        # between the d > 0 and d <= 0 branches — presumably intentional for
        # quadrant selection; confirm against the upstream decomposition.
        rotation = math.pi / 2 - (math.acos(-c / s) if d > 0 else -math.acos(c / s))
        scalex, scaley = (delta / s, s)
        skewx, skewy = (0, math.atan((a * c + b * d) / (s * s)))
    else:
        # a = b = c = d = 0: degenerate matrix, everything stays zero.
        pass

    return rotation, scalex, scaley, skewx, skewy
91d7a4be94d28f39935ccfc75edf84a4cd8409c7
104,491
def get_owner_id(logs):
    """Returns the logs' owner_id."""
    first_job = logs['job'][0]
    return first_job['owner_id']
8d9b124057a36cce842ac0b8625725fba5ab76e5
104,495
def default_terminal_condition_for_agility(env, max_roll: float = 1.8, max_pitch: float = 1.8, min_height: float = 0.0, enforce_foot_contacts: bool = False):
    """A default terminal condition for more agile tasks (i.e. jumping).

    The robot is considered as fallen if the base position is too low, the
    base tilts/rolls too much or parts of the body other than the feet
    touch the ground.

    Args:
        env: An instance of the gym env.
        max_roll: Max roll before the episode terminates.
        max_pitch: Max pitch before the episode terminates.
        min_height: Min height before the episode terminates.
        enforce_foot_contacts: Ensure that contacts are established with
            the feet.

    Returns:
        A boolean indicating if the episode should be terminated.
    """
    unwanted_collision = False
    if enforce_foot_contacts:
        # Make sure that contacts are only made with the robot's feet.
        knee_link_ids = [2, 5, 8, 11]
        contacts = env.pybullet_client.getContactPoints(bodyA=env.robot.robot_id)
        # contact[1]/contact[2] are presumably the two body ids (skip
        # self-contacts); contact[3] the link index on bodyA.
        unwanted_collision = any(
            contact[3] not in knee_link_ids
            for contact in contacts
            if contact[1] != contact[2]
        )

    roll, pitch, _ = env.robot.base_roll_pitch_yaw
    height = env.robot.base_position[2]
    return (abs(roll) > max_roll or abs(pitch) > max_pitch
            or height < min_height or unwanted_collision)
f7d54308c7719e3ba96a39e2b7505d106dbbb17d
104,500
def parse_fasta(filepath):
    """
    Parses a fasta file and extracts the sequence descriptions and
    sequences.

    Fixes vs. the previous version:
    - sequences spanning several lines are concatenated instead of being
      truncated to their first line;
    - a '>' description on the last line no longer raises IndexError;
    - the file handle is closed via a context manager even on error.

    :param filepath: The filepath of the file containing the multiple
        fasta sequences.
    :return: A dictionary mapping fasta descriptions to their sequences.
    """
    queries = {}
    description = None
    with open(filepath, 'r') as handle:
        for line in handle:
            line = line.strip()
            if not line:
                continue
            if line.startswith('>'):
                description = line[1:].strip()
                queries[description] = ''
            elif description is not None:
                # Sequence line: append to the current record.
                queries[description] += line
    return queries
be1e0576749ac61fba1f5ad9e30f6da41ebd1426
104,502
def join_list_pretty(ls):
    """
    Join a list in a human readable way.

    An empty list returns the empty string.  A single element is returned
    converted to a string.  Two elements give "x and y".  Three or more
    give "x, y, and z" (Oxford comma).

    Parameters
    ----------
    ls: list
        The list to join
    """
    count = len(ls)
    if count == 0:
        return ""
    if count == 1:
        return str(ls[0])
    if count == 2:
        return str(ls[0]) + " and " + str(ls[1])
    head = ", ".join(str(item) for item in ls[:-1])
    return head + ", and " + str(ls[-1])
4db280430dcb0a57b0566cdfe7f365eb177840f8
104,506
import math


def c_statistic_with_95p_confidence_interval(cstat, num_positives, num_negatives, z_alpha_2=1.96):
    """
    Calculates the confidence interval of an ROC curve (c-statistic),
    using the method described under "Confidence Interval for AUC" here:
    https://ncss-wpengine.netdna-ssl.com/wp-content/themes/ncss/pdf/Procedures/PASS/Confidence_Intervals_for_the_Area_Under_an_ROC_Curve.pdf

    Args:
        cstat: the c-statistic (equivalent to area under the ROC curve)
        num_positives: number of positive examples in the set.
        num_negatives: number of negative examples in the set.
        z_alpha_2 (optional): the critical value for an N% confidence
            interval, e.g., 1.96 for 95%, 2.326 for 98%, 2.576 for 99%, etc.

    Returns:
        The 95% confidence interval half-width, e.g., the Y in X ± Y.
    """
    q1 = cstat / (2 - cstat)
    q2 = 2 * cstat ** 2 / (1 + cstat)
    variance_terms = (
        cstat * (1 - cstat)
        + (num_positives - 1) * (q1 - cstat ** 2)
        + (num_negatives - 1) * (q2 - cstat ** 2)
    )
    standard_error_auc = math.sqrt(variance_terms / (num_positives * num_negatives))
    return z_alpha_2 * standard_error_auc
31f09fb8fcdf018d86d8d5c5e5c3adce3861da1d
104,512
def calc_actual_vapor_pressure(es_tmin, es_tmax, hmin, hmax):
    """
    Calculate actual vapor pressure from saturation vapor pressure and
    relative humidity.  Equation 17 of Allen (1998).

    Parameters
    ----------
    es_tmin : numpy ndarray
        Saturation vapor pressure at minimum air temperature.
    es_tmax : numpy ndarray
        Saturation vapor pressure at maximum air temperature.
    hmin : numpy ndarray
        Minimum relative humidity, in percent
    hmax : numpy ndarray
        Maximum relative humidity, in percent
    """
    # Mean of the two humidity-weighted saturation pressures.
    ea_from_tmax = es_tmax * hmin / 100.
    ea_from_tmin = es_tmin * hmax / 100.
    return (ea_from_tmax + ea_from_tmin) / 2.
7f03569d53539fce542b334ae2cade72d6c6513c
104,515
def _list_to_and_str(lyst):
    """Convert a list to a comma delimited string with the last entry
    joined by "and".

    Fixes vs. the previous version: an empty list raised IndexError and a
    single-element list produced a stray leading " and "; both are handled
    now.  (Also fixed the "command delimited" docstring typo.)

    :param lyst: The list to turn into a str
    :type lyst: list
    :return: The nicely formatted string
    :rtype: str
    """
    if not lyst:
        return ""
    if len(lyst) == 1:
        return lyst[0]
    return "{most} and {last}".format(most=", ".join(lyst[:-1]), last=lyst[-1])
11b2e6db6d492f971e36c66c1187207b5d69e810
104,518
from datetime import datetime


def validate_calender(calender):
    """
    Helper function build out calender based on current time.

    :param Dict calender: Calender dict
    :return: Updated Calender dict
    :rtype: Dict
    """
    today = datetime.today()
    # NOTE(review): despite the comment below, this returns on the FIRST
    # quarter whose start date is in the future (relying on dict insertion
    # order), and implicitly returns None when no quarter qualifies —
    # confirm callers handle the None case.
    # Keep looping until the quarter start is after today
    for quarter, value in calender.items():
        # value[0] is assumed to be a dict with a 'start_date' datetime —
        # TODO confirm against the callers that build the calendar.
        if value[0]['start_date'] > today:
            return {quarter: value}
9eaa43406d1967361671f6e53403b0fcdd1f61d4
104,519
def compute_average_uptime(cursor):
    """Returns the average uptime of a farmer."""
    query = '''SELECT avg(100 * uptime / (last_date - first_date)) FROM farmers'''
    cursor.execute(query)
    row = cursor.fetchone()
    return row[0]
5296a379f19d33a077394c1a5d1aae41268efbf3
104,523
import re


def get_data_url(response):
    """
    Returns the url contained in the data-ajax attribute from the
    response html content, or '' when no url is present.
    """
    html = response.content.decode('utf-8')
    match = re.search("url\": \\'(.*?)'", html, re.S | re.M)
    return match.group(1) if match else ''
d8d7535294a3809d45c14d2ca2a34c927a261c07
104,527
def get_operation_model(svc, fn):
    """Return operation model for method of a service model

    :param svc: AWS service client
    :param fn: method of svc, or its name as a str
    :return: operation model for fn
    """
    func_name = fn if isinstance(fn, str) else fn.__name__
    op_name = svc._PY_TO_OP_NAME[func_name]
    return svc._service_model.operation_model(op_name)
a8ce8c7ab654e211189e22084161e794b4ae5a3a
104,528
def most_expensive_menu_item(restaurant):
    """
    Determines the most expensive item on a restaurant's menu.

    Fixes vs. the previous version: prices that are all zero or negative
    left the index at None and raised TypeError; an empty menu did the
    same.  Ties still resolve to the first highest-priced item.

    Parameters:
        restaurant (dict): A dictionary with three lists: items, prices,
            and cals

    Returns:
        str: The name of the most expensive item

    Raises:
        ValueError: if the restaurant has no priced items.
    """
    prices = restaurant['prices']
    if not prices:
        raise ValueError("restaurant has no menu items")
    # max over indexes; returns the first index of the highest price.
    best_index = max(range(len(prices)), key=lambda i: prices[i])
    return restaurant['items'][best_index]
a06e0ec925dc573a2dd6523497870dda663d051d
104,529
import shutil


def center(s):
    """Return a centered string (padded for the current terminal width)."""
    columns = shutil.get_terminal_size().columns
    padding = (columns - len(s)) // 2
    return ' ' * padding + s
f43aa9a487cb06410f3b1e177b4e00229a71b3a6
104,539
import sqlite3


def get_connection(blog_id):
    """Connects to a local sqlite database

    Parameters:
    ~~~~~~~~~~~
    blog_id : id
        The id of the blog whose database to connect to.

    Returns:
    ~~~~~~~~
    conn : sqlite3.Connection
        A sqlite3 Connection object, connected to a local database
        named "<blog_id>.db".
    """
    db_path = str(blog_id) + '.db'
    return sqlite3.connect(db_path)
95744aacac7dfb2539fff9bdf84cc96536429716
104,540
import re


def get_file_details(filename):
    """
    Gets the season format and number from a filename

    :param filename: string
    :return: (string, string, string), strings of name, season, episode
        or None when no SxxEyy token is found
    """
    parts = filename.split('.')
    for index, token in enumerate(parts):
        found = re.match(r'([sS]\d+)([eE]\d+)', token)
        if found:
            name = '.'.join(parts[:index])
            return name, found.group(1).upper(), found.group(2).upper()
    return None
3d0179a41a6eaeeb783c582559e266b3a4a2863b
104,543
def _imgref_for_invocation_digest(build_context_digest):
    """
    Returns the image reference to tag an image given the digest of its
    build context.  Such an image reference allows linking together a
    build context and an image.

    Args:
        build_context_digest: The hex digest for the build context.

    Returns:
        An image reference.
    """
    return 'build-context:{}'.format(build_context_digest)
5cf742e4f86e0f4dfb1a81d0bb87b6edf4fdaa3f
104,545
def get_playerdict(table):
    """ Returns a dictionary of seat indexes and players. """
    return {
        index: seat.player
        for index, seat in enumerate(table.seats)
        if seat.occupied()
    }
c363470e17151740a8979acc455046704d3b58cd
104,546
def array_equal(x, y, interval=None):
    """Check if two one-dimentional integer arrays are equal.

    Parameters
    ----------
    x, y : ndarray, int
        1D vectors of integers.
    interval : tuple, int shape (2, ), optional
        A pair of integers defining a half open interval.

    Returns
    -------
    equality : bool
        True if `x` and `y` are equal (within an interval if specified).
    """
    if interval is None:
        start, stop = 0, len(x)
    else:
        start, stop = interval
    return all(x[i] == y[i] for i in range(start, stop))
f2c5d6c0cd1f3b59f198c2322492f7af9e392880
104,550
import collections


def profile_light_trace(trace_data, indicator, frequency):
    """Profile trace data for light.

    Parameters
    ----------
    trace_data : dict
        Original trace data in light.
    indicator : list
        Indicator type to profile ("mem", "cycle", or "all").
    frequency : int
        NPU frequency

    Returns
    -------
    result : list[OrderedDict]
        The results of profiler.
    """
    want_mem = "mem" in indicator or "all" in indicator
    want_cycle = "cycle" in indicator or "all" in indicator

    result = []
    for layer in trace_data["layers"]:
        entry = collections.OrderedDict()
        entry["op"] = collections.OrderedDict(
            [("type", layer["ops"]), ("name", layer["names"])]
        )
        if want_mem:
            entry["memory"] = collections.OrderedDict(
                [
                    ("accum_ddr", layer["accum_ddr"]),
                    ("coeff_ddr", layer["coeff_ddr"]),
                    ("input_ddr", layer["input_ddr"]),
                    ("output_ddr", layer["output_ddr"]),
                ]
            )
        if want_cycle:
            entry["cycle"] = layer["cycles"]
            entry["time_ms"] = layer["cycles"] / frequency * 1000  # ms
        result.append(entry)
    return result
d223457c7e71fbce7fa161b37d859007f6fbfb6b
104,552
def wrdvi(nir, red, alpha=0.1):
    """
    Compute Wide Dynamic Range Vegetation Index from red and NIR bands

    WRDVI = \\frac { \\alpha NIR - RED } {\\alpha NIR + RED }

    :param nir: Near-Infrared band
    :param red: Red band
    :param alpha: Weighting coefficient, usually in [0.1-0.2]
    :return: WRDVI
    """
    weighted_nir = alpha * nir
    return (weighted_nir - red) / (weighted_nir + red)
a20a3f6909e96116fd8b5fa5bd8d7e859d638c16
104,553
def get_a_node(base, node_id, show_details=False):
    """Utility function that gets a Senlin node."""
    params = {'show_details': True} if show_details else None
    response = base.client.get_obj('nodes', node_id, params)
    return response['body']
7104fc4bdeceefd682ff45de3dcf309bbd1b4c46
104,556
def mappingSightingOpenIOC(etype):
    """Map the openioc types to threat sightings.

    Raises KeyError for an unknown entity type, as before.
    """
    openioc_terms = {
        "md5": "FileItem/Md5sum",
        "sha1": "FileItem/Sha1sum",
        "sha256": "FileItem/Sha256sum",
        "ipv4": "DnsEntryItem/RecordData/IPv4Address",
        "ipv6": "DnsEntryItem/RecordData/IPv6Address",
        "domain": "Network/URI",
        "dstHost": "Network/URI",
        "url": "UrlHistoryItem/URL",
        "file": "FileItem/FileName",
        "name": "FileItem/FileName",
        "path": "FileItem/FilePath",
        "key": "RegistryItem/KeyPath",
    }
    return openioc_terms[etype]
69f64e6c7296d3f91df57140a29fe1615707003a
104,558
def get_method(name, attrs, bases, exclude=None):
    """
    Gets a method of a class by name.

    :param name: Name of method to get.
    :type name: str
    :param attrs: Dictionary of class attributes.
    :type attrs: dict
    :param bases: Bases for the class to use for lookup.
    :type bases: list
    :param exclude: Iterable of bases to exclude from search.
    :type exclude: list
    :return: The class method or None
    """
    if exclude is None:
        exclude = []
    # If the method is present return it directly.
    if name in attrs:
        return attrs[name]
    # Try to find the method one by one, recursing depth-first through
    # each base's own bases.
    for b in bases:
        sub_method = get_method(name, b.__dict__, b.__bases__, exclude=exclude)
        if sub_method is not None:
            # NOTE(review): when the method is found via an excluded base,
            # this returns None immediately instead of continuing with the
            # remaining bases — confirm that short-circuit is intentional.
            if b in exclude:
                return None
            else:
                return sub_method
    # else return None
    return None
4438514fa1d74bcda05e045b323c942d1cbf6f66
104,562
from typing import List
from typing import Dict


def list_to_dict(list_of_dicts: List, key: str) -> Dict:
    """Convert a list of `dict_a` to a `dict_b` where the `key` in
    `dict_b` is an item in each `dict_a`.

    Fix vs. the previous version: the input dictionaries are no longer
    mutated — each entry is shallow-copied before the key is removed, so
    callers keep their original data intact.

    Args:
        list_of_dicts (List): list of items to convert to dict.
        key (str): Name of the item in `dict_a` to use as primary key
            for `dict_b`.

    Returns:
        A dictionary with items from the list organized by key (the key
        item itself is removed from each value dict, as before).
    """
    result = {}
    for item in list_of_dicts:
        entry = dict(item)  # shallow copy: don't mutate the caller's dict
        result[entry.pop(key)] = entry
    return result
cfd26a0ea2dfd2c804397767523ef508a952f354
104,563
def total_minutes_from(h, m):
    """Total Minutes from hour and minute."""
    return m + h * 60
e00c968a6fcc9dc991409b91cead7d540495dbb6
104,569
def heatmap_threshold(heatmap, threshold=1):
    """Zero out (in place) heatmap values at or below threshold and return
    the same array.
    """
    below = heatmap <= threshold
    heatmap[below] = 0
    return heatmap
a51ceec73e5413ac0540e9fb942a9d7a50dce065
104,570
def generate_new_url(page_num):
    """Generate a new habr.com "all posts" url by page number.

    Fix vs. the previous version: the base URL already ended with '/',
    so the format string produced a double slash
    ("https://habr.com/all//page2").
    """
    base_url = "https://habr.com/all"
    return "{}/page{}".format(base_url, page_num)
91ab4da2da57c1765cd03a3c78b021c2632f156b
104,571
def b(i, j):
    """Block coordinates: the 2x2 cell block whose top-left corner is (i, j)."""
    return [(i + di, j + dj) for di in (0, 1) for dj in (0, 1)]
ec69de045eb98c81b34dac030aadd8668641a8cb
104,572
def aspect_scale(img, rescale_tuple):
    """Get scaled image dimensions while retaining aspect ratio."""
    box_w, box_h = rescale_tuple
    img_w, img_h = img.get_size()
    if img_w > img_h:
        # Landscape: fit the width first, then shrink if the resulting
        # height still overflows the box.
        factor = box_w / float(img_w)
        new_h = factor * img_h
        if new_h > box_h:
            factor = box_h / float(img_h)
            new_w = factor * img_w
            new_h = box_h
        else:
            new_w = box_w
    else:
        # Portrait (or square): fit the height first, then shrink if the
        # resulting width still overflows the box.
        factor = box_h / float(img_h)
        new_w = factor * img_w
        if new_w > box_w:
            factor = box_w / float(img_w)
            new_w = box_w
            new_h = factor * img_h
        else:
            new_h = box_h
    return int(new_w), int(new_h)
431c9ff6b4162fe297677931df1ae5f67d001d84
104,576
import torch


def packed_to_padded(seq, target_packed):
    """Converts a sequence of packed outputs into a padded tensor

    Arguments:
        seq {list} -- List of lists of length T (longest sequence) where
            each sub-list contains output tokens for relevant samples.
            E.g. [len(s) for s in seq] == [8, 8, 8, 4, 3, 1] if batch has
            8 samples, longest sequence has length 6 and only 3/8 samples
            have length 3.
        target_packed {list} -- Packed target sequence.
            WARNING: mutated in place — an empty tensor is appended below.

    Return:
        torch.Tensor: Shape bs x T (padded with 0)

    NOTE: Assumes that padding index is 0 and stop_index is 3
    """
    T = len(seq)
    batch_size = len(seq[0])
    padded = torch.zeros(batch_size, T)
    stopped_idx = []
    # Append an empty tensor so that target_packed[t - 1] at t == 0 hits
    # index -1 (this sentinel) and no sample is marked stopped at the
    # first step.  Side effect: the caller's list is modified.
    target_packed += [torch.Tensor()]
    # Loop over tokens per time step
    for t in range(T):
        seq_lst = seq[t].tolist()
        tg_lst = target_packed[t - 1].tolist()
        # Insert Padding token (0) at each already-stopped sample's slot so
        # the column regains full batch size.  (Comprehension used purely
        # for its side effect on seq_lst.)
        [seq_lst.insert(idx, 0) for idx in sorted(stopped_idx, reverse=False)]
        padded[:, t] = torch.Tensor(seq_lst).long()
        # Samples whose previous target token is the stop index (3) are
        # finished from the next time step onward.
        stop_idx = list(filter(lambda x: tg_lst[x] == 3, range(len(tg_lst))))
        stopped_idx += stop_idx
    return padded
0de3c8b81f2770827a83b97f95b56aa4d33250d3
104,582
def decompile_scriptPubKey(asm):
    """Serialize a P2PKH scriptPubKey from asm form to hex.

    Fix vs. the previous version: the 0x14 opcode (push 20 bytes) that
    must precede the pubkey hash was omitted, so the function's own
    doctest (expecting '76a914...') failed.

    >>> decompile_scriptPubKey('OP_DUP OP_HASH160 cef3550ff9e637ddd120717d43fc21f8a563caf8 OP_EQUALVERIFY OP_CHECKSIG')
    '76a914cef3550ff9e637ddd120717d43fc21f8a563caf888ac'
    """
    tokens = asm.split(" ")
    hex_script = ""
    if tokens[0] == 'OP_DUP':
        hex_script += "76"
    if tokens[1] == 'OP_HASH160':
        hex_script += 'a9'
    if len(tokens[2]) == 40:
        # 0x14 pushes the following 20 bytes (the pubkey hash).
        hex_script += '14' + tokens[2]
    if tokens[3] == 'OP_EQUALVERIFY':
        hex_script += '88'
    if tokens[4] == 'OP_CHECKSIG':
        hex_script += 'ac'
    return hex_script
b54f613f07f18a06114cefaa33def810dfed8078
104,586
def splitIntoTrainTest(data, frac=0.1):
    """
    Split the data into train/test sets.

    @frac is the fraction of data points that need to be taken to make
    the testing set.  @data is tuple (X, Y).

    Returns ((X_train, Y_train), (X_test, Y_test))
    """
    X, Y = data
    # Index separating the training and testing portions.
    marker = int(len(X) * (1 - frac))
    train = (X[:marker], Y[:marker])
    test = (X[marker:], Y[marker:])
    return train, test
b36bfd6444f0594b78ab96bf3ee73bf70ca59ee3
104,594
def __reduce_metrics_by(metrics):
    """
    Auxiliary function used to reduce the metrics by the keys in a
    dictionary.  Example: reduce by turn or by query_type.

    metrics - dict with keys as the reduce type; each value is a tuple
    (dict_metrics, number_turns) where dict_metrics maps metric names to
    accumulated totals.  Each total is divided by number_turns, and the
    turn count is stored under "number_tuns" (key kept as-is for
    compatibility).
    """
    final_metrics = {}
    for group, (group_metrics, n_turns) in metrics.items():
        averaged = {name: total / n_turns for name, total in group_metrics.items()}
        averaged["number_tuns"] = n_turns
        final_metrics[group] = averaged
    return final_metrics
b71b1ee083cfd696dfb82c3d41a09cc350db338f
104,596
import time


def format_time(parts, compact=False):
    """
    Format a split-up message ID to a timestamp string

    If compact is true, an alternate slightly shorter representation
    without whitespace is emitted.  Both compact and "long" forms have a
    fixed width, and are recognized by parse_time().
    """
    sec, ms, seq = parts
    stamp = time.gmtime(sec)
    if compact:
        date_part = time.strftime('%Y-%m-%d_%H:%M:%S', stamp)
        return '%s.%03d#%04d' % (date_part, ms, seq)
    date_part = time.strftime('%Y-%m-%d %H:%M:%S', stamp)
    return '%s.%03d Z, #%04d' % (date_part, ms, seq)
8deaa3e092186367d98167f7d826bdd9585fe1d1
104,600
def is_valid_file(ext, argument):
    """ Checks if file format is compatible """
    # Allowed extensions per CLI argument name.
    allowed = {
        'input_dataset_path': ('csv',),
        'output_model_path': ('pkl',),
        'output_dataset_path': ('csv',),
        'output_results_path': ('csv',),
        'input_model_path': ('pkl',),
        'output_test_table_path': ('csv',),
        'output_plot_path': ('png',),
    }
    return ext in allowed[argument]
a73fb2091d19ad9e108999dba56e99b326a9bfa3
104,603
from datetime import datetime


def dmy_to_ymd(date):
    """Parse a 'DD/Mon/YYYY:HH:MM:SS' string (e.g. the Common Log Format
    timestamp '10/Oct/2000:13:55:36') into a datetime object.

    Note: the original docstring claimed a 'YYYY/MM/DD:HH:MM:SS' string
    was returned; the function actually returns a ``datetime`` instance.

    The parameter date is the date string to transform.

    :raises ValueError: if the string does not match the expected format.
    """
    return datetime.strptime(date, '%d/%b/%Y:%H:%M:%S')
9320f0b00df6591b6b90c0f160b129a6081fe0f7
104,607
def body_formatter_user(user):
    """Create the body of an email using the user information.

    Parameters
    ----------
    user : :class:`ramp_database.model.User`
        The user profile.

    Returns
    -------
    body : str
        The email body.
    """
    # NOTE(review): the template layout below reflects the source as seen
    # here; the placeholders fill, in order: name, firstname, lastname,
    # email, linkedin_url, twitter_url, facebook_url, github_url,
    # hidden_notes, bio.
    body = """ user = {} name = {} {} email = {} linkedin = {} twitter = {} facebook = {} github = {} notes = {} bio = {} """.format(
        user.name,
        user.firstname,
        user.lastname,
        user.email,
        user.linkedin_url,
        user.twitter_url,
        user.facebook_url,
        user.github_url,
        user.hidden_notes,
        user.bio,
    )
    return body
364a2019c089f6e034483f76dc16830d200fdf04
104,611
import logging


def get_subarray_info(params, subarray_table):
    """Find aperture-specific information from the subarray information config file

    Parameters:
    -----------
    params : dict
        Nested dictionary containing Mirage parameters.

    subarray_table : astropy.table.Table
        Table containing subarray definition information. Output from
        read_subarray_definition_file

    Returns:
    --------
    params : dict
        Updated nested dictionary (params['Readout']['namp'] filled in)
    """
    logger = logging.getLogger('mirage.utils.utils.get_subarray_info')

    array_name = params['Readout']['array_name']
    # For MASKSWB and MASKLWB apertures, the filter name is part of the aperture
    # name. But the subarray definition file contains only an entry for the aperture
    # name without the filter name. In that case, strip off the filter name here
    # before checking the definition file
    if '_MASKLWB' in array_name or '_MASKSWB' in array_name:
        pieces = array_name.split('_')
        array_name = '{}_{}'.format(pieces[0], pieces[1])

    if array_name in subarray_table['AperName']:
        # Boolean mask selecting this aperture's row in the table.
        mtch = array_name == subarray_table['AperName']
        namps = subarray_table['num_amps'].data[mtch][0]
        if namps != 0:
            params['Readout']['namp'] = int(namps)
        else:
            # num_amps == 0 means the aperture supports either 1 or 4 amps;
            # the user's yaml must then pick one.
            try:
                if ((params['Readout']['namp'] == 1) or (params['Readout']['namp'] == 4)):
                    logger.info(("CAUTION: Aperture {} can be used with either "
                                 "a 1-amp or a 4-amp readout. The difference is a factor of 4 in "
                                 "readout time. You have requested {} amps.".format(subarray_table['AperName'].data[mtch][0], params['Readout']['namp'])))
                else:
                    raise ValueError(("WARNING: {} requires the number of amps to be 1 or 4. Please set "
                                      "'Readout':'namp' in the input yaml file to one of these values."
                                      .format(array_name)))
            except KeyError:
                # 'namp' missing from the yaml entirely.
                raise KeyError(("WARNING: 'Readout':'namp' not present in input yaml file. "
                                "{} aperture requires the number of amps to be 1 or 4. Please set "
                                "'Readout':'namp' in the input yaml file to one of these values."
                                .format(array_name)))
    else:
        raise ValueError(("WARNING: subarray name {} not found in the "
                          "subarray dictionary {}."
                          .format(array_name, params['Reffiles']['subarray_defs'])))
    return params
60a56bb05c4478257c2801f7465834e13208794a
104,614
def is_secure_scheme(scheme: str) -> bool:
    """Check if the given scheme is secure.

    Args:
        scheme: Scheme to check

    Returns:
        Whether the scheme is secure.
    """
    secure_schemes = frozenset({"tcps", "tcp4s", "tcp6s", "rss"})
    return scheme in secure_schemes
15806677c831d7fcdbab205f4d04e1b9effd6b62
104,618
import math


def comb(n: int, k: int) -> int:
    """Return the possible combination count (binomial coefficient) of
    given n and k.

    Improvement: delegates to ``math.comb`` (Python 3.8+) instead of
    computing three full factorials.  The explicit n < k guard is kept so
    behavior is unchanged for every input (including negative pairs with
    n < k, which return 0 as before).

    :raises ValueError: for negative arguments where the original raised
        (via math.factorial).
    """
    if n < k:
        return 0
    return math.comb(n, k)
fa3e17cac4d6d4f75aa549b8986cdbede3a9508b
104,619
import hashlib


def salt(pin):
    """
    Returns a salt given the supplied pin.

    The digest is SHA-1 over the pin sandwiched between fixed 0x55/0xaa
    padding blocks on both sides.

    :param bytes pin: the pin
    :rtype: bytes
    """
    pad_55 = b'\x55' * 8
    pad_aa = b'\xaa' * 8
    digest = hashlib.sha1()
    for chunk in (pad_55, pad_aa, pin, pad_55, pad_aa):
        digest.update(chunk)
    return digest.digest()
f96b320f2f7f7fcfb7aaadd5a5fb4619cc11ae15
104,622
import filecmp


def comp_fnames_file_equality(fname1, fname2):
    """
    Compare filenames: File equality: Check if contents of files are
    identical.  Equality = True/False.  Uses filecmp byte-comparison
    (more efficient than md5).
    """
    # Fast path: compare os.stat signatures first.
    if filecmp.cmp(fname1, fname2, shallow=True) is not True:
        return False
    # os.stat seems the same; check contents to confirm identical.
    return filecmp.cmp(fname1, fname2, shallow=False)
61647f46d8d25484d15167f332f670c08ea1ad5b
104,625
def cm2inch(*values):
    """Convert from centimeter to inch

    Parameters
    ----------
    *values

    Returns
    -------
    tuple

    Examples
    --------
    >>> cm2inch(2.54, 5.08)
    (1.0, 2.0)
    """
    CM_PER_INCH = 2.54
    return tuple(value / CM_PER_INCH for value in values)
13ba2cbcffd277e5c83cf1b0d79d1cf9abe650ec
104,631
def list_datasets(group, prefix="", res=None):
    """Return a list of all datasets under the specified group.

    Fix vs. the previous version: ``group.iteritems()`` is the Python 2
    idiom (also removed from modern h5py); ``items()`` is used instead.

    Objects exposing ``.shape`` are recorded as datasets; objects
    exposing ``.keys`` are treated as sub-groups and examined
    recursively.  Paths are built as "<prefix>/<name>".
    """
    if res is None:
        res = []
    # Loop over objects in the group
    for name, obj in group.items():
        path = str(prefix + "/" + name)
        if hasattr(obj, "shape"):
            # Dataset, so record name
            res.append(path)
        if hasattr(obj, "keys"):
            # Group, so recursively examine it
            list_datasets(obj, path, res)
    return res
a99f407f4c7215d525012110effb9fb0797d928a
104,636
def get_independent_nodes(dag):
    """Get the set of all nodes in the graph with no dependencies.

    Improvements: the docstring said "list" but a set was (and still is)
    returned; the redundant ``set()`` wrappers around the difference and
    around a list comprehension are replaced with a set comprehension.

    Args:
        dag: mapping of node -> iterable of downstream nodes.

    Returns:
        set: nodes that no other node lists as downstream.
    """
    dependent = {node for downstream in dag.values() for node in downstream}
    return set(dag.keys()) - dependent
6f714bae30d837fb1dba673f3027c6ec6e57c884
104,638
def clamp_rgb(values: tuple[int, ...]) -> tuple[int, ...]:
    """Clamp each channel in a given tuple into the range [0, 156].

    Arguments:
        values: A tuple of RGB(A) values to be clamped.

    Returns:
        The clamped result of `values`.
    """
    clamped = []
    for channel in values:
        if channel < 0:
            clamped.append(0)
        else:
            clamped.append(min(156, channel))
    return tuple(clamped)
ac95062aec1244ce8808ed70034c488ea461fbc9
104,639
from pathlib import Path


def is_in_container() -> bool:
    """Return true if this process is likely running inside of a container."""
    # Well-known marker files dropped by Docker and Podman.
    # https://stackoverflow.com/a/49944991/38265 and https://github.com/containers/podman/issues/3586
    if Path("/.dockerenv").exists() or Path("/run/.containerenv").exists():
        return True
    # Fall back to inspecting the cgroup hierarchy for a docker scope.
    cgroup = Path("/proc/self/cgroup")
    return cgroup.exists() and "docker" in cgroup.read_text("utf-8")
3b2c26b055a6ede4e56ca4c9b3480f8489f46ec0
104,641
import asyncio


def invoke_plugins(plugins, func, *args):
    """Invoke an async function concurrently on every plugin that defines it.

    :param plugins: a dict of plugins (name -> plugin)
    :param func: the name of the coroutine method to invoke if it exists
    :type func: str
    :param *args: the arguments to pass to each plugin's coroutine
    :returns: Future instance aggregating all the invocations
    """
    pending = []
    for plugin in plugins.values():
        if not hasattr(plugin, func):
            continue
        pending.append(getattr(plugin, func)(*args))
    return asyncio.gather(*pending)
85b1f9f62fa9e88a64a440b2240d49a89f319aba
104,645
from typing import Optional
import re


def isolate_timestamp_in_release(release: str) -> Optional[str]:
    """Extract a plausible yyyyMMddHHmm timestamp from a release string.

    Returns the 12-digit timestamp when one is found whose fields are
    plausible (year >= 2000, month 1-12, day 1-31, hour <= 23,
    minute <= 59); otherwise returns None.
    """
    match = re.search(r"(\d{4})(\d{2})(\d{2})(\d{2})(\d{2})", release)  # yyyyMMddHHmm
    if not match:
        return None
    year, month, day, hour, minute = (int(part) for part in match.groups())
    plausible = (
        year >= 2000
        and 1 <= month <= 12
        and 1 <= day <= 31
        and hour <= 23
        and minute <= 59
    )
    return match.group(0) if plausible else None
c77db9ec41341e4b08730d56e8e316023b8ce81f
104,650
import re


def simple_tokenize(document):
    """Clean up a document and split it into a list of words.

    Lowercases the document, replaces every character that is not a
    lowercase letter or digit with a space, and splits on whitespace.
    """
    normalized = re.sub('[^a-z0-9]', ' ', document.lower())
    return normalized.strip().split()
28b793c02988d8049a5f35d864350af00dc1d0db
104,652
def dd_duration_map_nb(record):
    """`map_func_nb` returning the total duration of a drawdown,
    i.e. the record's end index minus its start index."""
    start, end = record['start_idx'], record['end_idx']
    return end - start
b6711c9ea70bf04ca4a6d26d219d939ab159225e
104,656
def is_numeric(value):
    """This function checks whether or not a value is numeric either as
    an integer or a numeric string.

    .. versionadded:: 2.3.0

    :param value: The value to be examined
    :type value: str, int
    :returns: Boolean value indicating if the examined value is numeric
    """
    # isinstance(True, int) is True, so bools must be excluded explicitly
    # to preserve the original type(value) == int semantics.
    if isinstance(value, int) and not isinstance(value, bool):
        return True
    return isinstance(value, str) and value.isnumeric()
8218652ff4029775feda6143ba019320f066c1da
104,657
def pres_text(trial: int) -> str:
    """
    input: current presentation trial # (int)
    output: presentation instruction text (string) for given presentation trial

    Trial 0 receives the full first-run instructions; every later trial
    receives the shorter "rest / next round" reminder text.
    """
    # Full instructions shown before the very first presentation trial.
    # NOTE(review): the on-screen text below is runtime output and is kept
    # verbatim, including the original "Immeditaely" spelling.
    pres1 = ' Now we will begin the main experiment! ' \
            'Again you will see cue icons, followed by a series of image pairs and letters (and a fixation cross).' \
            '\n\n Remember to: ' \
            '\n\n Keep your eyes staring at the cross' \
            '\n Shift your attention to the SAME cued side and part for EACH pair' \
            '\n Immeditaely press 1 ("x") or 3 ("o") when you see the letter ' \
            '\n\n Do you have questions? Ask them now! ' \
            '\n Otherwise, position your hand over the 1 and 3 buttons, clear your mind, and press any key to begin. '
    # Shorter reminder shown between all subsequent trials.
    pres2 = ' Feel free to take a moment to rest, if you like! ' \
            ' When you\'re ready, we will do another round with a cue, followed by image pairs and letters.' \
            ' \n\n Remember to: ' \
            '\n Keep your eyes staring at the cross' \
            '\n Shift your attention to the SAME cued side and part for EACH pair' \
            '\n Immeditaely press 1 ("x") or 3 ("o") when you see the letter ' \
            '\n\n Press any key to begin. '
    instructions = [pres1, pres2]
    # Any trial after the first (trial >= 1) uses the short reminder text.
    if trial >= 1:
        num = 1
    else:
        num = 0
    return(instructions[num])
aa905aa513ea83a25286ce3b8db22dcbfa3d148c
104,660
def group_signals(signal_list, tolerance=0.1):
    """Groups signals into signal classes using a given tolerance value.

    Parameters
    ----------
    signal_list : list of Signal
        List members are either all Pulse objects or all Gap objects.
    tolerance : float, optional
        Relative tolerance: a signal joins the current group while its
        length is below ``previous_length * (1 + tolerance)``.

    Returns
    -------
    list of list of Signal
        Each sublist contains the sorted durations believed to be
        variants of a single type of signal. An empty input yields [].
    """
    # Guard: the original indexed sorted_signals[0] and raised IndexError
    # on an empty list.
    if not signal_list:
        return []
    sorted_signals = sorted(signal_list, key=lambda s: s.length)
    max_tol = 1 + tolerance
    groups = [[sorted_signals[0]]]
    # Walk consecutive pairs; extend the current group while the next
    # length is within tolerance of the previous one.
    for prev, cur in zip(sorted_signals, sorted_signals[1:]):
        if cur.length < prev.length * max_tol:
            groups[-1].append(cur)
        else:
            groups.append([cur])
    return groups
bf724bfd9d354028ca0140fa867e88f675652494
104,661
import yaml


def yaml_from_dict(dictionary):
    """Serialize *dictionary* to block-style YAML text."""
    yaml_text = yaml.safe_dump(dictionary, default_flow_style=False)
    return yaml_text
a412e0aa88489eb565012fe4dc597272ebd24c8e
104,663
def MakeResult(output_api, message, committing, modified_files=None):
    """Makes a gcl result.

    Returns a PresubmitError when |committing| is True, otherwise a
    PresubmitNotifyResult; both carry |message| and |modified_files|
    (which defaults to an empty list)."""
    files = modified_files or []
    factory = (output_api.PresubmitError if committing
               else output_api.PresubmitNotifyResult)
    return factory(message, files)
288c23261b35b54cec5ecd94036ec6a527413edd
104,667
from typing import MutableSequence
from typing import MutableMapping


def to_plain_python(obj):
    """Recursively convert dict-like/list-like containers (such as the
    output of loading via ``ruamel.yaml``) into plain builtin dicts and
    lists. Non-container values are returned unchanged.
    """
    if isinstance(obj, (dict, MutableMapping)):
        return {key: to_plain_python(value) for key, value in obj.items()}
    if isinstance(obj, (list, MutableSequence)):
        return [to_plain_python(item) for item in obj]
    return obj
a7ea06435fa110351df2ceae01e5e3b98ea47219
104,668
def get_event_data_section(raw):
    """
    Helper to check whether the base XML of a record carries an
    'EventData' or 'UserData' key. Records recovered by log-repair
    software may lack both, which indicates corruption.

    :param raw: parsed record containing an 'Event' mapping.
    :return: whether or not one of the keys exists.
    """
    event = raw['Event']
    return 'EventData' in event or 'UserData' in event
009519404029d5bea928778d230765a9fcc22b52
104,677
import threading


def threaded(fn):
    """
    Thread wrapper shortcut using @threaded prefix

    Args:
        fn (function): The function to execute on a new daemon thread.

    Returns:
        (function): Wrapper that starts fn on a new thread and returns
        the started ``threading.Thread`` instance.
    """
    from functools import wraps  # local import: keeps module-level deps unchanged

    @wraps(fn)  # preserve fn's __name__/__doc__ on the wrapper
    def wrapper(*args, **kwargs):
        thread = threading.Thread(target=fn, args=args, kwargs=kwargs)
        # Daemon so a stray worker never blocks interpreter shutdown.
        thread.daemon = True
        thread.start()
        return thread
    return wrapper
ace3c08259a3777d4bf69183933628865ec6b8ee
104,680
def parse2tuple(inp_str):
    """Parse a string such as '(3, 3)' into a tuple of ints.

    :param inp_str: string of the form: '(3, 3)'
    :return: tuple => parsed tuple
    """
    inner = inp_str[1:-1]  # drop the surrounding parentheses
    return tuple(int(piece) for piece in inner.split(','))
436c5eb4578bec5dfb6167a9739d7d8463964d5a
104,685
def hamming_distance(s1, s2):
    """Return the Hamming distance: the count of positions at which the
    corresponding symbols of the two strings differ. Trailing symbols of
    the longer string are ignored (pairwise comparison stops at the
    shorter length)."""
    differing = 0
    for a, b in zip(s1, s2):
        if a != b:
            differing += 1
    return differing
7086fae0c2dc6a023178679efdbbeed717fe58ed
104,686
def select_date_range(df, start_date, end_date, drop_zero_cols=True):
    """Slice a datetime-indexed dataframe to [start_date, end_date].

    Either bound may be None to leave that side open. When
    drop_zero_cols is True, columns that are entirely zero in the
    resulting slice are removed."""
    result = df.copy()
    if start_date is not None:
        result = result[result.index >= start_date]
    if end_date is not None:
        result = result[result.index <= end_date]
    if drop_zero_cols:
        all_zero = (result == 0).all()
        result = result[result.columns[~all_zero]]
    return result
ed7609c4d122846e0c54382a1433bb9e734d475a
104,688
def apply_series_method(df, column, method, **method_kws):
    """Select df[column] and call the named Series method on it.

    Parameters:
        df (DataFrame): pandas dataframe
        column (str): Column to select from the dataframe
        method (str): Method name to call on the selected Series
        **method_kws: Passed through to the method

    Returns:
        any: result of getattr(df[column], method)(**method_kws)
    """
    series = df[column]
    bound_method = getattr(series, method)
    return bound_method(**method_kws)
ef327bf5b913ba06c734e7c0c7b35daa51a40169
104,691
def create_multiplier_function(m):
    """Given a multiplier m, returns a closure that multiplies its
    input by m."""
    def _scale(value):
        return value * m
    return _scale
47aac306ef7b347c6ad9b0f951ee34397a960cc6
104,695
import pickle


def load_data(file_name):
    """Unpickle and return the object stored at data/<file_name>.pickle."""
    path = "data/" + file_name + ".pickle"
    with open(path, "rb") as handle:
        return pickle.load(handle)
8f0d69157e751bf20cde2f9d2acaa1dafe36ee8e
104,699
import re


def is_fasta(filename):
    """Check if filename is FASTA based on extension.

    Accepts ``.fa`` plus the loose family the original regex matched
    (``.fas``, ``.fast``, ``.fasta``, ...), case-insensitively.

    Return: Boolean
    """
    # Raw strings avoid the invalid '\.' escape warnings of the original
    # plain-string patterns; bool() collapses the if/elif/else into one
    # expression with identical results.
    return bool(
        re.search(r"\.fa*s[ta]*$", filename, flags=re.I)
        or re.search(r"\.fa$", filename, flags=re.I)
    )
424062726548707bd2e6fb7d8af8b3684a959542
104,703
import re


def cleanPath(node):
    """Return the substring of a string matching chars approved for use
    in our URL paths (letters, digits, '-', '/', ',' and '.')."""
    # The original passed flags=re.DOTALL, which is a no-op here: the
    # pattern contains no '.' metacharacter, only a literal dot inside
    # the character class. The raw string also silences the invalid
    # escape warnings of the plain-string pattern.
    return re.sub(r'[^a-zA-Z0-9\-/,\.]', '', str(node))
1dd59bb4dcc462930b25307869294ad7a025bd09
104,704
def cycle_slice(sliceable, start, end):
    """Slice *sliceable* as a cycle, walking rightwards from index
    ``start`` up to (but excluding) index ``end``, wrapping around the
    end of the sequence when needed.

    Example::

        >>> array = [0, 1, 2, 3]
        >>> cycle_slice(array, 1, 3)  # from array[1] to array[3]
        [1, 2]
        >>> cycle_slice(array, 3, 1)  # from array[3] to array[1]
        [3, 0]
    """
    if type(sliceable) != list:
        sliceable = list(sliceable)
    n = len(sliceable)
    if n == 0:
        raise ValueError("sliceable cannot be empty!")
    lo, hi = start % n, end % n
    if hi > lo:
        return sliceable[lo:hi]
    # Wrap-around (including lo == hi, which yields a full cycle).
    return sliceable[lo:] + sliceable[:hi]
d45e7667336734178afc7bcf39845babf863f7b1
104,705
def recursive_itemgetter(data_structure, keys):
    """Follow *keys* through a nested mix of lists/dicts via repeated
    __getitem__ lookups and return the value reached."""
    node = data_structure
    for step in keys:
        node = node[step]
    return node
933f3a9332f1eb79c0a57de1b46e624a0ac3c751
104,709
def merge_rules(default_rules, custom_rules):
    """
    Merge both rule sets. Rules under 'Headers' defined in
    'custom_rules' that are also present in 'default_rules' are
    overridden; new rules are added.

    Note: 'default_rules' is updated in place and then returned.

    :param default_rules: base file object of rules.
    :type default_rules: dict
    :param custom_rules: override file object of rules.
    :type custom_rules: dict
    :return: final rule
    :rtype: dict
    """
    default_rules['Headers'].update(custom_rules['Headers'])
    return default_rules
d7249d0b9a9e22c315e9c8240df2796052f6cbf8
104,712
import re


def extract_emails_from_page(soup):
    """
    Return the list of email addresses found in the page, located by
    scanning the page's string form with a regex. Returns [] when the
    page cannot be stringified or contains no addresses.
    """
    # Raw string avoids invalid-escape warnings; the pattern bytes are
    # unchanged from the original.
    email_pattern = re.compile(r'([\w\-\.+]+@(\w[\w\-]+\.)+[\w\-]+)')
    try:
        page_content = str(soup)
    except Exception:
        # Was a bare 'except:'; narrowed so SystemExit/KeyboardInterrupt
        # are no longer swallowed, while keeping the best-effort skip.
        print('Error parsing page. Skipped\n')
        return []
    matches = email_pattern.findall(page_content)
    # findall yields (full_match, last_group) tuples; keep the full match.
    return [match[0] for match in matches]
2bd1501e60e8f4ff10242a559317e635a571aa20
104,716
from fcntl import ioctl
from termios import FIONCLEX
from termios import FIOCLEX


def set_fd_inheritable(fd, inheritable):
    """
    Toggle the "inheritability" (close-on-exec flag) of a file descriptor.

    FIONCLEX clears close-on-exec (the fd becomes inheritable); FIOCLEX
    sets it (the fd is closed across exec).

    See Also:
        https://docs.python.org/3/library/os.html#inheritance-of-file-descriptors
        https://github.com/python/cpython/blob/65e6c1eff3/Python/fileutils.c#L846-L857
    """
    request = FIONCLEX if inheritable else FIOCLEX
    return ioctl(fd, request)
132cd6c1a386ce7cbbada89b248a1e7eef6203aa
104,718
import random


def random_sequence(n):
    """Return a list of *n* random bits, each either 0 or 1."""
    bits = []
    for _ in range(n):
        bits.append(random.choice([0, 1]))
    return bits
294b1cca896351e1b03f88379cd8645e9573048a
104,719
def _is_at_level(obj, level): """Checks if `obj` is an at level `level`.""" is_leaf = not isinstance(obj, (list, tuple, dict)) if level == 0 or is_leaf: return (level == 0) == is_leaf if isinstance(obj, dict): elems = obj.values() else: elems = obj return elems and all(_is_at_level(x, level - 1) for x in elems)
abcf3bc8ef302695ee08c429a2071b22a7230079
104,721
def break_string_sequence_to_words(seq):
    """
    Break a sequence of (possibly multi-word) strings into the set of
    individual words they contain.

    :param seq: The sequence containing multi-word strings
    :return: A set containing all the individual words

    >>> break_string_sequence_to_words(['hello world', 'foo bar', 'hello', 'red']) \
    == set({'hello', 'world', 'foo', 'bar', 'red'})
    True
    """
    words = set()
    for phrase in seq:
        words.update(phrase.split())
    return words
52046b3b81a1e8864a4fd238a1df31fcf681b284
104,724
def get_vxlan_ip(n):
    """Map index *n* to an address in 192.168.1.1 - 192.168.255.254,
    never producing a .0 or .255 final octet.

    Raises:
        ValueError: when n is outside [0, 254*255); the original
        silently produced an invalid third octet (> 255) there.
    """
    if not 0 <= n < 254 * 255:
        raise ValueError("n out of range for the 192.168.0.0/16 pool: %r" % n)
    quot, rem = divmod(n, 254)
    return "192.168.%s.%s" % (quot + 1, rem + 1)
c9723871eda47506fae33b3d78c7461fe38ba4f1
104,729
def _single_element(node, name): """If node contains one or more children with nodename "name", return the first one. If not, return None""" elems = node.getElementsByTagName(name) if elems: return elems[0] else: return None
c4eb1b0bb22e826c07152f97c9deffd69458a1ab
104,733
def cast_cap_words_to_lower(string: str) -> str:
    """
    Cast CapWords format to lower case with underscores.

    >>> cast_cap_words_to_lower("ClassName")
    'class_name'
    >>> cast_cap_words_to_lower("AnotherOne")
    'another_one'

    :param string: any str
    :return: str in lower case with underscores
    """
    if not string:
        return string
    pieces = [string[0].lower()]
    for char in string[1:]:
        if char.isupper():
            pieces.append("_")
        pieces.append(char.lower())
    return "".join(pieces)
5d3a8106d2c862712421613fbb308d9261d5b2bc
104,742
def interpolate_indices(x0, y0, x1, y1, x):
    """Linearly interpolate the integer-valued function through
    (`x0`, `y0`) and (`x1`, `y1`) and evaluate it at `x`, using floor
    division to stay in integers."""
    span = x1 - x0
    weighted = y1 * (x - x0) + y0 * (x1 - x)
    return weighted // span
555f18bc0f7c912db06b48a88a697dbacf75c449
104,747
def subtract(d1, d2):
    """Returns a set of all keys that appear in d1 but not d2.

    d1, d2: dictionaries
    """
    return {key for key in d1 if key not in d2}
0c357321470e5c40c1a9b45750b61aa2af4f372c
104,749
def smooth_series(radius, col):
    """
    Return a "smoothed" copy of the input data: a centered rolling mean
    whose window spans *radius* values on each side (window = 2*radius + 1).

    Args:
        radius (int): number of neighbours on each side to include in the
            rolling mean (e.g. radius = 1 averages values i-1, i and i+1)
        col (pd.Series): column data to be smoothed

    Returns the original column unchanged when radius is not positive.
    """
    if radius <= 0:
        return col
    window_size = 2 * radius + 1
    rolling = col.rolling(window_size, min_periods=1, center=True)
    return rolling.mean()
a1ad9eb1af09c10547092c3a54f816596094c5dd
104,750
import torch
from typing import OrderedDict

def test(model, testloader, criterion, device='cpu', t=None, best_acc=None):
    """Test accuracy and loss of model on a dataloader object.

    :param model: The model to test.
    :param testloader: Dataloader object with images to test.
    :param criterion: The loss function for measuring model loss.
    :param device: 'cuda' if running on gpu, 'cpu' otherwise
    :param t: Optional tqdm object for showing progress in terminal.
    :param best_acc: Optional parameter to keep track of best accuracy
        in case code is run in multiple iterations.
    :return Accuracy of test (percentage of correct top-1 predictions)
    """
    # Initialization of variables
    model.eval()  # evaluation mode: freezes dropout/batch-norm behaviour
    test_loss = 0
    correct = 0
    total = 0
    with torch.no_grad():  # no gradients needed during evaluation
        for batch_idx, (inputs, targets) in enumerate(testloader):
            inputs, targets = inputs.to(device), targets.to(device)
            # Forwards pass
            outputs = model(inputs)
            # Compute loss
            loss = criterion(outputs, targets)
            test_loss += loss.item()
            # Top-1 prediction per sample (index of the max logit).
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()
            # Print results to terminal.
            # NOTE(review): the tqdm path is only taken with num_workers == 0,
            # presumably to avoid garbled output from worker processes -- confirm.
            if t is not None and testloader.num_workers == 0:
                od = OrderedDict()
                od['type'] = 'test'
                od['loss'] = test_loss / (batch_idx + 1)  # running mean loss
                od['acc'] = '{:.3f}'.format(100. * correct / total)
                if best_acc is not None:
                    od['test_acc'] = best_acc
                od['iter'] = '%d/%d' % (total, len(testloader.dataset))
                t.set_postfix(ordered_dict=od)
                t.update(inputs.shape[0])
            else:
                print('Loss: %.3f | Acc: %.3f%% (%d/%d)'
                      % (test_loss/(batch_idx+1), 100.*correct/total, total, len(testloader.dataset)))
    # Final accuracy over the whole test set, as a percentage.
    acc = 100.*correct/total
    return acc
08eceb5d7d4e26346124cd85f96aab421f3484b7
104,757
def downsampleEvents(events, oldFS, newFS):
    """Rescale event timestamps (column 0) from one sampling rate to another.

    Parameters
    ----------
    events : instance of pandas.core.DataFrame
        Event list as obtained with mne.find_events(raw); column 0 holds
        the sample indices.
    oldFS : float
        The sampling frequency of the input events.
    newFS : float
        The sampling frequency of the output events.

    Returns:
        Copy of *events* with column 0 rescaled, rounded and cast to int.
    """
    resampled = events.copy()
    scaled = (resampled[0] / oldFS) * newFS
    resampled[0] = scaled.round(0).astype('int')
    return resampled
73f221f5a58ba7caf9717948fb49061439363582
104,758
import pickle


def load_obj(filename):
    """
    Load an object that was previously pickled using save_obj().

    :param filename: path without the '.pkl' extension
    :return: the unpickled object
    """
    path = filename + '.pkl'
    with open(path, 'rb') as f:
        return pickle.load(f)
b662a93b426554a7119d88cb68634ec08bd1f5f7
104,759
def filter_data(df, date_from=None, date_to=None, countries=None):
    """Filter covid data by date range and country list.

    Parameters
    ----------
    df : pandas.DataFrame
        Covid dataframe with at least 'date' and 'location' columns.
    date_from : str, optional
        Start date 'YYYY-MM-DD'; None means the earliest date available.
    date_to : str, optional
        End date 'YYYY-MM-DD'; None means the latest date available.
    countries : list, optional
        Target country names; None or an empty list keeps all locations.

    Returns
    -------
    pandas.DataFrame
        Copy of the selected covid data.

    Examples
    --------
    >>> filter_data(df, date_from="2022-01-01", date_to="2022-01-07",
    ...             countries=["Canada", "United States"])
    """
    # None default instead of a mutable [] default argument.
    if countries is None:
        countries = []
    if date_from is None:
        date_from = df["date"].min()
    if date_to is None:
        date_to = df["date"].max()
    query = "@date_from <= date <= @date_to"
    if countries:
        # '@countries' is resolved from this local scope by DataFrame.query.
        query += " and location in @countries"
    # Debug print() calls from the original were removed.
    return df.query(query).copy()
adc8a6bc3f03c0ac137156ed631598e14100b315
104,761
from typing import Dict
from typing import Any
from typing import List


def get_leaf_paths(tree: Dict[str, Any]) -> List[List[str]]:
    """Return the root-to-leaf key paths of a nested dict.

    Each path is the list of keys leading from the top of *tree* down to
    a node whose children mapping is empty/falsy. A fresh accumulator is
    created per call, so repeated calls never share state.
    """
    paths: List[List[str]] = []

    def walk(subtree: Dict[str, Any], prefix: List[str]) -> None:
        for key, children in subtree.items():
            if children:  # not a leaf
                walk(children, prefix + [key])
            else:
                paths.append(prefix + [key])

    walk(tree, [])
    return paths
a420c4be1f04fcae361ebfa49bbf41be993c7d10
104,762
import json


def rate_limit(*args):
    """
    Mock requests to Misfit with rate limit error: 429
    """
    body = {'error_code': 429, 'error_message': 'Rate limit exceeded'}
    return {
        'status_code': 429,
        'content': json.dumps(body),
        'headers': {
            'x-ratelimit-limit': '150',
            'x-ratelimit-remaining': '148',
            'x-ratelimit-reset': '1418424178',
        },
    }
3938e709bac07bc53c0caa9ae7724a43bcfb57a2
104,767