Dataset schema (one record = content, sha1, id):

    content : string, length 39 to 14.9k (Python source snippet)
    sha1    : string, length 40
    id      : int64, 0 to 710k
def from_bit(val):
    """Escape an aiodb.connector.mysql.Bit."""
    return f"b'{val.as_binary()}'"
f2cb84081a79d6830edaa0528d3ec4e734102312
80,350
import asyncio


async def fake_task(data, second):
    """Fake task for testing."""
    await asyncio.sleep(second)
    return data
adb5d200e6299e4a4a993229ebe56b22a7efc3cb
80,353
def generate_page_list(max_page, current_page, max_displayed_pages):
    """
    Generates a list of elements for the pagination

    Content of returned list may look like this:
    [1, '...', 50, 51, 52, 53, 54, '...', 3210] or [1, 2, 3]

    Args:
        max_page: The maximum page available for the search results
        current_page: The current page number
        max_displayed_pages: How many pages before and after the current one
            shall be displayed until '...' is shown

    Returns:
        list: Contains all page elements
    """
    final_list = []
    if max_page >= 0:
        # add the first page, it should always be there if we have at least one page
        final_list.append(1)
    available_pages = range(current_page - max_displayed_pages,
                            current_page + max_displayed_pages)
    if 1 not in available_pages:
        final_list.append("...")
    for page in available_pages:
        # iterate over the 'pageregion' around our current page;
        # add the page numbers if they are logically valid
        if page > 0 and page <= max_page and page not in final_list:
            final_list.append(page)
    if max_page > final_list[-1] and max_page not in final_list:
        # if we didn't reach max_page yet in our 'pageregion', we need to add it to the list
        if max_page > final_list[-1] + 1:
            final_list.append("...")
        final_list.append(max_page)
    return final_list
9f82377a18556972f9a230009517a5f7f212edec
80,357
import torch


def sdf_capsule_to_pt(capsule_base, capsule_tip, capsule_radius, pt):
    """Computes distance between a capsule and a point

    Args:
        capsule_base (tensor): x,y,z in batch [b,3]
        capsule_tip (tensor): x,y,z in batch [b,3]
        capsule_radius (tensor): radius of capsule in batch [b,1]
        pt (tensor): query point x,y,z in batch [b,3]

    Returns:
        (tensor): signed distance (positive outside, negative inside) [b,1]
    """
    pt_base = pt - capsule_base
    tip_base = capsule_tip - capsule_base
    # Batched dot products; torch.dot only supports 1-D tensors.
    h = torch.clamp(
        torch.sum(pt_base * tip_base, dim=-1, keepdim=True)
        / torch.sum(tip_base * tip_base, dim=-1, keepdim=True),
        0.0,
        1.0,
    )
    # Distance from the point to its projection on the axis segment,
    # minus the capsule radius.
    dist = torch.norm(pt_base - tip_base * h, dim=-1, keepdim=True) - capsule_radius
    return dist
e3957abcbf544bd476d502435868506894b0679d
80,362
import re


def _safe_search(regex, haystack, options=0):
    """Searches for string, returns None if not found.

    Assumes that regex has one and only one group.

    Args:
        regex: regular expression to use, with one group.
        haystack: the text to search (assumes multiline).
        options: regular expression options.

    Returns:
        String or None
    """
    grps = re.search(regex, haystack, options)
    if not grps:
        return None
    return grps.group(1)
f081d7c3a0bdfdcdf944da54d23775bcbd97fbc5
80,365
import re


def parse_template_text(template_text):
    """Parse the template text.

    Return the template text with the prompts stripped out, along with the
    list of user prompts."""
    pattern = r'\{(.*?)\}'
    prompts = re.findall(pattern, template_text)
    template_text = re.sub(pattern, '{}', template_text)
    return template_text, prompts
68c142437ae2ba1ed2691d7d2b13f9f405418cc0
80,368
import requests
from bs4 import BeautifulSoup


def get_html_parser(link):
    """Given a link, return the html parser."""
    code = requests.get(link)
    plain = code.text
    return BeautifulSoup(plain, "html.parser")
d6e5d35aaefed593098132c0b5f67ca2b6e0e7af
80,370
def str_or_blank(val) -> str:
    """Return a string or blank for None."""
    if val is not None:
        return str(val)
    else:
        return ""
31ca8b484063480e449456ce78cc85c9174de9d3
80,371
def validate_recoveryoption_name(recoveryoption_name):
    """
    Validate Name for RecoveryOption
    Property: RecoveryOption.Name
    """
    VALID_RECOVERYOPTION_NAME = (
        "admin_only",
        "verified_email",
        "verified_phone_number",
    )

    if recoveryoption_name not in VALID_RECOVERYOPTION_NAME:
        raise ValueError(
            "RecoveryOption Name must be one of: %s"
            % ", ".join(VALID_RECOVERYOPTION_NAME)
        )
    return recoveryoption_name
34e5d8fdbfdcb383c9fe23028fabc350397cdd56
80,375
import functools
from typing import Callable

import click


def pass_context_to_kwargs(func: Callable) -> Callable:
    """Decorator for `click` CLIs that puts all the kwargs in the `click`
    context in the decorated function's invocation kwargs.

    This can be a more elegant way to receive the context in some cases.

    Arguments:
        func: The `click` command function to be decorated.

    Returns:
        The decorated function, which passes all kwargs in the `click`
        context to the invocation kwargs.
    """
    @click.pass_context  # needed to receive the context
    def wrapper(context: click.Context, *args, **kwargs) -> None:
        """Extend kwargs with the context dict parameters.

        Arguments:
            context: The decorated command's context object.
        """
        kwargs = {**context.obj, **kwargs} if context.obj else kwargs
        return context.invoke(func, *args, **kwargs)

    return functools.update_wrapper(wrapper, func)
6fc77f72ee05afc5860e16b4cb364d620693173f
80,384
def tsi_norm(
    df,
    value_colname="Intensity",
    groupname_colname="sample_group_name",
    element_name="Metabolite",
):
    """
    Applies Total Sum Intensity normalization: all values are divided by the
    sum of all values, so that the values in each group sum to 1.

    Parameters
    ----------
    df : pandas.DataFrame
        input dataframe, output of either the extractNamesAndIntensities()
        or the calculateMeanVarRSD() function
    value_colname : string
        the name of the column with the data that needs to be normalized,
        defaults to 'Intensity'
    groupname_colname : string
        the name of the column with the sample group names,
        defaults to 'sample_group_name'
    element_name : string
        the name of the column with the identifiers of the tested elements
        (e.g. metabolites or genes). Defaults to 'Metabolite'

    Returns
    -------
    output_df : pandas.DataFrame
        the output dataframe. It follows the same architecture as the input
        dataframe, just with normalized values
    """
    pivot_df = df.pivot(
        index=element_name, columns=groupname_colname, values=value_colname
    )
    pivot_df = pivot_df.div(pivot_df.sum(axis=0), axis=1)
    output_df = (
        pivot_df.stack()  # back to original shape
        .reset_index()  # reset index, making a "0" column with intensities
        .merge(df.drop(columns=value_colname), how="right")  # add missing data
        .rename(columns={0: value_colname})  # renaming the "0" column
    )[
        df.columns
    ]  # match original column order
    return output_df
3254d70b8967908c6623866e04d5822dced97086
80,395
def GetValues(file_name):
    """Read lines from a text file."""
    values = []
    with open(file_name) as f:
        values = f.read().splitlines()
    return values
ec093e8de33b4a26b31a7f557ed63b6242ef9d42
80,403
def find_anchor_s(min_id_ind, selected_x, selected_y, selected_z):
    """
    Finds the anchor S using the minimum ID of selected anchors.
    S anchor must be the anchor which has the minimum ID in selected anchors.
    """
    s_x = selected_x[min_id_ind]
    s_y = selected_y[min_id_ind]
    s_z = selected_z[min_id_ind]
    anch_s = [s_x, s_y, s_z]
    return anch_s
3ce8c1a683d7c2a76fb0fa071e24e3c282416c6a
80,405
import math


def equal(x, y, tol=0.000000001):
    """Compare if two real numbers are equal using a tolerance to avoid
    rounding errors."""
    # Return a bare boolean so the result is usable in conditions;
    # a (bool, message) tuple would always be truthy.
    return math.fabs(x - y) < tol
4ff023b346b7c78073e6868ee0e3981ce6d1a2c8
80,406
def find_vnic_by_mac(mac, vnics):
    """
    Given a mac address and a list of vnics, find the vnic that is assigned
    that mac.

    Parameters
    ----------
    mac : str
        The MAC address.
    vnics : list of dict
        The list of virtual network interface cards.

    Returns
    -------
    The matching vnic on success, None otherwise.
    """
    vnic = None
    for v in vnics:
        m = v['macAddr'].lower()
        if mac == m:
            vnic = v
            break
    return vnic
dadf5342e5584f9d37c05c36a66b5829675dac74
80,408
def _atomReprAsHex(s: str) -> str:
    """Translate CLVM integer atom repr to a 0x-prefixed hex string."""
    if s.startswith("0x"):
        return s
    elif s.startswith('"'):
        return "0x" + s[1:-1].encode("ascii").hex()
    return hex(int(s))
8da85d35fce2c93c25371c11981eafb120db9165
80,412
from datetime import datetime


def parse_timestamp(timestamp):
    """
    :param timestamp: float (common timestamp)
    :return: datetime object computed from the passed timestamp
    """
    dt_object = datetime.fromtimestamp(timestamp)
    return dt_object
fcf770711cf0c747ae1dfd6004e6b5388707def0
80,414
from functools import reduce


def _calculate_num_of_value(dimensionAttr):
    """
    Based on dimension information, calculate the size of the list when
    squeezing a high-dimensional value into a single-dimension array.

    :param dimensionAttr: The dimension attribute
    :return: An integer which specifies the size of the one-dimensional array
    """
    if dimensionAttr.count > 1:
        # multi dimension channel
        return reduce((lambda x, y: x * y), dimensionAttr.value)
    else:
        return dimensionAttr.value
cb128ab549c76292271b6fc6a8333a9d67e280b8
80,415
def set_calibrator_flag(prows, ptable):
    """
    Sets the "CALIBRATOR" column of a processing table row to 1 (integer
    representation of True) for all input rows. Used within joint fitting
    code to flag the exposures that were input to the psfnight or nightlyflat
    for later reference.

    Args:
        prows, list or array of Table.Rows or dicts. The rows corresponding to
            the individual exposure jobs that are inputs to the joint fit.
        ptable, Table. The processing table where each row is a processed job.

    Returns:
        ptable, Table. The same processing table as input except with the
            CALIBRATOR column set to 1 for the rows matching the input jobs.
    """
    for prow in prows:
        ptable['CALIBRATOR'][ptable['INTID'] == prow['INTID']] = 1
    return ptable
f4f2a96e77e4263f45d48c48dea53bcff210ae41
80,418
def get_checkpoints_to_evaluate(ckpt_dir, epochs_to_eval=(), exact_ckpts=()):
    """Get file names of all checkpoints to evaluate.

    Args:
        ckpt_dir: Directory from which to load checkpoints.
        epochs_to_eval: Additional epochs to evaluate in addition to final model.
        exact_ckpts: If set, additional exact checkpoint names to evaluate.

    Returns:
        The list of checkpoints to evaluate.
    """
    # None = Current network weights (not necessarily best validation loss).
    ckpts_to_eval = [None]
    if ckpt_dir is not None:  # Evaluate saved checkpoints.
        # ckpts_to_eval = ['ckpt_100']  # Final checkpoint.
        ckpts_to_eval = ['ckpt']  # Final checkpoint.
        for epoch in epochs_to_eval:
            ckpts_to_eval.append(f'ckpt_best_of_{epoch}')
        for ckpt in exact_ckpts:
            if ckpt not in ckpts_to_eval:
                ckpts_to_eval.append(ckpt)
    return ckpts_to_eval
a5be2f7bc97782df54e1439a525861a02a72e8e1
80,421
def compute_benefits(predictions, labels, binary=False):
    """
    Converts predictions and labels into positive benefits
    """
    if binary:
        return predictions
    else:
        return (predictions - labels) + 1
3407d05789b188c2c33a8ab26c2032b65bcee29f
80,422
def datetime_to_struct(dt):
    """Converts datetime.datetime to time.struct_time

    Args:
        dt (datetime.datetime object): the datetime.datetime object to be
            converted

    Returns:
        a time.struct_time object with the converted time
    """
    return dt.timetuple()
52cb03f1f1dbfd266364b50a01099bed6c242385
80,423
def sort_timeseries(timeseries):
    """
    This function is used to sort a time series by timestamp to ensure that
    there are no unordered timestamps in the time series which are artefacts
    of the collector or carbon-relay. So all Redis time series are sorted by
    timestamp before analysis.
    """
    sorted_timeseries = []
    if timeseries:
        sorted_timeseries = sorted(timeseries, key=lambda x: x[0])
        del timeseries
    return sorted_timeseries
5e8378ed2f847f64a12986892f251c727e45b0b6
80,426
def filter_mT_table(df, kd_up_lim, SE_upper_lim, kd_low_lim=0, drop_dup=True):
    """
    Filters an existing masstitr table:
    filters out seqs containing * or X characters,
    removes seqs not matching the xxxPxExxx motif.

    Parameters
    ----------
    SE_upper_lim
        Will filter data for SE values with SE <= `SE_upper_lim`
    kd_up_lim
        Will remove data outside range: Kd < `kd_up_lim`
    kd_low_lim
        Will remove data outside range: `kd_low_lim` < Kd
    drop_dup : :obj:`bool`, optional
        Whether or not to drop duplicate sequences

    Returns
    -------
    filtered dataframe
    """
    # drop duplicates first just in case only a single duplicate makes the
    # cutoff but the other(s) do not. For example, if LNLPEESDW had 2 entries
    # with diff Kd values, one of them > kd_up_lim and the other < kd_up_lim
    if drop_dup:
        df = df.drop_duplicates(subset="AA_seq", keep=False)
    df = df[~df.AA_seq.str.contains(r"[\*X]")]
    df = df[df.AA_seq.str.contains(r"...P.E...")]
    df = df[df["SE"] <= SE_upper_lim]
    # df = df[df["R2"] >= R2_lower_lim]
    df = df[(df["Kd"] < kd_up_lim) & (df["Kd"] > kd_low_lim)]
    return df
e36068c6944123f1ae3d189b8519d6919e2a400c
80,430
import itertools
from typing import Any, Dict, List, Tuple


def expand_config_template_variables(
    key_min_max_tuples: Tuple[Tuple[str, ...], ...],
    key_value_tuples: Tuple[Tuple[str, ...], ...],
) -> List[Dict[str, Any]]:
    """
    Helper that computes a list of key-value dictionaries from all
    value-combinations of key-min-max and key-value assignments.
    """
    keys = [k for k, _, _ in key_min_max_tuples] + [k for k, _ in key_value_tuples]
    # noinspection PyTypeChecker
    values_iterators = [
        range(int(min_value), int(max_value) + 1)
        for _, min_value, max_value in key_min_max_tuples
    ] + [(value,) for _, value in key_value_tuples]
    if not values_iterators:
        return []
    return [
        {k: v for k, v in zip(keys, values)}
        for values in itertools.product(*values_iterators)
    ]
c59bf82ab2b49cf899a93bd2798334c3d193e97d
80,431
def _get_page_source(browser, url):
    """
    Get Wikidot page source with a Selenium-powered browser driver.

    :param browser: Selenium webdriver.
    :param url: Url to the page to fetch from.
    :return: A tuple in the following order: (text_page_title, text_page_source)
    """
    browser.get(url)
    elem_more_options_button = browser.find_element_by_id('more-options-button')
    elem_more_options_button.click()
    elem_view_source_button = browser.find_element_by_id('view-source-button')
    elem_view_source_button.click()
    text_page_source = browser.find_element_by_class_name('page-source').text
    text_page_title = browser.find_element_by_id('page-title').text
    return text_page_title, text_page_source
37358a8e1c04b0b9aa596f47117dd74bbfff8ddb
80,441
def get_nameservers(ipv4only=True):
    """
    Return a list of nameservers from parsing /etc/resolv.conf.
    If ipv4only is set, filter out ipv6 nameservers. This is because nginx
    freaks out in some formats of ipv6 that otherwise seem ok.
    """
    nameservers = []
    with open("/etc/resolv.conf") as f:
        for line in f:
            if line.strip().startswith("nameserver"):
                nameservers += line.strip().split(" ")[1:]
    if ipv4only:
        nameservers = [n for n in nameservers if ":" not in n]
    return nameservers
b142949be4561f92cda540b0b466d89d4538ede4
80,442
def load_common_meta(loader, filename):
    """
    Open an existing file and return any meta-data common to all data groups
    (eg. a FITS primary header) as a dict-like (eg. PyFITS header) object.

    Parameters
    ----------
    loader : callable
        Function used to read the meta-data from the named file.
    filename : `str`
        Name of an existing file from which to get the meta-data common to
        all groups of pixel data within that file.

    Returns
    -------
    `dict`-like
        A meta-data dictionary or compatible object.
    """
    return loader(filename)
4984112a4ce0e0da11120691c15c933074c46870
80,443
def str2bool(v):
    """Parse boolean value from string."""
    return str(v).lower() in ("true", "t", "1")
0a4573f3ef31d773d8eb786f8982a98c9348781d
80,451
def name_reverse(name):
    """Switch family name and given name"""
    fam, giv = name.split(',', 1)
    giv = giv.strip()
    return giv + ', ' + fam
06815ade8f9b1c155f25fe69a64d118de62db65f
80,452
def drop_titles(tree, titles_to_drop):
    """
    Walk the tree and drop any nodes whose titles are in `titles_to_drop`.
    """
    def _drop_titles(subtree):
        new_children = []
        for child in subtree["children"]:
            if child["title"] in titles_to_drop:
                continue
            else:
                new_child = _drop_titles(child)
                new_children.append(new_child)
        subtree["children"] = new_children
        return subtree

    return _drop_titles(tree)
99a1abeca91751703de1626251e8aa4ca115d41e
80,455
def half_up(n: int) -> int:
    """
    Divide by 2 and round up when input is odd.
        even -> n//2
        odd  -> n//2 + 1
    """
    return (n + 1) // 2
393350000e4d7c9ef96bd4e8cf8ea60543eedb9d
80,456
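A quick illustrative check of half_up from the record above (not part of the dataset record):

assert half_up(4) == 2  # even -> n//2
assert half_up(5) == 3  # odd  -> n//2 + 1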
def format_decimalized_number(number, decimal=1):
    """Format a number to display the nearest metric unit next to it.

    Do not display digits if all visible digits are null.
    Do not display units higher than "Tera" because most people don't know
    what a "Yotta" is.

    >>> format_decimalized_number(123_456.789)
    '123.5k'
    >>> format_decimalized_number(123_000.789)
    '123k'
    >>> format_decimalized_number(-123_456.789)
    '-123.5k'
    >>> format_decimalized_number(0.789)
    '0.8'
    """
    for unit in ['', 'k', 'M', 'G']:
        if abs(number) < 1000.0:
            return "%g%s" % (round(number, decimal), unit)
        number /= 1000.0
    return "%g%s" % (round(number, decimal), 'T')
f44cdbb6f012157aec2cc5d860668449c5e12b56
80,457
def any_in(a_set, b_set):
    """
    Boolean that is ``True`` if elements in a given set `a_set` intersect
    with elements in another set `b_set`. Otherwise, the boolean is ``False``.

    Parameters
    ----------
    a_set : list
        First set of elements.
    b_set : list
        Second set of elements.

    Returns
    -------
    bool
        ``True`` if there is a non-empty intersection between both sets,
        ``False`` otherwise.
    """
    return not set(a_set).isdisjoint(b_set)
76aedadfbca0dc9f875ccfe60e438d2274e0596f
80,459
def get_space_packet_header(
    packet_id: int, packet_sequence_control: int, data_length: int
) -> bytearray:
    """Retrieve raw space packet header from the three required values"""
    header = bytearray()
    header.append((packet_id & 0xFF00) >> 8)
    header.append(packet_id & 0xFF)
    header.append((packet_sequence_control & 0xFF00) >> 8)
    header.append(packet_sequence_control & 0xFF)
    header.append((data_length & 0xFF00) >> 8)
    header.append(data_length & 0xFF)
    return header
84363b40930412c25218397012ccaf2a642825a2
80,461
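A usage sketch for get_space_packet_header from the record above; the values are illustrative, not from the source:

# packet_id 0x1851, sequence control 0xC001, and data length 12 pack
# big-endian into six bytes: 0x18 0x51 0xC0 0x01 0x00 0x0C.
header = get_space_packet_header(
    packet_id=0x1851, packet_sequence_control=0xC001, data_length=12
)
assert header == bytearray(b"\x18\x51\xc0\x01\x00\x0c")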
from typing import Tuple


def split_module_path(module_path: str) -> Tuple[str, str]:
    """
    Parameters
    ----------
    module_path : str
        e.g. "a.b.c.ClassName"

    Returns
    -------
    Tuple[str, str]
        e.g. ("a.b.c", "ClassName")
    """
    *m_path, cls = module_path.split(".")
    m_path = ".".join(m_path)
    return m_path, cls
d0e8a90d6da47f140a627e537eb9cd70b72a177f
80,462
def in_orbit_idx_to_sat_idx(
    sat_idx_in_orbit: int, orbit_idx: int, num_sat_per_orbit: int
) -> int:
    """Compute the satellite index in the constellation, starting from the
    satellite index in the orbit and the orbit index.

    Args:
        sat_idx_in_orbit: Index of the satellite inside its orbit.
        orbit_idx: Index of the satellite's orbit.
        num_sat_per_orbit: Total number of satellites in each orbit of the
            constellation.

    Returns:
        int: Index of the satellite in the constellation.
    """
    if sat_idx_in_orbit >= num_sat_per_orbit:
        raise ValueError(
            "Satellite index in orbit cannot be greater than "
            "the number of satellites per orbit"
        )
    base_idx = orbit_idx * num_sat_per_orbit
    sat_idx = base_idx + sat_idx_in_orbit
    return sat_idx
d6ceca37cfd6a76165d65205708c03a8fcc288ce
80,463
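A worked example for in_orbit_idx_to_sat_idx from the record above (the numbers are illustrative):

# With 10 satellites per orbit, satellite 3 of orbit 2 has constellation
# index 2 * 10 + 3 = 23.
assert in_orbit_idx_to_sat_idx(sat_idx_in_orbit=3, orbit_idx=2, num_sat_per_orbit=10) == 23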
def z_score(series):
    """
    Computes the normalized value using the Z-score technique.

    The Z-score is a technique used for normalizing Gaussian distributions,
    representing each observation in relation to the distribution's mean and
    standard deviation. For precise definitions, see the Wikipedia article:
    https://en.wikipedia.org/wiki/Standard_score

    Parameters
    ----------
    series : list
        List with sequential values to use.

    Returns
    -------
    result : list
        List with the normalized results.
    """
    result = (series - series.mean()) / series.std(ddof=0)
    return result
3cffbd1e96a79982b73eb71079536f3b2b5468a7
80,465
import re


def is_string_valid_tarball_unpack_directory_name(
    dirname: str, package_name: str, version_number: str
) -> bool:
    """
    Check if the folder obtained by unpacking the source tarball is compliant
    with debian packaging.

    :param dirname: directory to check
    :param package_name: name of the source package
    :param version_number: name of the version
    :return: True if the directory obtained by unpacking the tarball has a
        valid name, false otherwise
    """
    m = re.match(r"^(?P<name>[a-z0-9\-]+)-(?P<version>[a-z0-9\.\-]+)$", dirname)
    if m is None:
        return False
    if m.group("name") != package_name:
        return False
    if m.group("version") != version_number:
        return False
    return True
df86b85bb47edbd0eebf5e96fab2b329983289f1
80,470
def is_it_possible(e, s, e_to_seps):
    """Checks if it is possible to add an edge due to a separator given the
    current set of minimal separators.

    :param e (tuple(int)): candidate edge, tuple of ints (u,v) where u<v.
    :param s set(int): separator.
    :param e_to_seps (dictionary): Dictionary from edges to list of separators.
    :return (bool): true if it is possible to add the edge using the minimal
        separators.
    """
    for u in e:
        num = 0
        seps = set()
        # Count the edges that need to be added and the number of separators
        # that can be involved in their addition
        for w in s:
            if w < u:
                uw = (w, u)
            else:
                uw = (u, w)
            if uw in e_to_seps:
                num += 1
                seps.update(e_to_seps[uw])
        if num > len(seps):
            return False
    return True
c603047b73524cae766f56ddb0861bf1575c0718
80,471
def _extract_cc_tests(bazel_rules):
    """Gets list of cc_test tests from bazel rules"""
    result = []
    for bazel_rule in bazel_rules.values():
        if bazel_rule['class'] == 'cc_test':
            test_name = bazel_rule['name']
            if test_name.startswith('//'):
                prefixlen = len('//')
                result.append(test_name[prefixlen:])
    return list(sorted(result))
28533687afc3bb7d978515bef5e878262eb3d06c
80,472
import json


def get_message_body(message):
    """
    Return json decoded message body from either SNS or SQS event model
    """
    print(str(message))
    try:
        message_text = message["body"]
        # catch addict default behaviour for missing keys
        if message_text == {}:
            raise KeyError
    except KeyError:
        message_text = message["Sns"]["Message"]
    try:
        message_body = json.loads(message_text)
    except (TypeError, json.JSONDecodeError):
        message_body = message_text
    print(str(message_body))
    return message_body
9ccb3b87c254ef106a1e9820faef0210feb8b526
80,474
def get_row(music, track, pattern, chan, row):
    """
    Return a row of a specific channel
    """
    return music['tracks'][track]['patterns'][pattern]['rows'][row]['channels'][chan]
46f67589a32f7ac4c8809ed5282982a9be17fbdc
80,475
def _f_expl_1_euler(x0, Y0, dx, *args, dYdx=None, **kwargs):
    """Explicit 1st-order Euler integration scheme

    Parameters
    ----------
    x0 : IntVar
        Integration variable at beginning of scheme
    Y0 : Field
        Variable to be integrated at the beginning of scheme
    dx : IntVar
        Stepsize of integration variable
    dYdx : Field, optional, default : None
        Current derivative. Will be calculated, if not set.
    args : additional positional arguments
    kwargs : additional keyword arguments

    Returns
    -------
    dY : Field
        Delta of variable to be integrated

    Butcher tableau
    ---------------
     0 | 0
    ---|---
       | 1
    """
    k0 = Y0.derivative(x0, Y0) if dYdx is None else dYdx
    return dx * k0
4b92efa22a09dd04bf53a0eb63b8e8f9c3524bf3
80,478
import re


def find_repeats(sequence, pattern):
    """Find repeat pattern in sequence.

    Function that finds repeat sequences using regular expressions and the
    input pattern.

    Parameters
    ----------
    sequence : String
        DNA sequence string.
    pattern : String
        String used to search the sequence.

    Returns
    -------
    repeat_list : List
        List containing matches for the pattern sequence; each match
        corresponds to a tuple containing the sequence start, end and number
        of repeats.
    """
    # List containing repeats
    repeat_list = []
    # Find repeats using regular expressions and pattern
    hit_list = re.finditer(pattern, sequence)
    for repeat in hit_list:
        rep_start, rep_end = repeat.span()
        rep_num = (rep_end - rep_start) / 2
        repeat_list.append((rep_start + 1, rep_end, rep_num))
    return repeat_list
e2fad89f04473eb74501797ae06b6175d9ee1151
80,482
def load_mapping(path):
    """Load barcode-to-sample mapping

    Expects a tab-delimited file: the first column is the barcode sequence,
    the second column is the sample name. No header line.
    """
    bc2sample = {}
    with open(path, "r") as ip:
        for line in ip:
            (bc, sample) = [field.strip() for field in line.split("\t")]
            bc2sample[bc] = sample
    return bc2sample
58a576bf2ea6a8bf4cd674b775055e5cc7469dae
80,484
def rowdf_into_imagerdd(df, final_num_partition=1):
    """
    Reshape a DataFrame of rows into a RDD containing the full image in one
    partition.

    Parameters
    ----------
    df : DataFrame
        DataFrame of image rows.
    final_num_partition : Int
        The final number of partitions. Must be one (default) unless you know
        what you are doing.

    Returns
    -------
    imageRDD : RDD
        RDD containing the full image in one partition
    """
    return df.rdd.coalesce(final_num_partition).glom()
076f96e4076ff31d591a61764dea877901c6bc40
80,493
def robustify(x, eps):
    """
    Adjusts x lying in the interval [0, 1] so that it lies in [eps, 1 - eps],
    through a linear projection to that interval.
    """
    return (1 - 2 * eps) * x + eps
23d02718c32420c980b0710cf65c987c6e7a603f
80,494
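A worked example for robustify from the record above, assuming x lies in [0, 1] (the eps value is illustrative):

# With eps = 0.1, the endpoints 0 and 1 map to 0.1 and 0.9 respectively.
assert robustify(0.0, 0.1) == 0.1
assert abs(robustify(1.0, 0.1) - 0.9) < 1e-12  # allow for float rounding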
def column_names(table_name: str, schema: str) -> str:
    """Return column names of the given table."""
    return f"""
        SELECT column_name, data_type
        FROM information_schema.columns
        WHERE table_schema = '{schema}'
        AND table_name = '{table_name}'
        ORDER BY ordinal_position;
    """
70e66a12e1baac2963f5369c8838cab5e711b467
80,495
def device_driver(model):
    """
    Returns the information needed to parse switch CLI output.

    model: The model name string

    returns: Dictionary entry with the CLI command, the units, and the
        output format
    """
    return {
        'ubiquiti_edgeswitch': ['ubiquiti_edgeswitch', 'show fiber-ports optics all', 'dBm', 4, 5],
        'dlink_dgs': ['ubiquiti_edgeswitch', 'show interfaces transceiver', 'mW', 4, 5],
    }[model]
04680fcb859b819d64b558f573b89705475f1885
80,496
def _handle_to_bytearray(handle):
    """Packs the 16-bit handle into a little endian bytearray"""
    assert handle <= 0xFFFF
    assert handle >= 0
    return bytearray([handle & 0xFF, (handle >> 8) & 0xFF])
f92cc792c059b867b7c7de8ff76aae72a80711d1
80,508
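An illustrative check of _handle_to_bytearray from the record above:

# Handle 0x0203 packs little-endian: low byte 0x03 first, then 0x02.
assert _handle_to_bytearray(0x0203) == bytearray([0x03, 0x02])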
def get_link(dct, idx):
    """Helper function to return the link at position `idx` from coredata."""
    return dct['coredata'].get('link', [])[idx].get('@href')
92d608d85351545c6a72b1c494cc76ff610b5a92
80,509
def search_iterator(lines, start=0):
    """Search for iterators in the code

    Args:
        lines (list): A list with Javascript syntax as strings.
        start (int, optional): The line at which to start searching for
            iterators. Defaults to 0.

    Returns:
        list: A list of tuples (index, operator)
    """
    save_lines = list()
    iterator_ops = ["--", "++", "+=", "-="]
    for iterator_op in iterator_ops:
        for index in range(start, len(lines)):
            if iterator_op in lines[index]:
                save_lines.append((index, iterator_op))
    return save_lines
7c1594a5aa67a390f3fbc1bc6cc9b4a556564c8e
80,511
def accuracy_nr_fn(step):
    """Returns if predictions are correct."""
    return [
        pred == groundtruth
        for pred, groundtruth in zip(
            step.state.selected_applicants, step.state.true_eligible
        )
    ]
9f000e7c9882c7fa0ffe60205fe7d1fc2c901c8c
80,514
import math


def error_print(x, err=None):
    """
    Returns a formatted string "value +/- error". The precision is modified
    according to ``err``.

    :param x: Value
    :param err: Error
    :return: String
    """
    if err is None:
        return ("{}").format(x)
    if err <= 0 or math.isnan(err):
        return ("{} ? {}").format(x, err)
    d = math.ceil(math.log10(err))
    b = 10 ** d
    b_err = err / b
    b_val = x / b
    if b_err < 0.355:  # 0.100 ~ 0.354
        dig = 2
    elif b_err < 0.950:  # 0.355 ~ 0.949
        dig = 1
    else:  # 0.950 ~ 0.999
        dig = 0
    err = round(b_err, dig) * b
    x = round(b_val, dig) * b
    d_p = dig - d
    if d_p > 0:
        return ("{0:.%df} +/- {1:.%df}" % (d_p, d_p)).format(x, err)
    return ("{0:.0f} +/- {1:.0f}").format(x, err)
961161f1cbaa07b6e4d09c41785df117dc88986f
80,516
def do_prepend(a, b):
    """
    Adds the specified string to the beginning of another string.

    https://github.com/Shopify/liquid/blob/b2feeacbce8e4a718bde9bc9fa9d00e44ab32351/test/integration/standard_filter_test.rb#L502
    """
    return '{}{}'.format(b, a)
e58538125929a15665576cf5cb1353d896d62edd
80,521
def area_squa(l):
    """Calculates the area of a square with given side length l.

    :Input: Side length of the square l (float, >=0)
    :Returns: Area of the square A (float).
    """
    if l < 0:
        raise ValueError("The side length must be >= 0.")
    A = l ** 2
    return A
11f0c93ca7f276c1ad2fb06c7959f802d0788282
80,523
import torch


def solve_quadratic(coefficients):
    """Solves quadratics across batches, returns only the maximum root

    :param coefficients: 3-tuple of torch.tensors (a, b, c)
        Each of (a, b, c) should be of length batch_size and all on the same
        device. (a, b, c) corresponds to the usual quadratic coefficients.
    :return: torch.tensor of length batch_size
    """
    a, b, c = coefficients
    sol1 = (-b + torch.sqrt(b * b - 4 * a * c)) / (2 * a)
    sol2 = (-b - torch.sqrt(b * b - 4 * a * c)) / (2 * a)
    return torch.maximum(sol1, sol2)
11de0f5a9add0d52d5862e000e3abc2345d1c2c9
80,524
def list_of_0(liste):
    """
    For a given list, return the list of indices at which the element is 0.
    """
    liste_z = []
    for i in range(len(liste)):
        if liste[i] == 0:
            liste_z.append(i)
    return liste_z
f9b4f1d54bb83a7516a620a5d701aecb8783b9d5
80,527
def get_classes(classes_file='../data/ucf101/classInd.txt'):
    """
    Loads a text file with class->id mapping

    Parameters
    ----------
    classes_file : str, optional
        File path to class->id mapping text file,
        by default '../data/ucf101/classInd.txt'

    Returns
    -------
    dict
        Dictionary of class names and numeral id
        Example: {'Class1': 1, 'Class2': 2}
    """
    with open(classes_file, 'r') as f:
        classes = f.readlines()
    classes = map(lambda cls: cls.replace('\n', '').split(' '), classes)
    classes = dict(map(lambda cls: (cls[1], int(cls[0])), classes))
    return classes
c350f76cf46a1f509f7b3d0da88cce457f9439f8
80,530
from datetime import datetime


def json_utils_default(obj):
    """
    Default function used by ``json.dumps`` to make datetime values
    javascript compatible
    """
    if isinstance(obj, datetime):
        return obj.isoformat()
    return obj
7b46fc9310b6a644cdc4ad219fd5db8d1de63a94
80,536
def clear() -> dict:
    """Clears the log."""
    return {"method": "Log.clear", "params": {}}
e412c213472168c714fde27ffc1f953ffcc56fcd
80,538
def _center(rect):
    """
    Calculate the center of a rectangle using the midpoint formula.

    :param rect: (x, y, w, h) tuple where (x, y) is the top-left corner,
        w is the width, and h is the height of the rectangle
    :return: (x, y) tuple where (x, y) is the center of the rectangle.
    """
    x, y, w, h = rect
    return (x + x + w) / 2, (y + y + h) / 2
df5add18c6d5cbb585609fa3f3bfc3b5772e8118
80,540
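An illustrative check of _center from the record above:

# A 4x2 rectangle with top-left corner (10, 20) has its center at (12, 21).
assert _center((10, 20, 4, 2)) == (12.0, 21.0)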
def is_same_size(sicd1, sicd2):
    """
    Are the two SICD structures the same size in pixels?

    Parameters
    ----------
    sicd1 : sarpy.io.complex.sicd_elements.SICD.SICDType
    sicd2 : sarpy.io.complex.sicd_elements.SICD.SICDType

    Returns
    -------
    bool
    """
    if sicd1 is sicd2:
        return True
    try:
        return (sicd1.ImageData.NumRows == sicd2.ImageData.NumRows) and \
               (sicd1.ImageData.NumCols == sicd2.ImageData.NumCols)
    except AttributeError:
        return False
e689cd91a487e8fe7418b714c9b1272b81736c28
80,551
def find_first_text(blocks, default=""):
    """
    Find text of first text block in an iterable of blocks.

    Returns that text, or default, if there are no text blocks.
    """
    for block in blocks:
        if block.type == "text":
            return block.value
    return default
8b92ae4275f58da7d86997f55b5fc62ce156dd92
80,554
def feature_name(name):
    """
    @feature_name("name")
    def f(...):
        ...

    Annotate the name to be used when describing f as a feature. The default
    name when this decorator is not used is the function name itself, but you
    can define any custom string here if you want to disambiguate or be more
    specific. The name provided will be used in some reports and error
    messages.
    """
    def decorate(f):
        f._feature_name = name
        return f
    return decorate
36acee05ca8008be9614bd64889b15a2fca2fc70
80,555
def depth_to_sample(depth, depth_data):
    """Convert depth to sample index"""
    return int((depth - depth_data['depth_start'])
               / depth_data['depth_per_pixel'] - 0.5)
499612948b59c28f1aece7259a2389bd8ebfc282
80,556
def read_args(parser):
    """Return a ``dict`` of arguments from an ``ArgumentParser``.

    Parses arguments using ``parser.parse_args()`` and returns a ``dict`` of
    all arguments that are not ``None``.
    """
    args = parser.parse_args()
    args_dict = vars(args)
    return {k: v for k, v in args_dict.items() if v is not None}
1729b8a6df8e8031964a0c84c4bebc20b1fc2910
80,558
from typing import Any


def get_value_from_tuple_by_key(choices: tuple, key) -> Any:
    """
    Fetches the tuple value by a given key.

    Useful for getting the name of a key from a model choice tuple of tuples.

    Usage:
        project_type_a_name = get_value_from_tuple_by_key(
            PROJECT_TYPE_CHOICES, PROJECT_TYPE_A)
    """
    try:
        return dict(choices)[key]
    except KeyError:
        return '-'
41b820341ae4e6c32ba771a69151e38bd7936975
80,561
def class_keys_to_video_id_keys(videos):
    """
    Transform a dictionary with keys = classes, values = video lists to a
    dictionary where key = video id, value = class.

    :param videos: Dictionary with classes as keys.
    :return: Dictionary with video ids as keys.
    """
    new_videos = {}
    for cls, vids in videos.items():
        for vid in vids:
            new_videos[vid] = cls
    return new_videos
a53f80b9cd8a906822fe5bb5f7dff58d0cfc950f
80,562
def resample_data(df, t, my_cols):
    """
    Returns a dataframe with resampled data [mean, std, count].

    Parameters:
        df (pandas DataFrame): dataframe
        t ('T', 'H', 'D'): minute, hour or day
        my_cols (list-like): selected columns
    """
    df_mean = df[my_cols].resample(t).mean()
    df_std = df[my_cols].resample(t).std()
    df_count = df[my_cols].resample(t).count()
    return df_mean.join(df_std, rsuffix='_std').join(df_count, rsuffix='_count')
80062df5edead2b955a7d2c01cb24324d547f124
80,565
def to_uml_json_edge(**kwargs):
    """
    Create dict to be converted to JSON for consumption by Player Piano.

    See Also
    --------
    DiEdgeReporterMixin
    """
    return {
        "id": kwargs["id"],
        "ops": [
            {
                "op": kwargs["op"],
                "path": "/m2/" + kwargs["path"],
                "value": kwargs["value"],
            }
        ],
    }
8e9fa27c45dbacd4400a76f9303fc7f1e64c508c
80,566
import io


def is_file_obj(o):
    """Test if an object is a file object"""
    return isinstance(o, (io.TextIOBase, io.BufferedIOBase, io.RawIOBase, io.IOBase))
94437b1bd758ae0fcc8a274a7366f01c93c8ee85
80,568
from pathlib import Path
from typing import List


def get_files_of_interest(path: str, file_extensions: List[str]) -> List[str]:
    """
    Get a list of absolute paths to files with specified extension.
    This function searches recursively through the path.

    Usage e.g.
    >>> files = get_files_of_interest('/home/some_dir', ['*.txt', '*.log'])
    >>> print(files)
    >>> ['/home/some_dir/sub_dir/foo.txt', '/home/some_dir/bar.log']
    """
    files = []
    for extension in file_extensions:
        for file in Path(path).rglob(extension):
            files += [file]
    return files
156756f2a2332d1a6b1eadac8799bde2ae021d90
80,570
def get_feeds_config(full_config):
    """
    Returns the feeds-specific portion of the global config. Centralizes
    this logic.

    :param full_config:
    :return: dict that is the feeds configuration
    """
    c = full_config.get('feeds', {}) if full_config else {}
    return c if c is not None else {}
0a265f654e1c6f6b6792949b26b2e70159fb4674
80,573
def sfr_evolution(z, *params):
    """
    Star-formation-rate evolution of Madau & Dickinson 2014.

    Arguments:
        z (float): redshift
        params: n (float) Scaling parameter.
    """
    return (1.0025738 * (1 + z)**2.7 / (1 + ((1 + z) / 2.9)**5.6))**params[0]
d966cddde43407832fd6f7cf806ecfbbdbef86b1
80,577
def get_enum(s, enum_class):
    """Get an enum from a string where the enum class is assumed to have all
    upper-case keys, returning None if the key is not found.

    Args:
        s (str): Key of enum to find, case-insensitive.
        enum_class (:class:`Enum`): Enum class to search.

    Returns:
        The enum if found, otherwise None.
    """
    enum = None
    if s:
        s_upper = s.upper()
        try:
            enum = enum_class[s_upper]
        except KeyError:
            pass
    return enum
0a34fcefa7c7d1fe444cf0cdf48a1450ee4b00d6
80,583
def __slice_acov__(cov, dep, given):
    """
    Slices a covariance matrix, keeping only the row associated with the
    dependent variable minus its self-covariance.

    :param cov: Covariance matrix.
    :param dep: Index of dependent variable.
    :param given: Array of indices of independent variables.
    :return: A 1 x |given| vector of covariances.
    """
    row_selector = dep
    col_selector = [x for x in range(cov.shape[1]) if x in given]
    v = cov[row_selector, col_selector]
    return v
544f4b2370e860edf8956a8afa67eebf8e18d87a
80,587
def create_symlink(source, destination):
    """
    Create a symlink from source to destination; if the destination is
    already a symlink, it will be overwritten.

    Args:
        source (pathlib.Path): path to the source
        destination (pathlib.Path): path to the destination
    """
    if not destination.exists() or (destination.exists() and destination.is_symlink()):
        if destination.is_symlink():
            destination.unlink()  # overwrite existing tag
        destination.symlink_to(source)
        return True
    else:
        return False
e4e90871cb33748c5b1384a8bbfe40854e5cee0e
80,590
def calculate_coins(quarters, dimes, nickles, pennies):
    """Returns the amount of money in $ that the user inserted.
    Input is the number of coins the user inserted."""
    total = 0.25 * quarters + 0.10 * dimes + 0.05 * nickles + 0.01 * pennies
    return total
2cff39e5f97d96db0da7da72cb7543804c4c2072
80,595
def readable_outputs(seq, reverse_vocab, end_symbol_id=None):
    """
    Convert a sequence of output indices to readable string outputs from a
    given (reverse) vocab
    """
    if end_symbol_id is None:
        return ' '.join([reverse_vocab[s] for s in seq])
    else:
        outputs = []
        for s in seq:
            outputs.append(reverse_vocab[s])
            if s == end_symbol_id:
                break
        return ' '.join(outputs)
ecc5a21397ce9474ee973a7e2e681bdf5ed3987e
80,598
def rp_attributes(
        attributes_list, value_default='NA', key_len=128, value_len=128):
    """Return a list of dictionary elements for ReportPortal v5 attributes.

    This filter gets lists of (string) attributes separated by ':' and
    converts them to a list of dictionary elements suitable for updating
    attributes of launches in ReportPortal v5.

    Input:  ['K1:V1', 'K2:V2', ...]
    Output: [{'key': 'K1', 'value': 'V1'}, {'key': 'K2', 'value': 'V2'}, ...]

    :param attributes_list: List of attributes that should be split
    :param value_default: The default value to use if a value isn't given
    :param key_len: Key max length
    :param value_len: Value max length
    """
    attributes = []
    for attr in attributes_list:
        attr = attr.replace('\n', ' ').replace('\\n', ' ').strip()
        if ':' not in attr:
            attr += ':'
        key, value = attr.split(':', 1)
        if value == '':  # equality, not identity: "is ''" is unreliable
            value = value_default
        if len(key) > key_len:
            key = key[:key_len - 4] + '...'
        if len(value) > value_len:
            value = value[:value_len - 4] + '...'
        attributes.append({'key': key, 'value': value})
    return attributes
e71fe7a807d70bfbe7d0cd7a3fef68cd317aaaea
80,599
def _pytype_to_shape_fn_pytype(pytype: str) -> str:
    """Convert a JitOperator pytype to the type relevant in shape functions.

    In particular, this converts `Tensor` to `List[int]`, along with a few
    other special cases.
    """
    # `Scalar` operands (which are represented with pytype "number") can
    # be either `int` or `float`. TorchScript has no way to represent a
    # signature of this type, so we hardcode it to `float`. `Scalar`
    # operands don't participate in shape functions (since they are
    # logically real-valued), so it doesn't really matter much, and
    # `float` helps make it clearer that it's not part of the shape
    # function.
    if pytype == "number":
        return "float"
    if pytype == "Optional[number]":
        return "Optional[float]"
    # `torch.device` is lowercase.
    if pytype == "Device":
        return "device"
    if pytype == "Optional[Device]":
        return "Optional[device]"
    # Shape functions only care about the shape of tensors.
    if pytype == "Tensor":
        return "List[int]"
    if pytype == "Optional[Tensor]":
        return "Optional[List[int]]"
    if pytype == "List[Tensor]":
        return "List[List[int]]"
    if pytype == "List[Optional[Tensor]]":
        return "List[Optional[List[int]]]"
    # Generators don't contribute to shapes, and are not scriptable currently.
    # So just hack them to be passed as "Any".
    if pytype == "Generator":
        return "Any"
    if pytype == "Optional[Generator]":
        return "Any"
    return pytype
9e27373c138908205cd175b53f6382dcc0bc7a37
80,601
def ocircle2(circle1, circle1_radius, circle2, circle2_radius):
    """
    Detects collision between two circles.
    """
    return circle1.distance_to(circle2) <= (circle1_radius + circle2_radius)
a3811f83f99eeccedfea32bcbf99f1e518c644a4
80,604
import copy


def sub_mat(in_mat, my_row, my_col):
    """
    This function returns a sub-matrix of 'in_mat', by eliminating the
    'my_row' row and the 'my_col' column.

    :param in_mat: list of lists, i.e. a matrix
    :param my_row: int
    :param my_col: int
    :return: list of lists, i.e. a matrix
    """
    # use deepcopy to leave the original matrix untouched
    my_mat = copy.deepcopy(in_mat)
    my_mat.pop(my_row)  # remove the my_row row first
    for row in my_mat:  # remove the my_col column
        row.pop(my_col)
    return my_mat
42a91450e4b125c9a9cfbc1d18b10e07c9754f7e
80,611
def validator(coord):
    """
    Coordinate validator. Used to ensure that the coordinates of a move are
    within the gameboard boundaries.

    Parameters:
        tuple: (x, y)

    Returns:
        boolean: within boundaries, True or False
    """
    for x in coord:
        if x not in range(0, 8):
            return False
    return True
a27ef9d4b87d8fd10e677fe68ffd540285f6fc49
80,612
def find_step_in_progress(status_str):
    """Parse all statuses for the step that is in progress.

    Parameters
    ----------
    status_str : str
        Newline separated str of the steps and statuses

    Returns
    -------
    str
        Current step in progress
    """
    status_list = status_str.splitlines()
    # remove "Status:" string
    status_list.pop(0)
    for i in range(1, len(status_list), 2):
        if status_list[i] == "In Progress":
            return status_list[i - 1][len("Step #:"):]
206d4b41f89519b76d013fb6e645f0e0193fb13d
80,614
def is_concrete(target):
    """Returns true if a target resolves to itself."""
    targets = list(target.resolve())
    return len(targets) == 1 and targets[0] == target
3e2739ad26b3d4ce57fa5374f1d8d95772b73325
80,616
def rk4_solve(CS135, CS133, XE135, XE134, XE133, XE132, BA135,
              fCS135, fCS133, fXE135, fXE134, fXE133, fXE132, fBA135, dt):
    """
    This function solves the system of equations describing the dynamics of
    neutron-production via muon transmutation. It takes in the initial values
    of the problem and returns the value after 1 step of the Runge-Kutta
    algorithm.
    """
    # Stage 1: derivatives at the initial state
    CS135_1 = fCS135(CS135, CS133, XE135, XE134, XE133, XE132, BA135) * dt
    CS133_1 = fCS133(CS135, CS133, XE135, XE134, XE133, XE132, BA135) * dt
    XE135_1 = fXE135(CS135, CS133, XE135, XE134, XE133, XE132, BA135) * dt
    XE134_1 = fXE134(CS135, CS133, XE135, XE134, XE133, XE132, BA135) * dt
    XE133_1 = fXE133(CS135, CS133, XE135, XE134, XE133, XE132, BA135) * dt
    XE132_1 = fXE132(CS135, CS133, XE135, XE134, XE133, XE132, BA135) * dt
    BA135_1 = fBA135(CS135, CS133, XE135, XE134, XE133, XE132, BA135) * dt

    # Stage 2: derivatives at the midpoint using the stage-1 increments
    CS135_k = CS135 + CS135_1 * 0.5
    CS133_k = CS133 + CS133_1 * 0.5
    XE135_k = XE135 + XE135_1 * 0.5
    XE134_k = XE134 + XE134_1 * 0.5
    XE133_k = XE133 + XE133_1 * 0.5
    XE132_k = XE132 + XE132_1 * 0.5
    BA135_k = BA135 + BA135_1 * 0.5
    CS135_2 = fCS135(CS135_k, CS133_k, XE135_k, XE134_k, XE133_k, XE132_k, BA135_k) * dt
    CS133_2 = fCS133(CS135_k, CS133_k, XE135_k, XE134_k, XE133_k, XE132_k, BA135_k) * dt
    XE135_2 = fXE135(CS135_k, CS133_k, XE135_k, XE134_k, XE133_k, XE132_k, BA135_k) * dt
    XE134_2 = fXE134(CS135_k, CS133_k, XE135_k, XE134_k, XE133_k, XE132_k, BA135_k) * dt
    XE133_2 = fXE133(CS135_k, CS133_k, XE135_k, XE134_k, XE133_k, XE132_k, BA135_k) * dt
    XE132_2 = fXE132(CS135_k, CS133_k, XE135_k, XE134_k, XE133_k, XE132_k, BA135_k) * dt
    BA135_2 = fBA135(CS135_k, CS133_k, XE135_k, XE134_k, XE133_k, XE132_k, BA135_k) * dt

    # Stage 3: derivatives at the midpoint using the stage-2 increments
    CS135_k = CS135 + CS135_2 * 0.5
    CS133_k = CS133 + CS133_2 * 0.5
    XE135_k = XE135 + XE135_2 * 0.5
    XE134_k = XE134 + XE134_2 * 0.5
    XE133_k = XE133 + XE133_2 * 0.5
    XE132_k = XE132 + XE132_2 * 0.5
    BA135_k = BA135 + BA135_2 * 0.5
    CS135_3 = fCS135(CS135_k, CS133_k, XE135_k, XE134_k, XE133_k, XE132_k, BA135_k) * dt
    CS133_3 = fCS133(CS135_k, CS133_k, XE135_k, XE134_k, XE133_k, XE132_k, BA135_k) * dt
    XE135_3 = fXE135(CS135_k, CS133_k, XE135_k, XE134_k, XE133_k, XE132_k, BA135_k) * dt
    XE134_3 = fXE134(CS135_k, CS133_k, XE135_k, XE134_k, XE133_k, XE132_k, BA135_k) * dt
    XE133_3 = fXE133(CS135_k, CS133_k, XE135_k, XE134_k, XE133_k, XE132_k, BA135_k) * dt
    XE132_3 = fXE132(CS135_k, CS133_k, XE135_k, XE134_k, XE133_k, XE132_k, BA135_k) * dt
    BA135_3 = fBA135(CS135_k, CS133_k, XE135_k, XE134_k, XE133_k, XE132_k, BA135_k) * dt

    # Stage 4: derivatives at the full step using the stage-3 increments
    CS135_k = CS135 + CS135_3
    CS133_k = CS133 + CS133_3
    XE135_k = XE135 + XE135_3
    XE134_k = XE134 + XE134_3
    XE133_k = XE133 + XE133_3
    XE132_k = XE132 + XE132_3
    BA135_k = BA135 + BA135_3
    CS135_4 = fCS135(CS135_k, CS133_k, XE135_k, XE134_k, XE133_k, XE132_k, BA135_k) * dt
    CS133_4 = fCS133(CS135_k, CS133_k, XE135_k, XE134_k, XE133_k, XE132_k, BA135_k) * dt
    XE135_4 = fXE135(CS135_k, CS133_k, XE135_k, XE134_k, XE133_k, XE132_k, BA135_k) * dt
    XE134_4 = fXE134(CS135_k, CS133_k, XE135_k, XE134_k, XE133_k, XE132_k, BA135_k) * dt
    XE133_4 = fXE133(CS135_k, CS133_k, XE135_k, XE134_k, XE133_k, XE132_k, BA135_k) * dt
    XE132_4 = fXE132(CS135_k, CS133_k, XE135_k, XE134_k, XE133_k, XE132_k, BA135_k) * dt
    BA135_4 = fBA135(CS135_k, CS133_k, XE135_k, XE134_k, XE133_k, XE132_k, BA135_k) * dt

    # Weighted RK4 combination: (k1 + 2*k2 + 2*k3 + k4) / 6
    CS135 = CS135 + (CS135_1 + 2 * (CS135_2 + CS135_3) + CS135_4) / 6
    CS133 = CS133 + (CS133_1 + 2 * (CS133_2 + CS133_3) + CS133_4) / 6
    XE135 = XE135 + (XE135_1 + 2 * (XE135_2 + XE135_3) + XE135_4) / 6
    XE134 = XE134 + (XE134_1 + 2 * (XE134_2 + XE134_3) + XE134_4) / 6
    XE133 = XE133 + (XE133_1 + 2 * (XE133_2 + XE133_3) + XE133_4) / 6
    XE132 = XE132 + (XE132_1 + 2 * (XE132_2 + XE132_3) + XE132_4) / 6
    BA135 = BA135 + (BA135_1 + 2 * (BA135_2 + BA135_3) + BA135_4) / 6
    return CS135, CS133, XE135, XE134, XE133, XE132, BA135
609a7261f3591008d98b953351fe4da6530e86ab
80,618
import math
import random

import torch


def prepare_data_for_node_classification(datalist, train_ratio=0.3,
                                         rnd_labeled_nodes=True):
    """For each graph, split the nodes for training and testing. It creates a
    train_mask where elements equal to 1 are for training.
    rnd_labeled_nodes=True means that the nodes that are given labels for
    training are chosen at random (at different epochs, the same graph can
    have different labeled nodes)."""
    for graph in datalist:
        num_nodes = graph.num_nodes
        num_classes = graph.node_y.size(1)
        nodes_per_class = {}
        for node, y in enumerate(graph.node_y.argmax(1)):
            y = y.item()
            if y not in nodes_per_class:
                nodes_per_class[y] = []
            nodes_per_class[y].append(node)

        train_mask = torch.zeros((num_nodes, 1))
        for y in nodes_per_class.keys():
            num_nodes_in_class = len(nodes_per_class[y])
            num_train_nodes = math.floor(num_nodes_in_class * train_ratio)
            if rnd_labeled_nodes:
                train_nodes = random.sample(nodes_per_class[y], num_train_nodes)
            else:
                train_nodes = nodes_per_class[y][:num_train_nodes]
            for node in train_nodes:
                train_mask[node] = 1
        graph.train_mask = train_mask
    return datalist
257d8097993d9178f0682d63d158d0e2578d2feb
80,619
def bytes_to_index(lead, tail):
    """
    Map a pair of ShiftJIS bytes to the WHATWG index.
    """
    lead_offset = 0x81 if lead < 0xA0 else 0xC1
    tail_offset = 0x40 if tail < 0x7F else 0x41
    return (lead - lead_offset) * 188 + tail - tail_offset
edf98e19d9283b2deaf442e29d50469ea6bc0954
80,623
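A worked example for bytes_to_index from the record above (the byte pair is illustrative):

# For the ShiftJIS pair (0x82, 0xA0): lead_offset is 0x81 (lead < 0xA0) and
# tail_offset is 0x41 (tail >= 0x7F), giving
# (0x82 - 0x81) * 188 + 0xA0 - 0x41 = 188 + 95 = 283.
assert bytes_to_index(0x82, 0xA0) == 283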
def strip(_str):
    """
    Convenient function to strip a string. Accepts C{None}.

    Examples:
        strip(None) = None
        strip("") = ""
        strip(" ") = ""
        strip(" abc") = "abc"
        strip("abc ") = "abc"
        strip(" ab c ") = "ab c"
        strip(1)  # Raises AttributeError: 'int' object has no attribute 'strip'
    """
    if not _str:
        return _str
    return _str.strip()
a5da729a68027eb45385292ef8617b3e70d9da4d
80,630
def convert_to_table_file_format(format_str):
    """Converts a legacy file format string to a TableFileFormat enum value.

    Args:
        format_str: A string describing a table file format that was passed to
            one of the functions in ee.data that takes table file formats.

    Returns:
        A best guess at the corresponding TableFileFormat enum name.
    """
    format_str = format_str.upper()
    if format_str == 'GEOJSON':
        return 'GEO_JSON'
    elif format_str == 'TFRECORD':
        return 'TF_RECORD_TABLE'
    else:
        # It's probably "CSV" or "KML" or one of the others.
        # Let the server validate it.
        return format_str
b6a6bedac3fa18bbf25f403d4e60dbaad7ba4ffd
80,632
def init_hyper() -> dict:
    """Return a dictionary with the initial hyperparameters."""
    hyper = {
        'reg_c': 0.1,
        'learning_rate': 0.1,
        'loss_type': 'ww'
    }
    return hyper
8c71ca0a4ec982d3c7b3902ed3480004b2e384ec
80,633
from typing import Dict, List

import requests
from bs4 import BeautifulSoup


def get_super_hero_ids() -> List[Dict]:
    """Pull Super Hero IDs

    Scrape the Super Hero API website for IDs.

    ..note: No endpoint exists to pull super hero IDs from API calls.

    Returns:
        List of super hero IDs & names

    Raises:
        Exception: Error occurring, normally from the API connection
    """
    try:
        headers = {
            'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'accept-language': 'en-US,en;q=0.5',
            'connection': 'keep-alive',
            'DNT': '1',
            'host': 'superheroapi.com',
            'upgrade-insecure-requests': '1',
            'user-agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:65.0) Gecko/20100101 Firefox/65.0'
        }
        source = requests.get('https://superheroapi.com/ids.html', headers=headers)
        source = BeautifulSoup(source.text, 'html.parser')
        tables = source.select('.container-fluid table')
        ids = []
        for t in tables:
            rows = t.select('tbody tr')
            for r in rows:
                elem = r.find_all('td')
                ids.append({
                    'id': elem[0].text,
                    'name': elem[1].text
                })
        return ids
    except Exception:
        raise Exception('Can\'t Connect to Super Hero API ID Table')
92e59f464f0c075bb74cd7fb1167acefab6097fa
80,641
def element_is_covered(u, subset, p_leq_q, p_to_q, fol):
    """Return `True` if some element of `subset` covers `u`.

    Covering is computed using `p_leq_q`.
    """
    r = fol.let(u, p_leq_q)
    for y in subset:
        yq = {p_to_q[k]: v for k, v in y.items()}
        if fol.let(yq, r) == fol.true:
            return True
    return False
cd3120ed0272998dc6f67011cebc1e3f28c02ae2
80,642
def skip_comment_block(fd, comment_symbol):
    """Reads the initial lines of the file (represented by the file
    descriptor) corresponding to a comment block. All consecutive lines
    starting with the given symbol are considered to be part of the comment
    block.
    """
    comment_block = ''
    line_beg = fd.tell()
    line = fd.readline()
    while line != '':
        if not line.startswith(comment_symbol):
            fd.seek(line_beg)
            break
        comment_block += line
        line_beg = fd.tell()
        line = fd.readline()
    return fd, comment_block
0ae1a1d110dd1ab8ab8125f99fca2ab48b97db6b
80,645
def state_hamiltonian(state):
    """
    Given a spin state, sum through each nearest neighbor pair to find the
    energy of the spin hamiltonian. A factor of 1/2 is used to adjust for
    double counting.
    """
    N = state.shape[0]
    energy = 0
    for ii in range(N):
        for jj in range(N):
            # Sum through sites
            site = state[ii, jj]
            # Sum nearest neighbor bonds, using modulo N to enforce
            # periodic boundaries
            nn = state[(ii + 1) % N, jj] + state[(ii - 1) % N, jj] + \
                 state[ii, (jj + 1) % N] + state[ii, (jj - 1) % N]
            # Update energy, eliminate double counting
            energy += -0.5 * site * nn
    return energy
3e17b789fddc1568533f6365b5c5c9ab56a3a7c0
80,646
import collections
import pickle


def load_autofolio(fn: str):
    """Read a pickled autofolio model.

    Parameters
    ----------
    fn : string
        The path to the file

    Returns
    -------
    A namedtuple with the following fields:
        - scenario: ASlibScenario
            The aslib scenario information used to learn the model
        - preprocessing: list of autofolio.feature_preprocessing objects
            All of the preprocessing objects
        - pre_solver: autofolio.pre_solving.aspeed_schedule.Aspeed
            Presolving schedule
        - selector: autofolio.selector
            The trained pairwise selection model
        - config: ConfigSpace.configuration_space.Configuration
            The dict-like configuration information
    """
    af = collections.namedtuple(
        "af", "scenario,preprocessing,pre_solver,selector,config"
    )
    with open(fn, "br") as fp:
        autofolio_model = pickle.load(fp)

    autofolio_model = af(
        scenario=autofolio_model[0],
        preprocessing=autofolio_model[1],
        pre_solver=autofolio_model[2],
        selector=autofolio_model[3],
        config=autofolio_model[4],
    )
    return autofolio_model
82f5a0d3a7ec31d8e97cb08fd864df1e6ec32018
80,647
def normalize_metric_name(name):
    """
    Makes the name conform to common naming conventions and limitations:

    * The result will start with a letter.
    * The result will only contain alphanumerics, underscores, and periods.
    * The result will be lowercase.
    * The result will not exceed 200 characters.
    """
    if not name[0].isalpha():
        name = 'x' + name
    name = name.lower()
    name = ''.join(['_' if not c.isalnum() and c != '_' and c != '.' else c
                    for c in name])
    return name[:200]
17cd43a1f31566b18d16ec6b03eab9cffbda01eb
80,653