content: string (lengths 35 to 762k)
sha1: string (length 40)
id: int64 (0 to 3.66M)
import functools


def Debounce(threshold=100):
    """
    Simple debouncing decorator for apigpio callbacks.

    Example:

        @Debounce()
        def my_cb(gpio, level, tick):
            print('gpio cb: {} {} {}'.format(gpio, level, tick))

    The threshold can be given to the decorator as an argument (in
    milliseconds). This decorator can be used both on functions and on
    objects' methods.

    Warning: as the debouncer uses the tick from pigpio, which wraps around
    after approximately 1 hour 12 minutes, you could theoretically miss one
    call if your callback is called twice with that interval.
    """
    threshold *= 1000
    max_tick = 0xFFFFFFFF

    class _decorated(object):

        def __init__(self, pigpio_cb):
            self._fn = pigpio_cb
            self.last = 0
            self.is_method = False

        def __call__(self, *args, **kwargs):
            if self.is_method:
                tick = args[3]
            else:
                tick = args[2]
            if self.last > tick:
                delay = max_tick - self.last + tick
            else:
                delay = tick - self.last
            if delay > threshold:
                self._fn(*args, **kwargs)
                print('call passed by debouncer {} {} {}'
                      .format(tick, self.last, threshold))
                self.last = tick
            else:
                print('call filtered out by debouncer {} {} {}'
                      .format(tick, self.last, threshold))

        def __get__(self, instance, type=None):
            # __get__ is called when an instance of `_decorated` is used as a
            # class attribute, which is the case when decorating a method in
            # a class
            self.is_method = True
            return functools.partial(self, instance)

    return _decorated
156b128ffaa579ead371bff3c4b4f20a2a05646b
3,644,400
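A minimal usage sketch for the Debounce decorator above, with the GPIO number, level, and tick values supplied by hand (in real use pigpio provides them); ticks are in microseconds, so with threshold=100 the debounce window is 100 ms.

@Debounce(threshold=100)
def my_cb(gpio, level, tick):
    print('gpio cb: {} {} {}'.format(gpio, level, tick))

my_cb(17, 1, 200_000)  # passes: delay since last accepted tick > 100 ms
my_cb(17, 0, 250_000)  # filtered out: only 50 ms after the accepted call
my_cb(17, 1, 400_000)  # passes again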
import tkinter
from tkinter import filedialog
import os


def get_folder(default_location, title_string=None):
    """Dialog box to browse to a folder. Returns folder path.

    Usage: full_folder_name = get_folder(default_location, [title]),
        where "default_location" is a starting folder location,
        "title" is an optional message to list in the dialog box, and
        "full_folder_name" is the complete selected folder name.
    Written by Phil Wilmarth, 2008, 2016
    """
    # set up GUI elements
    root = tkinter.Tk()
    root.withdraw()
    try:
        root.tk.call('console', 'hide')
    except:
        pass

    # set default title string and location if not passed
    if title_string is None:
        title_string = 'Select a folder with desired files/dirs'
    if not default_location:
        default_location = os.getcwd()

    # create dialog box for folder selection
    root.update()   # helps make sure dialog box goes away after selection
    full_folder_name = filedialog.askdirectory(parent=root,
                                               initialdir=default_location,
                                               title=title_string,
                                               mustexist=True)
    # return full folder name
    return full_folder_name
8c02ca512d97d4122ebb8afe5a07b436a8cf6a03
3,644,401
def compute_quasisymmetry_error( R_lmn, Z_lmn, L_lmn, i_l, Psi, R_transform, Z_transform, L_transform, iota, helicity=(1, 0), data=None, ): """Compute quasi-symmetry triple product and two-term errors. f_C computation assumes transform grids are a single flux surface. Parameters ---------- R_lmn : ndarray Spectral coefficients of R(rho,theta,zeta) -- flux surface R coordinate. Z_lmn : ndarray Spectral coefficients of Z(rho,theta,zeta) -- flux surface Z coordiante. L_lmn : ndarray Spectral coefficients of lambda(rho,theta,zeta) -- poloidal stream function. i_l : ndarray Spectral coefficients of iota(rho) -- rotational transform profile. Psi : float Total toroidal magnetic flux within the last closed flux surface, in Webers. R_transform : Transform Transforms R_lmn coefficients to real space. Z_transform : Transform Transforms Z_lmn coefficients to real space. L_transform : Transform Transforms L_lmn coefficients to real space. iota : Profile Transforms i_l coefficients to real space. helicity : tuple, int Type of quasi-symmetry (M, N). Returns ------- data : dict Dictionary of ndarray, shape(num_nodes,) of quasi-symmetry errors. Key "QS_FF" is the flux function metric, key "QS_TP" is the triple product. """ data = compute_B_dot_gradB( R_lmn, Z_lmn, L_lmn, i_l, Psi, R_transform, Z_transform, L_transform, iota, data=data, ) # TODO: can remove this call if compute_|B| changed to use B_covariant data = compute_covariant_magnetic_field( R_lmn, Z_lmn, L_lmn, i_l, Psi, R_transform, Z_transform, L_transform, iota, data=data, ) M = helicity[0] N = helicity[1] # covariant Boozer components: I = B_theta, G = B_zeta (in Boozer coordinates) if check_derivs("I", R_transform, Z_transform, L_transform): data["I"] = jnp.mean(data["B_theta"] * data["sqrt(g)"]) / jnp.mean( data["sqrt(g)"] ) data["G"] = jnp.mean(data["B_zeta"] * data["sqrt(g)"]) / jnp.mean( data["sqrt(g)"] ) # QS two-term (T^3) if check_derivs("f_C", R_transform, Z_transform, L_transform): data["f_C"] = (M * data["iota"] - N) * (data["psi_r"] / data["sqrt(g)"]) * ( data["B_zeta"] * data["|B|_t"] - data["B_theta"] * data["|B|_z"] ) - (M * data["G"] + N * data["I"]) * data["B*grad(|B|)"] # QS triple product (T^4/m^2) if check_derivs("f_T", R_transform, Z_transform, L_transform): data["f_T"] = (data["psi_r"] / data["sqrt(g)"]) * ( data["|B|_t"] * data["(B*grad(|B|))_z"] - data["|B|_z"] * data["(B*grad(|B|))_t"] ) return data
6c33dcb70fe0e87f48b5b8d9e7e400ded2417257
3,644,402
def int_to_uuid(number):
    """Convert a positive integer to a UUID: a string of characters from
    `symbols` that is at least 3 letters long."""
    # `base` and `encode_symbols` are assumed to be module-level constants
    # defined elsewhere in the original module.
    assert isinstance(number, int) and number >= 0
    if number == 0:
        return '000'
    symbol_string = ''
    while number > 0:
        remainder = number % base
        number //= base
        symbol_string = encode_symbols[remainder] + symbol_string
    return symbol_string.rjust(3, '0')
49ce7bfeb4e11c90b2589b8b4003c3135ba78f53
3,644,403
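A runnable sketch of int_to_uuid above; `encode_symbols` and `base` are not defined in the snippet itself, so the base-36 alphabet below is a hypothetical stand-in.

encode_symbols = '0123456789abcdefghijklmnopqrstuvwxyz'  # hypothetical alphabet
base = len(encode_symbols)

print(int_to_uuid(0))      # '000'
print(int_to_uuid(12345))  # '9ix' (12345 in base 36, padded to 3 characters)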
import argparse


def key_value_data(string):
    """Validate the string to be in the form key=value."""
    if string:
        key, value = string.split("=")
        if not (key and value):
            msg = "{} not in 'key=value' format.".format(string)
            raise argparse.ArgumentTypeError(msg)
        return {key: value}
    return {}
c7d374f1d45fb49d8629a9956948603a82802f5f
3,644,404
def count_digit(n, digit):
    """Return how many times digit appears in n.

    >>> count_digit(55055, 5)
    4
    """
    if n == 0:
        return 0
    else:
        if n % 10 == digit:
            return count_digit(n // 10, digit) + 1
        else:
            return count_digit(n // 10, digit)
29cf3db8cca85e14b3b537f96246803d8176441d
3,644,405
def cal_chisquare(data, f, pepoch, bin_profile, F1, F2, F3, F4, parallel=False): """ calculate the chisquare distribution for frequency search on the pepoch time. """ chi_square = np.zeros(len(f), dtype=np.float64) t0 = pepoch if parallel: for i in prange(len(f)): phi = (data-t0)*f[i] + (1.0/2.0)*((data-t0)**2)*F1 + (1.0/6.0)*((data-t0)**3)*F2 +\ (1.0/24.0)*((data-t0)**4)*F3 + (1.0/120.0)*((data-t0)**5)*F4 phi = phi - np.floor(phi) #counts = numba_histogram(phi, bin_profile)[0] #NOTE: The histogram bin should give the edge of bin, instead of the bin number. #NOTE: For those pulse with narrow peak, it will be incorrect while calculate the chisquare counts = np.histogram(phi, np.linspace(0, 1, bin_profile+1)[:-1])[0] expectation = np.mean(counts) chi_square[i] = np.sum( (counts - expectation)**2 / expectation ) else: for i in range(len(f)): phi = (data-t0)*f[i] + (1.0/2.0)*((data-t0)**2)*F1 + (1.0/6.0)*((data-t0)**3)*F2 +\ (1.0/24.0)*((data-t0)**4)*F3 + (1.0/120.0)*((data-t0)**5)*F4 phi = phi - np.floor(phi) #counts = numba_histogram(phi, bin_profile)[0] #NOTE: The histogram bin should give the edge of bin, instead of the bin number. #NOTE: For those pulse with narrow peak, it will be incorrect while calculate the chisquare counts = np.histogram(phi, np.linspace(0, 1, bin_profile+1)[:-1])[0] expectation = np.mean(counts) chi_square[i] = np.sum( (counts - expectation)**2 / expectation ) return chi_square
13dfc33cd975758a3f6c64ff2da4f91409cfdae4
3,644,406
def get_css_urls(bundle, debug=None): """ Fetch URLs for the CSS files in the requested bundle. :param bundle: Name of the bundle to fetch. :param debug: If True, return URLs for individual files instead of the minified bundle. """ if debug is None: debug = settings.DEBUG if debug: items = [] for item in settings.MINIFY_BUNDLES['css'][bundle]: should_compile = item.endswith('.less') and getattr( settings, 'LESS_PREPROCESS', False ) if should_compile: compile_css(item) items.append('%s.css' % item) else: items.append(item) return [static(item) for item in items] else: return [static(f'css/{bundle}-min.css')]
2f16a9da213cfebd58550cab7ffd7a0438dfd840
3,644,407
import numpy as np


def rng() -> np.random.Generator:
    """Random number generator."""
    return np.random.default_rng(42)
2c2f88eed71c9429edc25a06890266f5b7e8fc22
3,644,408
def add_values_in_dict(sample_dict, key, list_of_values):
    """Append multiple values to a key in the given dictionary"""
    if key not in sample_dict:
        sample_dict[key] = list()
    sample_dict[key].extend(list_of_values)
    temp_list = sample_dict[key]
    temp_list = list(set(temp_list))  # remove duplicates
    sample_dict[key] = temp_list
    return sample_dict
8c30b50256fd16eb1b9eefae5cc6ab5be58fe85f
3,644,409
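A short usage sketch for the helper above; note that the set-based de-duplication does not preserve insertion order.

d = {}
d = add_values_in_dict(d, 'colors', ['red', 'blue'])
d = add_values_in_dict(d, 'colors', ['blue', 'green'])
print(sorted(d['colors']))  # ['blue', 'green', 'red'] -- 'blue' kept once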
def parse_length(line, p) -> int: """ parse length specifer for note or rest """ n_len = voices[ivc].meter.dlen # start with default length try: if n_len <= 0: SyntaxError(f"got len<=0 from current voice {line[p]}") if line[p].isdigit(): # multiply note length fac = parse_uint() if not fac: fac = 1 n_len *= fac if line[p] == '/': # divide note length while line[p] == '/': p += 1 if line[p].isdigit(): fac = parse_uint() else: fac = 2 if n_len % fac: SyntaxError(f"Bad length divisor {line[p-1]}") return n_len n_len = n_len/fac except SyntaxError as se: print(f"{se} Cannot proceed without default length. Emergency stop.") exit(1) return n_len
cee6c83eecbea455a53c3d4ac9a778d7351b66e0
3,644,410
def get_dual_shapes_and_types(bounds_elided): """Get shapes and types of dual vars.""" dual_shapes = [] dual_types = [] layer_sizes = utils.layer_sizes_from_bounds(bounds_elided) for it in range(len(layer_sizes)): m = layer_sizes[it] m = [m] if isinstance(m, int) else list(m) if it < len(layer_sizes)-1: n = layer_sizes[it + 1] n = [n] if isinstance(n, int) else list(n) shapes = { 'lam': [1] + n, 'nu': [1] + m, 'muminus': [1] + n, 'muplus': [1] + n, 'nu_quad': [], 'muminus2': [], } types = { 'lam': utils.DualVarTypes.EQUALITY, 'nu': utils.DualVarTypes.INEQUALITY, 'muminus': utils.DualVarTypes.INEQUALITY, 'muplus': utils.DualVarTypes.INEQUALITY, 'nu_quad': utils.DualVarTypes.INEQUALITY, 'muminus2': utils.DualVarTypes.INEQUALITY, } dual_shapes.append(DualVar(**{ k: np.array(s) for k, s in shapes.items()})) dual_types.append(DualVar(**types)) else: shapes = {'nu': [1] + m, 'nu_quad': []} types = {'nu': utils.DualVarTypes.INEQUALITY, 'nu_quad': utils.DualVarTypes.INEQUALITY} dual_shapes.append(DualVarFin(**{ k: np.array(s) for k, s in shapes.items()})) dual_types.append(DualVarFin(**types)) # Add kappa N = sum([np.prod(np.array(i)) for i in layer_sizes]) dual_shapes.append(np.array([1, N+1])) dual_types.append(utils.DualVarTypes.INEQUALITY) return dual_shapes, dual_types
297a305d8ef71d614eae21c3fc5c52ef08b271a3
3,644,411
def linear_search(alist, key):
    """Return index of key in alist. Return -1 if key not present."""
    for i in range(len(alist)):
        if alist[i] == key:
            return i
    return -1
ab4c0517f9103a43509b0ba511c75fe03ea6e043
3,644,412
def overlap_integral(xi, yi, zi, nxi, nyi, nzi, beta_i, xj, yj, zj, nxj, nyj, nzj, beta_j): """ overlap <i|j> between unnormalized Cartesian GTOs by numerical integration on a multicenter Becke grid Parameters ---------- xi,yi,zi : floats Cartesian positions of center i nxi,nyi,nzi : int >= 0 powers of Cartesian primitive GTO i beta_i : float > 0 exponent of radial part of orbital i xj,yj,zj : floats Cartesian positions of center j nxj,nyj,nzj : int >= 0 powers of Cartesian primitive GTO j beta_j : float > 0 exponent of radial part of orbital j """ # unnormalized bra and ket Gaussian type orbitals def CGTOi(x,y,z): dx, dy, dz = x-xi, y-yi, z-zi dr2 = dx*dx+dy*dy+dz*dz return pow(dx, nxi)*pow(dy,nyi)*pow(dz,nzi) * np.exp(-beta_i * dr2) def CGTOj(x,y,z): dx, dy, dz = x-xj, y-yj, z-zj dr2 = dx*dx+dy*dy+dz*dz return pow(dx, nxj)*pow(dy,nyj)*pow(dz,nzj) * np.exp(-beta_j * dr2) def integrand(x,y,z): return CGTOi(x,y,z) * CGTOj(x,y,z) # place a spherical grid on each center: ri, rj atoms = [(1, (xi, yi, zi)), (1, (xj, yj, zj))] # do the integral numerically olap = becke.integral(atoms, integrand) return olap
500da2037d5e9f880f239788156cc717163b6b0c
3,644,413
def save_project_id(config: Config, project_id: int):
    """Save the project ID in the project data"""
    data_dir = config.project.data_dir
    filename = data_dir / DEFAULT_PROJECTID_FILENAME
    with open(filename, "w") as f:
        return f.write(str(project_id))
9769067d222c8430764a2abd4def67a9ce45e49a
3,644,414
async def session_start():
    """
    session_start: Creates a new database session for external functions and returns it

    - Keep in mind that this is only for external functions that require multiple transactions
    - Such as adding songs

    :return: A new database session
    """
    return session_maker()
cb84d30a8a89bdf58c63114fa84558d7567396bd
3,644,415
from pm4py.objects.bpmn.obj import BPMN from pm4py.objects.bpmn.util.sorting import get_sorted_nodes_edges from typing import Optional from typing import Dict from typing import Any import tempfile def apply(bpmn_graph: BPMN, parameters: Optional[Dict[Any, Any]] = None) -> graphviz.Digraph: """ Visualize a BPMN graph Parameters ------------- bpmn_graph BPMN graph parameters Parameters of the visualization, including: - Parameters.FORMAT: the format of the visualization - Parameters.RANKDIR: the direction of the representation (default: LR) Returns ------------ gviz Graphviz representation """ if parameters is None: parameters = {} image_format = exec_utils.get_param_value(Parameters.FORMAT, parameters, "png") rankdir = exec_utils.get_param_value(Parameters.RANKDIR, parameters, "LR") font_size = exec_utils.get_param_value(Parameters.FONT_SIZE, parameters, 12) font_size = str(font_size) bgcolor = exec_utils.get_param_value(Parameters.BGCOLOR, parameters, "transparent") filename = tempfile.NamedTemporaryFile(suffix='.gv') viz = Digraph("", filename=filename.name, engine='dot', graph_attr={'bgcolor': bgcolor}) viz.graph_attr['rankdir'] = rankdir nodes, edges = get_sorted_nodes_edges(bpmn_graph) for n in nodes: n_id = str(id(n)) if isinstance(n, BPMN.Task): viz.node(n_id, shape="box", label=n.get_name(), fontsize=font_size) elif isinstance(n, BPMN.StartEvent): viz.node(n_id, label="", shape="circle", style="filled", fillcolor="green", fontsize=font_size) elif isinstance(n, BPMN.EndEvent): viz.node(n_id, label="", shape="circle", style="filled", fillcolor="orange", fontsize=font_size) elif isinstance(n, BPMN.ParallelGateway): viz.node(n_id, label="+", shape="diamond", fontsize=font_size) elif isinstance(n, BPMN.ExclusiveGateway): viz.node(n_id, label="X", shape="diamond", fontsize=font_size) elif isinstance(n, BPMN.InclusiveGateway): viz.node(n_id, label="O", shape="diamond", fontsize=font_size) elif isinstance(n, BPMN.OtherEvent): viz.node(n_id, label="", shape="circle", fontsize=font_size) for e in edges: n_id_1 = str(id(e[0])) n_id_2 = str(id(e[1])) viz.edge(n_id_1, n_id_2) viz.attr(overlap='false') viz.format = image_format return viz
ca1e25dfe758712125327717e01cba36788b8b38
3,644,416
def _predict_exp(data, paulistring):
    """Compute expectation values of paulistring given bitstring data."""
    expectation_value = 0
    for a in data:
        val = 1
        for i, pauli in enumerate(paulistring):
            idx = a[i]
            if pauli == "I":
                continue
            elif pauli == "X":
                ls = [1, 1, -1, -1]
            elif pauli == "Y":
                ls = [-1, 1, 1, -1]
            elif pauli == "Z":
                ls = [1, -1, 1, -1]
            val *= ls[idx]
        expectation_value += val / len(data)
    return expectation_value
32737920e750780655ba85ae9e57d6e3cd0f194c
3,644,417
import math def AGI(ymod1, c02500, c02900, XTOT, MARS, sep, DSI, exact, nu18, taxable_ubi, II_em, II_em_ps, II_prt, II_no_em_nu18, c00100, pre_c04600, c04600): """ Computes Adjusted Gross Income (AGI), c00100, and compute personal exemption amount, c04600. """ # calculate AGI assuming no foreign earned income exclusion c00100 = ymod1 + c02500 - c02900 + taxable_ubi # calculate personal exemption amount if II_no_em_nu18: # repeal of personal exemptions for deps. under 18 pre_c04600 = max(0, XTOT - nu18) * II_em else: pre_c04600 = XTOT * II_em if DSI: pre_c04600 = 0. # phase-out personal exemption amount if exact == 1: # exact calculation as on tax forms line5 = max(0., c00100 - II_em_ps[MARS - 1]) line6 = math.ceil(line5 / (2500. / sep)) line7 = II_prt * line6 c04600 = max(0., pre_c04600 * (1. - line7)) else: # smoothed calculation needed for sensible mtr calculation dispc_numer = II_prt * (c00100 - II_em_ps[MARS - 1]) dispc_denom = 2500. / sep dispc = min(1., max(0., dispc_numer / dispc_denom)) c04600 = pre_c04600 * (1. - dispc) return (c00100, pre_c04600, c04600)
aed1c311bc6b46b46bfea3e9756cd73933c37ca9
3,644,418
from nltk.tokenize import word_tokenize  # assumed source of word_tokenize


def tokenize(headline_list):
    """
    Takes a list of headlines as input and returns a list of lists of tokens.
    """
    tokenized = []
    for headline in headline_list:
        tokens = word_tokenize(headline)
        tokenized.append(tokens)
    return tokenized
e5cf957a72d6d08d95787bf0f2222e525727c54a
3,644,419
def create_en_sentiment_component(nlp: Language, name: str, force: bool) -> Language: """ Allows the English sentiment to be added to a spaCy pipe using nlp.add_pipe("asent_en_v1"). """ LEXICON.update(E_LEXICON) return Asent( nlp, name=name, lexicon=LEXICON, intensifiers=INTENSIFIERS, negations=NEGATIONS, contrastive_conjugations=CONTRASTIVE_CONJ, lowercase=True, lemmatize=False, force=force, )
f66ab4d86da2d42adb7c8da95cfdff9517dcc34f
3,644,420
import json def lambda_handler(event, context): """ 店舗一覧情報を返す Parameters ---------- event : dict フロントより渡されたパラメータ context : dict コンテキスト内容。 Returns ------- shop_list : dict 店舗一覧情報 """ # パラメータログ logger.info(event) try: shop_list = get_shop_list() except Exception as e: logger.exception('Occur Exception: %s', e) return utils.create_error_response('Error') body = json.dumps( shop_list, default=utils.decimal_to_int, ensure_ascii=False) return utils.create_success_response(body)
48eebf18d34e50a98d00bd589b9d8a0712b9f985
3,644,421
import warnings def mask_land_ocean(data, land_mask, ocean=False): """Mask land or ocean values using a land binary mask. Parameters ---------- data: xarray.DataArray This input array can only have one of 2, 3 or 4 dimensions. All spatial dimensions should coincide with those of the land binary mask. land_mask: xarray.DataArray This array must have the same spatial extent as the input data. Though it can have different times or levels. It can be binary or not, because internally it will make sure of it. Sometimes these masks actually contain a range of values from 0 to 1. ocean: bool, optional Whether the user wants to mask land or ocean values. Default is to mask ocean values (False). Returns ------- xarray.Datarray same as input data but with masked values in either land or ocean. """ # noqa # remove numpy warning regarding nan_policy msg = 'Mean of empty slice' warnings.filterwarnings('ignore', message=msg) # get number of dimensions of both data arrays ndim_ds = len(data.dims) ndim_lm = len(land_mask.dims) # get dimensions of dataset if ndim_ds == 2: ntim = None nlat, mlon = data.shape elif ndim_ds == 3: ntim, nlat, mlon = data.shape elif ndim_ds == 4: ntim, nlev, nlat, mlon = data.shape else: msg = 'only 2, 3 or 4 dimensions allowed for data set' raise TypeError(msg) # get dimensions of land mask if ndim_lm == 2: lntim = None lnlat, lmlon = land_mask.shape elif ndim_lm == 3: lntim, lnlat, lmlon = land_mask.shape else: msg = 'only 2 or 3 dimensions allowed for land mask' raise TypeError(msg) # make sure dims agree if nlat != lnlat or mlon != lmlon: msg = 'spatial coordinates do not agree' raise ValueError(msg) # get a single land mask if many if lntim is not None or lntim == 1: land_mask = land_mask[0] # convert mask to binary if not already land_mask = binary_mask(land_mask) # create mask 1 (land) = True, 0 (ocean) = False mask = land_mask.values == 1 # tile mask to number of times if ndim_ds == 2: tmask = mask elif ndim_ds == 3: tmask = np.tile(mask, (ntim, 1, 1)) else: tmask = np.tile(mask, (ntim, 1, 1, 1)) # create masked array values = np.array(data.values) if ocean is True: maskval = np.ma.masked_array(values, tmask) else: maskval = np.ma.masked_array(values, tmask == False) # noqa E712 # replace values newdata = data.copy() newdata.values = maskval return newdata
97d32c2720db12e47738a58d2152d7052af095ed
3,644,422
def create_project_type(project_type_params):
    """
    :param project_type_params: The parameters for creating a ProjectType instance -- the dict should include
        the 'type' key, which specifies the ProjectType subclass name, and key/value pairs matching constructor
        arguments for that ProjectType subclass.
    :type project_type_params: dict
    :return: The project_type instance
    :rtype: project_type.project_type.ProjectType
    """
    project_type_params = project_type_params.copy()
    project_type_name = project_type_params.pop('type')

    project_type_class = get_project_type_subclass(project_type_name)
    if project_type_class:
        # create object using project_type_params as constructor args
        return project_type_class(**project_type_params)

    # Not yet implemented other project types
    return None
446961674985a2f5a64417d6f1f9bc6b39f7fbe4
3,644,423
from os import getenv


def env_str(env_name: str, default: str) -> str:
    """Get the environment variable's value converted into a string."""
    return getenv(env_name, default)
529adfcfe770a39a5997d92792fdd2c857b32a41
3,644,424
def server_bam_statistic(ip, snmp_config_data):
    """
    :param ip:
    :param snmp_config_data:
    :return:
    """
    try:
        var_binds = get_snmp_multiple_oid(oids=oids_bam, ip=ip, snmp_config_data=snmp_config_data)
        server_memory_usage = get_memory_usage(var_binds)
        server_cpu_usage = get_cpu_process(ip=ip, snmp_config_data=snmp_config_data)
    except Exception as ex:
        raise ex
    logger.debug("Server_bam_statistic: mem_usage: {} - cpu_usage: {}".format(server_memory_usage, server_cpu_usage))
    return server_memory_usage, server_cpu_usage
9a22c2dc339defad39df3edd55e2c3f00fc91763
3,644,425
def extract_sigma_var_names(filename_nam):
    """
    Parses a 'sigma.nam' file containing the variable names, and outputs a
    list of these names. Some vector components contain a semicolon in their
    name; if so, break the name at the semicolon and keep just the 1st part.
    """
    var_names = []
    with open(filename_nam, 'r') as file:
        for line in file:
            var_name = line.strip()
            # check for semicolon
            if ';' in var_name:
                var_name = var_name.split(';')[0]
            var_names.append(var_name)
    return var_names
930e855d47c4303cac28e9973982392489fb577d
3,644,426
def vectors_to_arrays(vectors): """ Convert 1d vectors (lists, arrays or pandas.Series) to C contiguous 1d arrays. Arrays must be in C contiguous order for us to pass their memory pointers to GMT. If any are not, convert them to C order (which requires copying the memory). This usually happens when vectors are columns of a 2d array or have been sliced. If a vector is a list or pandas.Series, get the underlying numpy array. Parameters ---------- vectors : list of lists, 1d arrays or pandas.Series The vectors that must be converted. Returns ------- arrays : list of 1d arrays The converted numpy arrays Examples -------- >>> import numpy as np >>> import pandas as pd >>> data = np.array([[1, 2], [3, 4], [5, 6]]) >>> vectors = [data[:, 0], data[:, 1], pd.Series(data=[-1, -2, -3])] >>> all(i.flags.c_contiguous for i in vectors) False >>> all(isinstance(i, np.ndarray) for i in vectors) False >>> arrays = vectors_to_arrays(vectors) >>> all(i.flags.c_contiguous for i in arrays) True >>> all(isinstance(i, np.ndarray) for i in arrays) True >>> data = [[1, 2], (3, 4), range(5, 7)] >>> all(isinstance(i, np.ndarray) for i in vectors_to_arrays(data)) True """ arrays = [as_c_contiguous(np.asarray(i)) for i in vectors] return arrays
c9a3878f2d1099ffd985525931f05df1b8631c46
3,644,427
import random
import string


def random_name_gen(size=6):
    """Generate a random python attribute name."""
    return ''.join(
        [random.choice(string.ascii_uppercase)] +
        [random.choice(string.ascii_uppercase + string.digits) for i in range(size - 1)]
    ) if size > 0 else ''
67ade3cde47fffc126cbdb11f01ffda2672d021c
3,644,428
import logging import pandas import numpy def load_player_history_table(div_soup): """Parse the HTML/Soup table for the numberfire predictions. Returns a pandas DataFrame """ if not div_soup: return None rows = div_soup.findAll('tr') table_header = [x.getText() for x in rows[0].findAll('th')] table_data = [[x.getText() for x in row.findAll('td')] for row in rows[1:]] if not table_data: logging.debug("No predictions found!") return None table = pandas.io.parsers.TextParser(table_data, names=table_header, index_col=table_header.index('Date'), parse_dates=True).read() # Next we want to separate combined projection stats like FGM-A into separate columns for FGM and FGA dash_cols = [col for col in table.columns if '-' in col] for col in dash_cols: name_parts = col.split('-') series1name = name_parts[0] series2name = name_parts[0][:-1] + name_parts[1] series1data = table[col].apply(lambda data: float(data.split('-')[0])) series2data = table[col].apply(lambda data: float(data.split('-')[1])) table[series1name] = pandas.Series(data=series1data, name=series1name, index=table.index, dtype=numpy.dtype('float')) table[series2name] = pandas.Series(data=series2data, name=series2name, index=table.index, dtype=numpy.dtype('float')) table.drop(dash_cols, axis=1, inplace=True) return table
64b433bf01f0857714cebb4df677a20578044020
3,644,429
import numpy as np


def is_identity(u, tol=1e-15):
    """Test if a matrix is identity.

    Args:
        u: np.ndarray
            Matrix to be checked.
        tol: float
            Threshold below which two matrix elements are considered equal.
    """
    dims = np.array(u).shape
    if dims[0] != dims[1]:
        raise Exception("Input matrix is not square.")
    return np.allclose(u, np.eye(dims[0]), atol=tol)
160f33651b9d79448167423542cd1e2ad0bd3110
3,644,430
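A quick check of the helper above, using the version with the NumPy import in place:

import numpy as np

print(is_identity(np.eye(3)))                     # True
print(is_identity(np.array([[1.0, 0.0],
                            [0.0, 2.0]])))        # False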
def get_metrics():
    """
    Collects various system metrics and returns them as a single dict.
    """
    metrics = {}
    metrics.update(get_memory_metrics())
    metrics.update(get_cpu_metrics())
    metrics.update(get_disk_metrics())
    return metrics
d4055e0eb23d9babb9882bc3c089af293c638def
3,644,431
def getPost(blog_id, username, password, post_id, fields=[]):
    """
    Parameters
        int blog_id
        string username
        string password
        int post_id
        array fields: Optional. List of field or meta-field names to include in response.
    """
    logger.debug("%s.getPost entered" % __name__)
    user = get_user(username, password)
    post = Post.objects.get(id=post_id)
    check_perms(user, post)
    return _post_struct(post)
4d3c481bff63a7aa425b6f98d910039bf1a9cbaf
3,644,432
import inspect def create_fun(name: str, obj, options: dict): """ Generate a dictionnary that contains the information about a function **Parameters** > **name:** `str` -- name of the function as returned by `inspect.getmembers` > **obj:** `object` -- object of the function as returned by `inspect.getmembers` > **options:** `dict` -- extended options **Returns** > `dict` -- with keys: > - *name*, *obj* -- the function name and object as returned by `inspect.getmembers` > - *module* -- name of the module > - *path* -- path of the module file > - *doc* -- docstring of the function > - *source* -- source code of the function > - *args* -- arguments of the function as a `inspect.signature` object """ ignore_prefix = options.get("ignore_prefix") if ignore_prefix is not None and name[:len(ignore_prefix)]==ignore_prefix: return None fun = {} fun["name"] = name if name else 'undefined' fun["obj"] = obj fun["module"] = inspect.getmodule(obj).__name__ fun["path"] = inspect.getmodule(obj).__file__ fun["doc"] = inspect.getdoc(obj) or "" fun["source"] = rm_docstring_from_source(inspect.getsource(obj)) fun["args"] = inspect.signature(obj) return fun
f95e6fab1ed0cf6a10574b790e81933c40b924c4
3,644,433
def serial_ss(file_read, forward_rate, file_rateconstant, file_energy, matrix, species_list, factor, initial_y, t_final, third_body=None, chemkin_data=None, smiles=None, chemkin=True): """ Iteratively solves the system of ODEs for different rate constants generated from the data file in serial Parameters ---------- file_read : str path of the 'param_set' file where all the parameter combinations are listed forward_rate : list A list of forward reaction rates for all the reactions in the mechanism file_rateconstant : str path to the file `complete_rateconstantlist.dat` file_energy : str path to the file 'free_energy_library.dat' matrix : ndarray stoichiometric matrix species_list : list A list of unique species in the mechanism initial_y : list A list of initial concentrations t_final : float final time in seconds third_body : ndarray matrix with third body efficiencies chemkin_data :ndarray the data from parsed chemkin reaction file smiles : dict the smiles dictionary generated from species_smiles.dat file factor : float conversion factor from given unit of energy to kJ chemkin : bool indicates if chemkin files are read as input files default = True Returns ---------- : list A list of final concentrations of all the species at t_final for all the given combinations of parameters listed in 'param_set.txt' file """ read_file = open(file_read, "r") results = [] for pos, data in enumerate(read_file): result = func_solv(data, forward_rate, file_rateconstant, file_energy, matrix, species_list, initial_y, t_final, factor, third_body, pos, chemkin_data, smiles) results.append(result) return results
f23693826c3507d9a2327b4b492f20469fca963f
3,644,434
def contains_left_button(buttons) -> bool:
    """
    Test if the buttons contains the left mouse button.

    The "buttons" should be values returned by get_click() or get_mouse()

    :param buttons: the buttons to be tested
    :return: if the buttons contains the left mouse button
    """
    return (buttons & QtCore.Qt.LeftButton) > 0
a5cde64ce1d1fa5fd1fe57988f8b60db03fc2dcf
3,644,435
from typing import List from typing import Dict def extract_interest_from_import_batch( import_batch: ImportBatch, interest_rt: ReportType) -> List[Dict]: """ The return list contains dictionaries that contain data for accesslog creation, but without the report_type and import_batch fields """ # now we compute the interest data from it # go through the interest metrics and extract info about how to remap the values interest_metrics = [] metric_remap = {} metric_to_ig = {} # TODO: if we preselected the import_batches before submitting them here # we could remove the whole test here, which create a query for each import batch if import_batch.report_type not in import_batch.platform.interest_reports.all(): # the report_type does not represent interest for this platform, we can skip it logger.debug('Import batch report type not in platform interest: %s - %s', import_batch.report_type.short_name, import_batch.platform) return [] for rim in import_batch.report_type.reportinterestmetric_set.all().\ select_related('interest_group'): if rim.target_metric_id: metric_remap[rim.metric_id] = rim.target_metric_id interest_metrics.append(rim.metric_id) metric_to_ig[rim.metric_id] = rim.interest_group # remap interest groups into DimensionText metric_to_dim1 = {} dim1 = interest_rt.dimensions_sorted[0] for metric_id, ig in metric_to_ig.items(): # we do not use update_or_create here, because it creates one select and one update # even if nothing has changed dim_text, _created = DimensionText.objects.get_or_create( dimension=dim1, text=ig.short_name, defaults={'text_local_en': ig.name_en, 'text_local_cs': ig.name_cs}) if dim_text.text_local_en != ig.name_en or dim_text.text_local_cs != ig.name_cs: dim_text.text_local_en = ig.name_en dim_text.text_local_cs = ig.name_cs dim_text.save() metric_to_dim1[metric_id] = dim_text.pk # get source data for the new logs new_logs = [] # for the following dates, there are data for a superseeding report type, so we do not # want to created interest records for them clashing_dates = {} if import_batch.report_type.superseeded_by: if hasattr(import_batch, 'min_date') and hasattr(import_batch, 'max_date'): # check if we have an annotated queryset and do not need to compute the min-max dates min_date = import_batch.min_date max_date = import_batch.max_date else: date_range = import_batch.accesslog_set.aggregate(min_date=Min('date'), max_date=Max('date')) min_date = date_range['min_date'] max_date = date_range['max_date'] if min_date and max_date: # the accesslog_set might be empty and then there is nothing that could be clashing clashing_dates = { x['date'] for x in import_batch.report_type.superseeded_by.accesslog_set. filter(platform_id=import_batch.platform_id, organization_id=import_batch.organization_id, date__lte=max_date, date__gte=min_date). values('date') } for new_log_dict in import_batch.accesslog_set.filter(metric_id__in=interest_metrics).\ exclude(date__in=clashing_dates).\ values('organization_id', 'metric_id', 'platform_id', 'target_id', 'date').\ annotate(value=Sum('value')).iterator(): # deal with stuff related to the metric metric_id = new_log_dict['metric_id'] # fill in dim1 based on the interest group of the metric new_log_dict['dim1'] = metric_to_dim1[metric_id] # remap metric to target metric if desired new_log_dict['metric_id'] = metric_remap.get(metric_id, metric_id) new_logs.append(new_log_dict) return new_logs
cbe26e47d5f5214e0599154ae5d3357370fed123
3,644,436
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Unload a config entry."""
    _LOGGER.debug("Disconnecting from spa")
    spa: BalboaSpaWifi = hass.data[DOMAIN][entry.entry_id]
    if unload_ok := await hass.config_entries.async_unload_platforms(entry, PLATFORMS):
        hass.data[DOMAIN].pop(entry.entry_id)
    await spa.disconnect()
    return unload_ok
87216dec99ab9ff14d1ab1f381ea440c61e75fa6
3,644,437
def is_bst(t: BST) -> bool:
    """Returns true if t is a valid BST object, false otherwise.

    Invariant: for each node n in t, if n.left exists, then n.left <= n,
    and if n.right exists, then n.right >= n."""
    if not isinstance(t, BST):
        return False
    if t._root and t._root.parent is not None:
        return False
    return all_bst_nodes(t._root) and has_bst_property(t._root)
5180d01f8306f79b6ed4551e7d2bd4046a088ac2
3,644,438
def are_embedding_layer_positions_ok_for_testing(model): """ Test data can only be generated if all embeddings layers are positioned directly behind the input nodes """ def count_embedding_layers(model): layers = model.layers result = 0 for layer in layers: if isinstance(layer, keras.layers.Embedding): result += 1 layer_type = type(layer).__name__ if layer_type in ['Model', 'Sequential']: result += count_embedding_layers(layer) return result def count_embedding_layers_at_input_nodes(model): result = 0 for input_layer in get_model_input_layers(model): if input_layer._outbound_nodes and isinstance( input_layer._outbound_nodes[0].outbound_layer, keras.layers.Embedding): result += 1 return result return count_embedding_layers(model) == count_embedding_layers_at_input_nodes(model)
4317cdc11e0b0acf84fda0c2633400851075e124
3,644,439
from typing import Any


def all_tasks_stopped(tasks_state: Any) -> bool:
    """
    Checks if all tasks are stopped or if any are still running.

    Parameters
    ----------
    tasks_state: Any
        Task state dictionary object

    Returns
    -------
    response: bool
        True if all tasks are stopped.
    """
    for t in tasks_state["tasks"]:
        if t["lastStatus"] in ("PENDING", "RUNNING"):
            return False
    return True
98edffe71052cc114a7dda37a17b3a346ef59ef8
3,644,440
import PIL def enhance_color(image, factor): """Change the strength of colors in an image. This function has identical outputs to ``PIL.ImageEnhance.Color``. Added in 0.4.0. **Supported dtypes**: * ``uint8``: yes; fully tested * ``uint16``: no * ``uint32``: no * ``uint64``: no * ``int8``: no * ``int16``: no * ``int32``: no * ``int64``: no * ``float16``: no * ``float32``: no * ``float64``: no * ``float128``: no * ``bool``: no Parameters ---------- image : ndarray The image to modify. factor : number Colorfulness of the output image. Values close to ``0.0`` lead to grayscale images, values above ``1.0`` increase the strength of colors. Sane values are roughly in ``[0.0, 3.0]``. Returns ------- ndarray Color-modified image. """ return _apply_enhance_func(image, PIL.ImageEnhance.Color, factor)
f6654fc0b4dfebecf221e4a34ec0c894e1c72d1f
3,644,441
import numpy as np


def random_deceleration(most_comfortable_deceleration, lane_pos):
    """
    Return a deceleration based on given attribute of the vehicle

    :param most_comfortable_deceleration: the given attribute of the vehicle
    :param lane_pos: y
    :return: the deceleration adopted by human driver
    """
    if lane_pos:
        sigma = 0.3
    else:
        sigma = 0.5
    return np.random.normal(most_comfortable_deceleration, sigma)
c5e4f9ca16285c020b9b7f2376e0b43f198d5173
3,644,442
def dataclass_fields(dc):
    """Returns a dataclass's fields dictionary."""
    return {name: getattr(dc, name) for name in dc.__dataclass_fields__}
4b82af3bfbc02f7bbfcf1aecb6f6501ef10d86e1
3,644,443
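A small usage sketch for the helper above, using a throwaway dataclass:

from dataclasses import dataclass


@dataclass
class Point:
    x: int
    y: int


print(dataclass_fields(Point(1, 2)))  # {'x': 1, 'y': 2}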
import os import subprocess def gets_ontology_statistics(file_location: str, owltools_location: str = './pkt_kg/libs/owltools') -> str: """Uses the OWL Tools API to generate summary statistics (i.e. counts of axioms, classes, object properties, and individuals). Args: file_location: A string that contains the file path and name of an ontology. owltools_location: A string pointing to the location of the owl tools library. Returns: stats: A formatted string containing descriptive statistics. Raises: TypeError: If the file_location is not type str. OSError: If file_location points to a non-existent file. ValueError: If file_location points to an empty file. """ if not isinstance(file_location, str): raise TypeError('file_location must be a string') elif not os.path.exists(file_location): raise OSError('{} does not exist!'.format(file_location)) elif os.stat(file_location).st_size == 0: raise ValueError('{} is empty'.format(file_location)) else: output = subprocess.check_output([os.path.abspath(owltools_location), file_location, '--info']) res = output.decode('utf-8').split('\n')[-5:] cls, axs, op, ind = res[0].split(':')[-1], res[3].split(':')[-1], res[2].split(':')[-1], res[1].split(':')[-1] sent = 'The knowledge graph contains {0} classes, {1} axioms, {2} object properties, and {3} individuals' stats = sent.format(cls, axs, op, ind) return stats
d3293b00a49668a48a788d00a09efe603a6d7aee
3,644,444
from docutils.parsers.rst.directives import _directives, _directive_registry def get_directives(app: Sphinx): """Return all directives available within the current application.""" all_directives = {} all_directives.update(_directive_registry) all_directives.update(_directives) for key, (modulename, classname) in _directive_registry.items(): if key not in all_directives: try: module = import_module(f"docutils.parsers.rst.directives.{modulename}") all_directives[key] = getattr(module, classname) except (AttributeError, ModuleNotFoundError): pass for domain_name in app.env.domains: domain = app.env.get_domain(domain_name) prefix = "" if domain.name == "std" else f"{domain.name}:" # TODO 'default_domain' is also looked up by # sphinx.util.docutils.sphinx_domains.lookup_domain_element for direct_name, direct in domain.directives.items(): all_directives[f"{prefix}{direct_name}"] = direct return all_directives
bc65f3453fd9f473b33457ddd06a2052558b02dd
3,644,445
import os


def _get_wavs_from_dir(dir):
    """Return a sorted list of wave files from a directory."""
    return [os.path.join(dir, f) for f in sorted(os.listdir(dir))
            if _is_wav_file(f)]
6e7c29ffac008afa8cc80a7ce0fccb307d69ea63
3,644,446
from mabel import DictSet, Reader from ...internals.group_by import GroupBy def SqlReader(sql_statement: str, **kwargs): """ Use basic SQL queries to filter Reader. Parameters: sql_statement: string kwargs: parameters to pass to the Reader Note: `select` is taken from SQL SELECT `dataset` is taken from SQL FROM `filters` is taken from SQL WHERE """ # some imports here to remove cyclic imports sql = SqlParser(sql_statement) get_logger().info(repr(sql)) actual_select = sql.select_expression if sql.select_expression is None: actual_select = "*" elif sql.select_expression != "*": actual_select = sql.select_expression + ", *" reducer = None if sql.select_expression == "COUNT(*)": reducer = lambda x: {"*": "*"} # FROM clause # WHERE clause if isinstance(sql.dataset, list): # it's a list if it's been parsed into a SQL statement, # this is how subqueries are interpretted - the parser # doesn't extract a dataset name - it collects parts of # a SQL statement which it can then pass to a SqlReader # to get back a dataset - which we then use as the # dataset for the outer query. reader = SqlReader("".join(sql.dataset), **kwargs) else: reader = Reader( select=actual_select, dataset=sql.dataset, filters=sql.where_expression, **kwargs, ) # GROUP BY clause if sql.group_by or any( [ t["type"] == TOKENS.AGGREGATOR for t in sql.select_evaluator.tokens ] # type:ignore ): # convert the clause into something we can pass to GroupBy if sql.group_by: groups = [ group.strip() for group in sql.group_by.split(",") if group.strip() != "" ] else: groups = ["*"] # we're not really grouping aggregations = [] renames = [] for t in sql.select_evaluator.tokens: # type:ignore if t["type"] == TOKENS.AGGREGATOR: aggregations.append((t["value"], t["parameters"][0]["value"])) if t["as"]: t["raw"] = get_function_name(t) renames.append(t) elif t["type"] == TOKENS.VARIABLE and t["value"] not in groups: raise InvalidSqlError( "Invalid SQL - SELECT clause in a statement with a GROUP BY clause must be made of aggregations or items from the GROUP BY clause." ) if aggregations: grouped = GroupBy(reader, groups).aggregate(aggregations) else: grouped = GroupBy(reader, groups).groups() # there could be 250000 groups, so we're not going to load them into memory reader = DictSet(grouped) # HAVING clause # if we have a HAVING clause, filter the grouped data by it if sql.having: reader = reader.filter(sql.having) # SELECT clause renames = {} # type:ignore for t in sql.select_evaluator.tokens: # type:ignore if t["as"]: renames[get_function_name(t)] = t["as"] def _perform_renames(row): for k, v in [(k, v) for k, v in row.items()]: if k in renames: row[renames[k]] = row.pop(k, row.get(renames[k])) return row if renames: reader = DictSet(map(_perform_renames, reader)) reader = reader.select(sql.select_evaluator.fields()) # type:ignore # disctinct now we have only the columns we're interested in if sql.distinct: reader = reader.distinct() # ORDER BY clause if sql.order_by: take = 10000 # the Query UI is currently set to 2000 if sql.limit: take = int(sql.limit) reader = DictSet( reader.sort_and_take( column=sql.order_by, take=take, descending=sql.order_descending ) ) # LIMIT clause if sql.limit: reader = reader.take(sql.limit) return reader
0354dd8b4d8cc6913cc1887b96aba6a06613ffe5
3,644,447
def _handle_consent_confirmation(user, is_confirmed):
    """
    Return server response given user consent.

    Args:
        user (fence.models.User): authN'd user
        is_confirmed (str): confirmation param
    """
    if is_confirmed == "yes":
        # user has already given consent, continue flow
        response = server.create_authorization_response(grant_user=user)
    else:
        # user did not give consent
        response = server.create_authorization_response(grant_user=None)
    return response
c4f61ed8465616a4fad912d02e81840eb9d34604
3,644,448
import math def local_coherence(img, window_s=WSIZ): """ Calculate the coherence according to methdology described in: Bazen, Asker M., and Sabih H. Gerez. "Segmentation of fingerprint images." ProRISC 2001 Workshop on Circuits, Systems and Signal Processing. Veldhoven, The Netherlands, 2001. """ coherence = [] rs = window_s cs = window_s for r in range(4, img.shape[0] - rs, rs): for c in range(4, img.shape[1] - cs, cs): window = img[r:r + rs, c:c + cs] if window.var() != 0: # Need variance because of the constraint (gxx + gyy) < 0 gx = np.uint8(np.absolute(cv2.Sobel(window, cv2.CV_64F, 1, 0, ksize=5))).flatten() gy = np.uint8(np.absolute(cv2.Sobel(window, cv2.CV_64F, 0, 1, ksize=5))).flatten() gxx = sum([int(x) ** 2 for x in gx]) gyy = sum([int(y) ** 2 for y in gy]) gxy = sum([int(x) * int(y) for x, y in zip(gx, gy)]) assert gxx + gyy != 0 coherence.append(math.sqrt((math.pow((gxx - gyy), 2) + 4 * math.pow(gxy, 2))) / (gxx + gyy)) return coherence
d360b388d743a3ada1004be8367ed2d105f7857a
3,644,449
def storeIDToWebID(key, storeid):
    """
    Takes a key (int) and storeid (int) and produces a webid (a 16-character
    str suitable for including in URLs)
    """
    i = key ^ storeid
    l = list('%0.16x' % (i,))
    for nybbleid in range(0, 8):
        a, b = _swapat(key, nybbleid)
        _swap(l, a, b)
    return ''.join(l)
38d9bffaa98c2191e818edd969d51873bb077094
3,644,450
def _jupyter_server_extension_paths():
    """
    Set up the server extension for collecting metrics
    """
    return [{"module": "jupyter_resource_usage"}]
f59c343dd8bcdb4755c725107b3c83f12978e9ef
3,644,451
import collections


def _make_ordered_node_map(
    pipeline: p_pb2.Pipeline
) -> 'collections.OrderedDict[str, p_pb2.PipelineNode]':
    """Helper function to prepare the Pipeline proto for DAG traversal.

    Args:
        pipeline: The input Pipeline proto. Since we expect this to come from
            the compiler, we assume that it is already topologically sorted.

    Returns:
        An OrderedDict that map node_ids to PipelineNodes.
    """
    node_map = collections.OrderedDict()
    for pipeline_or_node in pipeline.nodes:
        node_id = pipeline_or_node.pipeline_node.node_info.id
        node_map[node_id] = pipeline_or_node.pipeline_node
    return node_map
c0f7af61adf114a3b2211d82de050bf5e1f4e681
3,644,452
import re import os def read_omex_meta_files_for_archive(archive, archive_dirname, config=None): """ Read all of the OMEX Metadata files in an archive Args: archive (:obj:`CombineArchive`): COMBINE/OMEX archive archive_dirname (:obj:`str`): directory with the content of the archive config (:obj:`Config`, optional): configuration Returns: :obj:`tuple`: * :obj:`object`: representation of the OMEX Metadata file in :obj:`schema` * nested :obj:`list` of :obj:`str`: nested list of errors with the OMEX Metadata file * nested :obj:`list` of :obj:`str`: nested list of warnings with the OMEX Metadata file """ content = [] errors = [] warnings = [] if config is None: config = get_config() filenames = [] for item in archive.contents: if item.format and re.match(CombineArchiveContentFormatPattern.OMEX_METADATA.value, item.format): filenames.append(os.path.join(archive_dirname, item.location)) if filenames: return read_omex_meta_file(filenames, archive=archive, working_dir=archive_dirname, config=config) else: content = [] errors = [[( 'The COMBINE/OMEX does not contain an OMEX Metadata file. ' 'Archives must contain metadata for publication to BioSimulations.' )]] warnings = [] return (content, errors, warnings)
3b4d95a268a7f303e7006dbf61d6c15fef212064
3,644,453
from typing import Optional from typing import Tuple def fmin_b_bfgs(func, x0, args=(), options=None): """ The BFGS algorithm from Algorithm 6.1 from Wright and Nocedal, 'Numerical Optimization', 1999, pg. 136-143 with bounded parameters, using the active set approach from, Byrd, R. H., Lu, P., Nocedal, J., & Zhu, C. (1995). 'A Limited Memory Algorithm for Bound Constrained Optimization.' SIAM Journal on Scientific Computing, 16(5), 1190–1208. doi:10.1137/0916069 Notes: We utilise boolean arithmetic to avoid jax.cond calls which don't work on accelerators. A side effect is that we perform more gradient evaluations than scipy's BFGS func: callable Function of the form f(x) where x is a flat ndarray and returns a real scalar. The function should be composed of operations with vjp defined. If func is jittable then fmin_bfgs is jittable. If func is not jittable, then _nojit should be set to True. x0: ndarray initial variable args: tuple, optional Extra arguments to pass to func as func(x,*args) options: Optional dict of parameters maxiter: int Maximum number of evaluations norm: float Order of norm for convergence check. Default inf. gtol: flat Terminates minimization when |grad|_norm < g_tol ls_maxiter: int Maximum number of linesearch iterations bounds: 2-tuple of two vectors specifying the lower and upper bounds. e.g. (l, u) where l and u have the same size as x0. For parameters x_i without constraints the corresponding l_i=-jnp.inf and u_i=jnp.inf. Specifying l=None or u=None means no constraints on that side. Returns: BFGSResults """ if options is None: options = dict() maxiter: Optional[int] = options.get('maxiter', None) norm: float = options.get('norm', jnp.inf) gtol: float = options.get('gtol', 1e-5) ls_maxiter: int = options.get('ls_maxiter', 10) bounds: Tuple[jnp.ndarray, jnp.ndarray] = tuple(options.get('bounds', (None, None))) state = BFGSResults(converged=False, failed=False, k=0, nfev=0, ngev=0, nhev=0, x_k=x0, f_k=None, g_k=None, H_k=None, status=None, ls_status=jnp.array(0)) if maxiter is None: maxiter = jnp.size(x0) * 200 d = x0.shape[0] l = bounds[0] u = bounds[1] if l is None: l = -jnp.inf * jnp.ones_like(x0) if u is None: u = jnp.inf * jnp.ones_like(x0) l,u = jnp.where(l<u, l, u), jnp.where(l<u,u, l) def project(x,l,u): return jnp.clip(x,l, u) def get_active_set(x, l, u): return jnp.where((x==l) | (x==u)) def func_with_args(x): return func(x, *args) def get_generalised_Cauchy_point(xk, gk, l, u): def func(t): return func_with_args(project(xk - t* gk, l, u)) initial_H = jnp.eye(d) initial_H = options.get('hess_inv', initial_H) value_and_grad = jax.value_and_grad(func_with_args) f_0, g_0 = value_and_grad(x0) state = state._replace(f_k=f_0, g_k=g_0, H_k=initial_H, nfev=state.nfev + 1, ngev=state.ngev + 1, converged=jnp.linalg.norm(g_0, ord=norm) < gtol) def body(state): p_k = -(state.H_k @ state.g_k) line_search_results = line_search(value_and_grad, state.x_k, p_k, old_fval=state.f_k, gfk=state.g_k, maxiter=ls_maxiter) state = state._replace(nfev=state.nfev + line_search_results.nfev, ngev=state.ngev + line_search_results.ngev, failed=line_search_results.failed, ls_status=line_search_results.status) s_k = line_search_results.a_k * p_k x_kp1 = state.x_k + s_k f_kp1 = line_search_results.f_k g_kp1 = line_search_results.g_k # print(g_kp1) y_k = g_kp1 - state.g_k rho_k = jnp.reciprocal(y_k @ s_k) sy_k = s_k[:, None] * y_k[None, :] w = jnp.eye(d) - rho_k * sy_k H_kp1 = jnp.where(jnp.isfinite(rho_k), jnp.linalg.multi_dot([w, state.H_k, w.T]) + rho_k * s_k[:, None] * s_k[None, :], 
state.H_k) converged = jnp.linalg.norm(g_kp1, ord=norm) < gtol state = state._replace(converged=converged, k=state.k + 1, x_k=x_kp1, f_k=f_kp1, g_k=g_kp1, H_k=H_kp1 ) return state state = while_loop( lambda state: (~ state.converged) & (~state.failed) & (state.k < maxiter), body, state) state = state._replace(status=jnp.where(state.converged, jnp.array(0), # converged jnp.where(state.k == maxiter, jnp.array(1), # max iters reached jnp.where(state.failed, jnp.array(2) + state.ls_status, # ls failed (+ reason) jnp.array(-1))))) # undefined return state
0f8ce3e1873b9a5a955b95489ea454e3c9813524
3,644,454
def find_broken_in_text(text, ignore_substrings=None):
    """Find broken links
    """
    links = _find(text, ignore_substrings=ignore_substrings)
    responses = [_check_if_broken(link) for link in links]
    return [res.url for res in responses if res.broken]
43075b6abb1ba8e7fc6f163e1afd1d6f305d99ab
3,644,455
def revision_info():
    """
    Get the git hash and mtime of the repository, or the installed files.
    """
    # TODO: test with "pip install -e ." for developer mode
    global _REVISION_INFO
    if _REVISION_INFO is None:
        _REVISION_INFO = git_rev(repo_path())

    if _REVISION_INFO is None:
        try:
            from importlib import resources
        except ImportError:
            # CRUFT: pre-3.7 requires importlib_resources
            import importlib_resources as resources
        try:
            revdata = resources.read_text(PACKAGE_NAME, RESOURCE_NAME)
            commit = revdata.strip()
            _REVISION_INFO = commit
        except Exception:
            _REVISION_INFO = "unknown"

    return _REVISION_INFO
ee0899e72265d6cc9408f933320554f84e51b6d9
3,644,456
from flask import jsonify  # assumed source of jsonify


def home():
    """ route for the index page"""
    return jsonify({"message": "welcome to fast_Food_Fast online restaurant"})
cad54560f01361ff6d9fed1d117f8b50eff59b50
3,644,457
def singlediode_voc(effective_irradiance, temp_cell, module_parameters): """ Calculate voc using the singlediode model. Parameters ---------- effective_irradiance temp_cell module_parameters Returns ------- """ photocurrent, saturation_current, resistance_series, resistance_shunt, nNsVth = \ calcparams_singlediode(effective_irradiance, temp_cell, module_parameters) # out = pvlib.pvsystem.singlediode(photocurrent, saturation_current, resistance_series, resistance_shunt, nNsVth, # method='newton') v_oc = pvlib.singlediode.bishop88_v_from_i(0, photocurrent, saturation_current, resistance_series, resistance_shunt, nNsVth, method='newton') return v_oc
b47cff93f7ad51ce026fd1e28c6427bcaca0a639
3,644,458
from apysc._file import file_util from typing import Optional from typing import List def _exec_document_lint_and_script( limit_count: Optional[int] = None) -> List[str]: """ Execute each runnable scripts in the documents and check with each lint. Parameters ---------- limit_count : int or None, optional Limitation of the script execution count. Returns ------- executed_scripts : list of str List of executed Python scripts. """ md_file_paths: List[str] = \ file_util.get_specified_ext_file_paths_recursively( extension='.md', dir_path='./docs_src/') hashed_vals: List[str] md_file_paths, hashed_vals = _slice_md_file_by_hashed_val( md_file_paths=md_file_paths) script_data_list: List[_ScriptData] = _make_script_data_list( md_file_paths=md_file_paths, hashed_vals=hashed_vals, limit_count=limit_count) workers: int = max(mp.cpu_count() // 2, 1) logger.info(msg="Document's code block flake8 checking started...") with mp.Pool(workers) as p: p.map(func=_check_code_block_with_flake8, iterable=script_data_list) logger.info(msg="Document's code block numdoclint checking started...") with mp.Pool(workers) as p: p.map( func=_check_code_block_with_numdoclint, iterable=script_data_list) logger.info(msg="Document's code block mypy checking started...") with mp.Pool(workers) as p: p.map(func=_check_code_block_with_mypy, iterable=script_data_list) logger.info(msg="Document's scripts execution started...") with mp.Pool(workers) as p: run_return_data_list: List[_RunReturnData] = p.map( func=_run_code_block_script, iterable=script_data_list) _move_code_block_outputs() _validate_script_return_data(return_data_list=run_return_data_list) _save_hashed_val(script_data_list=script_data_list) executed_scripts: List[str] = [ script_data['runnable_script'] for script_data in script_data_list] return executed_scripts
a9406dfc5180c5b0f820740ae99522d8141af22d
3,644,459
import seaborn as sns import warnings def balance_boxplot(balance_name, data, num_color='#FFFFFF', denom_color='#FFFFFF', xlabel="", ylabel="", linewidth=1, ax=None, **kwargs): """ Plots a boxplot for a given balance on a discrete metadata category. Parameters ---------- x, y, hue: str Variable names to be passed into the seaborn plots for plotting. balance_name : str Name of balance to plot. data : pd.DataFrame Merged dataframe of balances and metadata. num_color : str Hex for background colors of values above zero. denom_color : str Hex for background colors of values below zero. xlabel : str x-axis label. ylabel : str y-axis label. linewidth : str Width of the grid lines. ax : matplotlib axes object Axes object to render boxplots in. **kwargs : dict Values to pass in to customize seaborn boxplot. Returns ------- a : matplotlib axes object Matplotlit axes object with rendered boxplots. See Also -------- seaborn.boxplot """ warnings.warn("This visualization are deprecated.", DeprecationWarning) if ax is None: f, ax = plt.subplots() # the number 20 is pretty arbitrary - we are just # resizing to make sure that there is separation between the # edges of the plot, and the boxplot pad = (data[balance_name].max() - data[balance_name].min()) / 20 ax.axvspan(data[balance_name].min() - pad, 0, facecolor=num_color, zorder=0) ax.axvspan(0, data[balance_name].max() + pad, facecolor=denom_color, zorder=0) if 'hue' in kwargs.keys(): hue = kwargs['hue'] num_groups = len(data[hue].value_counts()) else: num_groups = 1 a = sns.boxplot(ax=ax, x=balance_name, data=data, **kwargs) a.minorticks_on() minorLocator = matplotlib.ticker.AutoMinorLocator(num_groups) a.get_yaxis().set_minor_locator(minorLocator) a.grid(axis='y', which='minor', color='k', linestyle=':', linewidth=1) a.set_xlim([data[balance_name].min() - pad, data[balance_name].max() + pad]) a.set_xlabel(xlabel) a.set_ylabel(ylabel) return a
f15a0168282d59dabb5deddd86935d1417df97ef
3,644,460
import os


def walk(dirname, file_list):
    """
    This function is from a book called Think Python written by Allen B. Downey.
    It walks through a directory, gets names of all files and calls itself
    recursively on all the directories
    """
    for name in os.listdir(dirname):
        path = os.path.join(dirname, name)
        if os.path.isfile(path):
            file_list.append(path)
        else:
            walk(path, file_list)
    return file_list
fab10858f2887e30aac9e12a9a47b6d88a778a60
3,644,461
from typing import Dict from typing import Union def _apply_result_filters(key_gender_token_counters: Dict[Union[str, int], GenderTokenCounters], diff: bool, sort: bool, limit: int, remove_swords: bool) -> KeyGenderTokenResponse: """ A private helper function for applying optional keyword arguments to the output of GenderProximityAnalysis methods, allowing the user to sort, diff, limit, and remove stopwords from the output. These transformations do not mutate the input. :param key_gender_token_counters: a dictionary shaped Dict[Union[str, int], GenderTokenCounters] :param diff: return the difference in token occurrences across Genders. :param sort: return an array of the shape Sequence[Tuple[str, int]] :param limit: if sort==True, return only n=limit token occurrences. :param remove_swords: remove stop words from output. :return: a dictionary of the shape Dict[Union[str, int], GenderTokenResponse] >>> test_counter_1 = Counter({'foo': 1, 'bar': 2, 'own': 2}) >>> test_counter_2 = Counter({'foo': 5, 'baz': 2}) >>> test = {'doc': {'Male': test_counter_1, 'Female': test_counter_2}} >>> _apply_result_filters(test, diff=True, sort=False, limit=10, remove_swords=False).get('doc') {'Male': Counter({'bar': 2, 'own': 2, 'foo': -4}), 'Female': Counter({'foo': 4, 'baz': 2})} >>> _apply_result_filters(test, diff=False, sort=True, limit=10, remove_swords=False).get('doc') {'Male': [('bar', 2), ('own', 2), ('foo', 1)], 'Female': [('foo', 5), ('baz', 2)]} >>> _apply_result_filters(test, diff=False, sort=False, limit=10, remove_swords=True).get('doc') {'Male': Counter({'bar': 2, 'foo': 1}), 'Female': Counter({'foo': 5, 'baz': 2})} >>> _apply_result_filters(test, diff=True, sort=True, limit=10, remove_swords=False).get('doc') {'Male': [('bar', 2), ('own', 2), ('foo', -4)], 'Female': [('foo', 4), ('baz', 2)]} """ output = {} for key, gender_token_counters in key_gender_token_counters.items(): if remove_swords: output[key] = _remove_swords(gender_token_counters) else: output[key] = gender_token_counters if diff: output[key] = _diff_gender_token_counters(output[key]) if sort: output[key] = _sort_gender_token_counters(output[key], limit=limit) return output
120bb37936293810796ad6e62cee6b3c0bccabe4
3,644,462
def blog_delete(request): """Delete blog entry by id.""" blog_id = int(request.params.get('id')) entry = BlogRecordService.by_id(blog_id, request) if not entry: return HTTPNotFound() request.dbsession.delete(entry) return HTTPFound(location=request.route_url('home'))
4e1b9a19cd3a33743479de69ee1fbc4ffc9f9a42
3,644,463
import aiohttp async def get_ios_cfw(): """Gets all apps on ios.cfw.guide Returns ------- dict "ios, jailbreaks, devices" """ async with aiohttp.ClientSession() as session: async with session.get("https://api.appledb.dev/main.json") as resp: if resp.status == 200: data = await resp.json() return data
dfb0dfafef2ef8e27940bc7a154cd4a35f863017
3,644,464
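Since get_ios_cfw above is a coroutine, it has to be driven by an event loop; a minimal sketch, assuming aiohttp is installed and the api.appledb.dev endpoint is reachable.

import asyncio

async def main():
    data = await get_ios_cfw()
    if data is not None:
        # the payload is a dict; just show its top-level sections
        print(sorted(data.keys()))

asyncio.run(main())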
def server_error(errorMsg): """ Shorthand for returning error message. """ resp = HttpResponse(status=502) resp.write("<h3>502 BAD GATEWAY: </h3>") resp.write("<p>ERROR: {}</p>".format(errorMsg)) return resp
cadadfc0a8c0098832ca08080e3602bdaf01ffc4
3,644,465
import re
def protein_variant(variant):
    """
    Return an HGVS_ variant string containing only the protein changes in a
    coding HGVS_ variant string. If all variants are synonymous, returns the
    synonymous variant code. If the variant is wild type, returns the wild
    type variant.

    :param str variant: coding variant string
    :return: protein variant string (or synonymous or wild type)
    :rtype: str
    """
    if len(variant) == 0:
        raise ValueError("Empty variant string.")
    elif variant == WILD_TYPE_VARIANT:
        return WILD_TYPE_VARIANT
    elif variant == SYNONYMOUS_VARIANT:
        return SYNONYMOUS_VARIANT
    else:
        matches = re.findall(r"\((p\.\S*)\)", variant)
        if len(matches) == 0:
            raise ValueError("Invalid coding variant string.")
        # uniqify and remove synonymous
        seen = {"p.=": True}
        unique_matches = list()
        for v in matches:
            if v in seen:
                continue
            else:
                seen[v] = True
                unique_matches.append(v)
        if len(unique_matches) == 0:
            return SYNONYMOUS_VARIANT
        else:
            return ", ".join(unique_matches)
ed9d11759ed5d09f76daa757b9e75d00bfd0f029
3,644,466
import os
import argparse
def parser():
    """Parses command line arguments using argparse.

    Returns the configured ArgumentParser."""
    # default directory for reddit files
    default_directory = os.path.join(os.getcwd(), "data")
    parser = argparse.ArgumentParser()
    # obligatory
    parser.add_argument("mode", type = int,
                        help = "execution mode: 1 build index, 2: query using existing index, 3 build and query")
    # conditionally obligatory
    parser.add_argument("--start", "-s", type = str,
                        help = "first year/month")
    parser.add_argument("--end", "-e", type = str,
                        help = "last year/month")
    # optional with defaults
    parser.add_argument("--dir", "-d", type = str, nargs = "?", default = default_directory,
                        help = "directory for data storage")
    parser.add_argument("--num", "-n", type = int, nargs = "?", default = 10,
                        help = "number of results per query")
    parser.add_argument("--fulltext", "-f", action = "store_true",
                        help = "store fulltext and/or return in queries")
    parser.add_argument("--all", "-a", action = "store_true",
                        help = "Return documents containing all rather than any of the query terms")
    parser.add_argument("--minfreq", "-m", type = int, nargs = "?", default = 5,
                        help = "minimum term frequency")
    parser.add_argument("--lemma", "-l", action = "store_true",
                        help = "lemmatize comments/queries")
    parser.add_argument("--cores", "-c", type = int, nargs = "?", default = 1,
                        help = "number of cores to use")
    parser.add_argument("--progress", "-p", action = "store_true",
                        help = "report progress")
    return parser
271f4a5db0a5e8f6b201c098830885e768d246b7
3,644,467
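A usage sketch for the parser factory above; the argument values are illustrative only.

# build the parser, then parse a sample command line:
# mode 1 (build index) over 2015-01 .. 2015-06, lemmatized, on 4 cores
args = parser().parse_args(
    ["1", "--start", "2015-01", "--end", "2015-06", "--lemma", "--cores", "4"]
)
print(args.mode, args.start, args.end, args.lemma, args.cores)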
import logging
def dump_func_name(func):
    """This decorator logs the function name when it is called.

    Args:
        func: the function to wrap.

    Returns:
        The wrapped function.
    """
    def echo_func(*func_args, **func_kwargs):
        logging.debug('### Start func: {}'.format(func.__name__))
        return func(*func_args, **func_kwargs)
    return echo_func
9db126f3c51ed28bb5e8aa206e5a11d36c82e008
3,644,468
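A small sketch of how the dump_func_name decorator above could be applied; logging is configured at DEBUG level so the entry message is visible.

import logging

logging.basicConfig(level=logging.DEBUG)

@dump_func_name
def add(a, b):
    return a + b

# logs "### Start func: add" before returning 3
print(add(1, 2))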
import numpy as np
import scipy.spatial
from functools import reduce
def calc_predictability_trace_of_avg_cov(x, k, p, ndim=False):
    """
    The main evaluation criterion of GPFA, i.e., equation (2) from the paper.
    :param x: data array
    :param k: number of neighbors for estimate
    :param p: number of past time steps to consider
    :param ndim: n-dimensional evaluation if True
    :return: estimated variance in the next time step
    """

    def _cov(t):
        successors = neighbors[t] + 1
        successors = successors[successors < N]
        suc_dat = x[successors]
        return np.array(np.cov(suc_dat.T), ndmin=2)

    # pairwise distances of data points
    if x.ndim == 1:
        x = np.array(x, ndmin=2).T
    N, _ = x.shape
    y = concatenate_past(x, p=p)
    tree = scipy.spatial.cKDTree(y)
    neighbors = [tree.query(y[i], k=k+1)[1] for i in range(y.shape[0])]
    assert len(neighbors) == N
    covariances = map(_cov, range(p-1, N-1))
    covariance = reduce(lambda a, b: a + b, covariances) / (N-p)
    if ndim:
        E, _ = np.linalg.eigh(covariance)
        return E
    result = np.trace(covariance)
    assert np.isfinite(result)
    return result
a803847ce8f8791edf44d3ba102137e69836f410
3,644,469
from typing import Dict from typing import Sequence def nx_to_loreleai(graph: nx.Graph, relation_map: Dict[str, Predicate] = None) -> Sequence[Atom]: """ Converts a NetworkX graph into Loreleai representation To indicate the type of relations and nodes, the functions looks for a 'type' attribute Arguments: graph: NetworkX graph relation_map: maps from edge types to predicates """ literals = [] if relation_map is None: relation_map = {} for (u, v, t) in graph.edges.data('type', default=None): literals.append(relation_map[t](u, v)) return literals
f847c26d0831bf6bbaf16eabe5a32d27118550da
3,644,470
def _kohn_sham_iteration( density, external_potential, grids, num_electrons, xc_energy_density_fn, interaction_fn, enforce_reflection_symmetry): """One iteration of Kohn-Sham calculation.""" # NOTE(leeley): Since num_electrons in KohnShamState need to specify as # static argument in jit function, this function can not directly take # KohnShamState as input arguments. The related attributes in KohnShamState # are used as input arguments for this helper function. if enforce_reflection_symmetry: xc_energy_density_fn = _flip_and_average_on_center_fn(xc_energy_density_fn) hartree_potential = scf.get_hartree_potential( density=density, grids=grids, interaction_fn=interaction_fn) xc_potential = scf.get_xc_potential( density=density, xc_energy_density_fn=xc_energy_density_fn, grids=grids) ks_potential = hartree_potential + xc_potential + external_potential xc_energy_density = xc_energy_density_fn(density) # Solve Kohn-Sham equation. density, total_eigen_energies, gap = scf.solve_noninteracting_system( external_potential=ks_potential, num_electrons=num_electrons, grids=grids) total_energy = ( # kinetic energy = total_eigen_energies - external_potential_energy total_eigen_energies - scf.get_external_potential_energy( external_potential=ks_potential, density=density, grids=grids) # Hartree energy + scf.get_hartree_energy( density=density, grids=grids, interaction_fn=interaction_fn) # xc energy + scf.get_xc_energy( density=density, xc_energy_density_fn=xc_energy_density_fn, grids=grids) # external energy + scf.get_external_potential_energy( external_potential=external_potential, density=density, grids=grids) ) if enforce_reflection_symmetry: density = _flip_and_average_on_center(density) return ( density, total_energy, hartree_potential, xc_potential, xc_energy_density, gap)
81ecffb04d0bc76b31187708c3502acead8653ab
3,644,471
def get_sync_func_driver(physical_mesh): """Get the sync function on the driver.""" def sync_func_driver(): assert isinstance(physical_mesh, LocalPhysicalDeviceMesh) physical_mesh.devices[0].synchronize_all_activity() return sync_func_driver
13dee330aa22524c52272c1969f3acba23f4378f
3,644,472
def get_nc_BGrid_GFDL(grdfile): """ Bgrd = get_nc_BGrid_GFDL(grdfile) Load B-Grid grid object for GFDL CM2.1 from netCDF grid file """ nc = pyroms.io.Dataset(grdfile) lon_t = nc.variables['geolon_t'][:] lat_t = nc.variables['geolat_t'][:] lon_uv = nc.variables['geolon_c'][:] lat_uv = nc.variables['geolat_c'][:] h = nc.variables['ht'][:] f = nc.variables['coriolis_param'][:] kmt = nc.variables['kmt'][:] z_t = nc.variables['st_ocean'][:] z_t_edges = nc.variables['st_edges_ocean'][:] kmu = nc.variables['kmu'][:] z_uv = nc.variables['sw_ocean'][:] z_uv_edges = nc.variables['sw_edges_ocean'][:] # compute mask at t-point M_t, L_t = kmt.shape N_t = z_t.shape[0] mask_t = np.zeros((N_t, M_t, L_t)) for j in range(M_t): for i in range(L_t): try: mask_t[0:kmt[j,i], j,i] = 1 except: mask_t[:, j,i] = 0 # compute mask at uv-point M_uv, L_uv = kmu.shape N_uv = z_uv.shape[0] mask_uv = np.zeros((N_uv, M_uv, L_uv)) for j in range(M_uv): for i in range(L_uv): try: mask_uv[0:kmt[j,i], j,i] = 1 except: mask_uv[:, j,i] = 0 return BGrid_GFDL(lon_t, lat_t, lon_uv, lat_uv, \ mask_t, mask_uv, h, z_t, z_t_edges, \ z_uv, z_uv_edges, f)
0b6f844676da7f357334640eafdb3039127df912
3,644,473
def _getTimeDistORSlocal(fromLocs, toLocs, travelMode, port, speedMPS):
    """ Generate two dictionaries, one for time, another for distance, using ORS-local

    Parameters
    ----------
    fromLocs: list, Required
        The start node coordinates in format of [[lat, lon], [lat, lon], ... ]
    toLocs: list, Required
        The end node coordinates in format of [[lat, lon], [lat, lon], ... ]
    travelMode: string, Required
        The travel mode for ORS, options are 'fastest', 'pedestrian', 'cycling', 'truck'
    port: string, Required
        localhost connection port
    speedMPS: float, Required
        A constant speed for calculation

    Returns
    -------
    timeSecs: dictionary
        A dictionary for time from nodes to nodes, unit is in [seconds]
    distMeters: dictionary
        A dictionary for distance from nodes to nodes, unit is in [meters]
    """

    if (fromLocs == toLocs):
        locs = fromLocs.copy()
        [timeSecs, distMeters] = orsLocalGetTimeDistAll2All(locs, travelMode, port)
    elif (len(fromLocs) == 1):
        fromLoc = fromLocs[0]
        [timeSecs, distMeters] = orsLocalGetTimeDistOne2Many(fromLoc, toLocs, travelMode, port)
    elif (len(toLocs) == 1):
        toLoc = toLocs[0]
        [timeSecs, distMeters] = orsLocalGetTimeDistMany2One(fromLocs, toLoc, travelMode, port)
    else:
        timeSecs = {}
        distMeters = {}
        for i in range(len(fromLocs)):
            [timeRow, distRow] = orsLocalGetTimeDistOne2Many(fromLocs[i], toLocs, travelMode, port)
            for j in range(len(toLocs)):
                distMeters[i, j] = distRow[0, j]
                timeSecs[i, j] = timeRow[0, j]

    if (speedMPS != None):
        for i in range(len(fromLocs)):
            for j in range(len(toLocs)):
                timeSecs[i, j] = distMeters[i, j] / speedMPS

    return [timeSecs, distMeters]
67feca093769c4cef4f4383cb6eaca4e0f584019
3,644,474
def sequence_plus_one(x_init, iter, dtype=int): """ Mathematical sequence: x_n = x_0 + n :param x_init: initial values of the sequence :param iter: iteration until the sequence should be evaluated :param dtype: data type to cast to (either int of float) :return: element at the given iteration and array of the whole sequence """ def iter_function(x_seq, i, x_init): return x_seq[0, :] + i return sequence(x_init, iter, iter_function, dtype)
ec84cdb2f98147d2d1d967f7a071d3124ccdf54a
3,644,475
def _is_test_product_type(product_type): """Returns whether the given product type is for tests purposes or not.""" return product_type in ( apple_product_type.ui_test_bundle, apple_product_type.unit_test_bundle, )
41847ab87e4a8a0dfc2b2758b70b5f7b5d7b952b
3,644,476
def _synced(method, self, args, kwargs): """Underlying synchronized wrapper.""" with self._lock: return method(*args, **kwargs)
54ca3cf69742550bd34ff3d2299a2d84f78577a3
3,644,477
def with_metaclass(meta, *bases): """Create a base class with a metaclass.""" return meta('NewBase', bases, {})
a8257c1d7a4fdec6331985983b65954e9b1d9453
3,644,478
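The with_metaclass helper above is the classic six-style bridge for writing metaclass declarations that work on both Python 2 and 3; a minimal sketch with a made-up registering metaclass.

class Registry(type):
    """Metaclass that records every class created with it."""
    classes = []

    def __new__(mcs, name, bases, namespace):
        cls = super(Registry, mcs).__new__(mcs, name, bases, namespace)
        Registry.classes.append(cls)
        return cls

class Plugin(with_metaclass(Registry, object)):
    pass

# Plugin (and the intermediate 'NewBase' base) were both built by Registry
print(Plugin.__name__, [c.__name__ for c in Registry.classes])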
def team(slug): """The team page. Shows statuses for all users in the team.""" db = get_session(current_app) team = db.query(Team).filter_by(slug=slug).first() if not team: return page_not_found('Team not found.') return render_template( 'status/team.html', team=team, users=team.users, teams=db.query(Team).order_by(Team.name).all(), statuses=team.recent_statuses( request.args.get('page', 1), startdate(request), enddate(request)))
c702b4837c3e7342e248f81180821c4ff3404793
3,644,479
import numpy as np

def mask_layer(layer, mask, mask_value=np.nan):
    """apply a mask to a layer

    layer[mask == True] = mask_value
    """
    layer[mask] = mask_value
    return layer
b8ac53633bb351eea2e0025eeb5daba1f2eeab54
3,644,480
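A tiny sketch of mask_layer above on a NumPy array; note that the input layer is modified in place as well as returned.

import numpy as np

layer = np.array([[1.0, 2.0], [3.0, 4.0]])
mask = layer > 2.5

# values where mask is True become NaN; `layer` itself is mutated
masked = mask_layer(layer, mask)
print(masked)   # [[ 1.  2.] [nan nan]]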
import requests def generate_twitch_clip(user_id): """Generate a Twitch Clip from user's channel. Returns the URL and new clip object on success.""" user = User.get_user_from_id(user_id) twitch_id = str(user.twitch_id) payload_clips = {"broadcaster_id": TEST_ID or twitch_id} # Edit this to test r_clips = requests.post("https://api.twitch.tv/helix/clips", data=payload_clips, headers=create_header(user)) if r_clips.status_code == 202: # Save the clip's slug; used as `id` in Twitch API clip_slug = r_clips.json().get("data")[0].get("id") # Send a request to Get Clips to confirm clip was created. clip_info = get_clip_info(clip_slug, user) if clip_info: # Store the url url = clip_info.get("url") # Save clip to DB new_clip = TwitchClip.save_twitch_clip(clip_slug, user_id) return (new_clip, url) # TODO: If this fails, return None. # Add better error handling. return None, None
1980974b79c07e3759044f5041c2e9c8df040fbb
3,644,481
def get_disabled(): """ Return a list of all disabled services CLI Example: .. code-block:: bash salt '*' service.get_disabled """ return _get_svc_list(status="DISABLED")
34d2389bf6e2c3284b06376780c7424205f340be
3,644,482
def calling_method():
    """
    Call the recursive method.
    :return: list of all posts within the 2-day delta-time window
    """
    list_posts = list()
    return create_json_poyload(list_posts)
587afbf856dbd014a253847816c185edd23f8485
3,644,483
import syslog def check_notifier(notifiers): """Check if the configured notifier really exists.""" notifiers_available = { "syslog": notify_syslog, "pushover": notify_pushover, "mail": notify_mail, "twilio": notify_twilio } notifiers_valid = [] for notifier in notifiers: try: notifiers_valid.append(notifiers_available[notifier]) except KeyError: syslog.syslog(syslog.LOG_ERR, f"Unknown notifier {notifier} configured") return notifiers_valid
c5f57209943c9b97bd5e23ff7f3ea410d952d2c4
3,644,484
import json def read_json(path): """ Read a BayesNet object from the json format. This format has the ".bn" extension and is completely unique to pyBN. Arguments --------- *path* : a string The file path Returns ------- None Effects ------- - Instantiates and sets a new BayesNet object Notes ----- This function reads in a libpgm-style format into a bn object File Format: { "V": ["Letter", "Grade", "Intelligence", "SAT", "Difficulty"], "E": [["Intelligence", "Grade"], ["Difficulty", "Grade"], ["Intelligence", "SAT"], ["Grade", "Letter"]], "Vdata": { "Letter": { "ord": 4, "numoutcomes": 2, "vals": ["weak", "strong"], "parents": ["Grade"], "children": None, "cprob": [[.1, .9],[.4, .6],[.99, .01]] }, ... } """ def byteify(input): if isinstance(input, dict): return {byteify(key):byteify(value) for key,value in input.iteritems()} elif isinstance(input, list): return [byteify(element) for element in input] elif isinstance(input, unicode): return input.encode('utf-8') else: return input bn = BayesNet() f = open(path,'r') ftxt = f.read() success=False try: data = byteify(json.loads(ftxt)) bn.V = data['V'] bn.E = data['E'] bn.F = data['F'] success = True except ValueError: print("Could not read file - check format") bn.V = topsort(bn.E) return bn
4c483f8fe148ff3a94bdee4accc22fb2964dc09d
3,644,485
def solve_primal(run_id, problem, mip_solution, solver):
    """Solve the primal by fixing integer variables and solving the NLP.

    If the search fails and `mip_solution` has a solution pool, then also
    try to find a feasible solution starting at the solution pool points.

    Parameters
    ----------
    run_id : str
        the run_id used for logging
    problem : Problem
        the mixed integer, (possibly) non convex problem
    mip_solution : MipSolution
        the linear relaxation solution
    solver : Solver
        the NLP solver used to solve the problem
    """
    starting_point = [v.value for v in mip_solution.variables]
    solution = solve_primal_with_starting_point(
        run_id, problem, starting_point, solver
    )

    if solution.status.is_success():
        return solution

    # Try solutions from mip solution pool, if available
    if mip_solution.solution_pool is None:
        return solution

    for mip_solution_from_pool in mip_solution.solution_pool:
        if seconds_left() <= 0:
            return solution
        starting_point = [
            v.value
            for v in mip_solution_from_pool.inner.variables
        ]
        solution_from_pool = solve_primal_with_starting_point(
            run_id, problem, starting_point, solver
        )
        if solution_from_pool.status.is_success():
            return solution_from_pool

    # No solution from pool was feasible, return original infeasible sol
    return solution
9b01b65553a752cebb899bcc8b4f78f5355db5f9
3,644,486
def SpliceContinuations(tree): """Given a pytree, splice the continuation marker into nodes. Arguments: tree: (pytree.Node) The tree to work on. The tree is modified by this function. """ def RecSplicer(node): """Inserts a continuation marker into the node.""" if isinstance(node, pytree.Leaf): if node.prefix.lstrip().startswith('\\\n'): new_lineno = node.lineno - node.prefix.count('\n') return pytree.Leaf( type=format_token.CONTINUATION, value=node.prefix, context=('', (new_lineno, 0))) return None num_inserted = 0 for index, child in enumerate(node.children[:]): continuation_node = RecSplicer(child) if continuation_node: node.children.insert(index + num_inserted, continuation_node) num_inserted += 1 RecSplicer(tree)
9bb36363b3ae8ef2e04649bc966d8e664fa1202f
3,644,487
import numpy as np

def rayleigh(flow_resis, air_dens, sound_spd, poros,
             freq=np.arange(100, 10001, 1)):
    """
    Returns, through the Rayleigh model, the material characteristic
    impedance and the material wave number.

        Parameters:
        ----------
            flow_resis : int
                Resistivity of the material
            air_dens : int | float
                The air density
            sound_spd : int | float
                The speed of the sound
            poros : float
                Porosity of the material
            freq : ndarray
                A range of frequencies
                NOTE: default range goes from 100 [Hz] to 10 [kHz].

        Returns:
        -------
            zc : int | float | complex
                Material characteristic impedance
            kc : int | float | complex
                Material wave number
    """
    omega = 2 * np.pi * freq
    alpha = (1 - (1j * poros * flow_resis) / (air_dens * omega)) ** 0.5

    # Material characteristic impedance (zc) and the material wave number (kc)
    kc = (omega / sound_spd) * alpha
    zc = ((air_dens * sound_spd) / poros) * alpha

    return zc, kc
cf1330591e1f97f831268bd19babac2d682369aa
3,644,488
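A usage sketch for the Rayleigh model above; the material parameters are arbitrary but physically plausible values, not taken from the source.

import numpy as np

freq = np.arange(100, 10001, 1)
zc, kc = rayleigh(flow_resis=25000,   # Pa*s/m^2, assumed value
                  air_dens=1.204,     # kg/m^3 at ~20 degC
                  sound_spd=343.0,    # m/s
                  poros=0.95,
                  freq=freq)

# both outputs are complex arrays, one value per frequency
print(zc.shape, kc.shape, zc[0])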
import requests import logging def set_iam_policy(project_id: str, policy: dict, token: str) -> dict: """Sets the Cloud IAM access control policy for a ServiceAccount. Args: project_id: GCP project ID. policy: IAM policy. token: Access token from the Google Authorization Server. Returns: A dict containing the response body. """ host = "https://cloudresourcemanager.googleapis.com" url = f"{host}/v1/projects/{project_id}:setIamPolicy" resp = requests.post(url, json={ "policy": policy, }, headers={ "Authorization": f"Bearer {token}" }) try: resp.raise_for_status() except requests.exceptions.HTTPError as err: logging.error(err.response.text) raise err return resp.json()
3afc6902bcd20c4af62ba9c3f2e873da5d425d06
3,644,489
def feature_set_is_deployed(db: Session, fset_id: int) -> bool:
    """
    Returns whether this feature set is deployed or not

    :param db: SqlAlchemy Session
    :param fset_id: The Feature Set ID in question
    :return: True if the feature set is deployed
    """
    d = db.query(models.FeatureSetVersion). \
        filter((models.FeatureSetVersion.feature_set_id == fset_id) &
               (models.FeatureSetVersion.deployed == True)). \
        count()
    return bool(d)
e66609ec97a17eb55ea0a6c7218a0f5f9fb1ca9b
3,644,490
from typing import Union from typing import Optional def get_retry_request( request: Request, *, spider: Spider, #response: Response, reason: Union[str, Exception] = 'unspecified', max_retry_times: Optional[int] = None, priority_adjust: Optional[int] = None, logger: Logger = retry_logger, stats_base_key: str = 'retry', ): """ Returns a new :class:`~scrapy.Request` object to retry the specified request, or ``None`` if retries of the specified request have been exhausted. For example, in a :class:`~scrapy.Spider` callback, you could use it as follows:: def parse(self, response): if not response.text: new_request_or_none = get_retry_request( response.request, spider=self, reason='empty', ) return new_request_or_none *spider* is the :class:`~scrapy.Spider` instance which is asking for the retry request. It is used to access the :ref:`settings <topics-settings>` and :ref:`stats <topics-stats>`, and to provide extra logging context (see :func:`logging.debug`). *reason* is a string or an :class:`Exception` object that indicates the reason why the request needs to be retried. It is used to name retry stats. *max_retry_times* is a number that determines the maximum number of times that *request* can be retried. If not specified or ``None``, the number is read from the :reqmeta:`max_retry_times` meta key of the request. If the :reqmeta:`max_retry_times` meta key is not defined or ``None``, the number is read from the :setting:`RETRY_TIMES` setting. *priority_adjust* is a number that determines how the priority of the new request changes in relation to *request*. If not specified, the number is read from the :setting:`RETRY_PRIORITY_ADJUST` setting. *logger* is the logging.Logger object to be used when logging messages *stats_base_key* is a string to be used as the base key for the retry-related job stats """ settings = spider.crawler.settings stats = spider.crawler.stats retry_times = request.meta.get('retry_times', 0) + 1 if max_retry_times is None: max_retry_times = request.meta.get('max_retry_times') if max_retry_times is None: max_retry_times = settings.getint('RETRY_TIMES') if retry_times <= max_retry_times: logger.debug( "Retrying %(request)s (failed %(retry_times)d times): %(reason)s", {'request': request, 'retry_times': retry_times, 'reason': reason}, extra={'spider': spider} ) new_request = request.copy() new_request.meta['retry_times'] = retry_times new_request.dont_filter = True if priority_adjust is None: priority_adjust = settings.getint('RETRY_PRIORITY_ADJUST') new_request.priority = request.priority + priority_adjust if callable(reason): reason = reason() if isinstance(reason, Exception): reason = global_object_name(reason.__class__) stats.inc_value(f'{stats_base_key}/count') stats.inc_value(f'{stats_base_key}/reason_count/{reason}') return new_request else: stats.inc_value(f'{stats_base_key}/max_reached') logger.error( "Gave up retrying %(request)s (failed %(retry_times)d times): " "%(reason)s", {'request': request, 'retry_times': retry_times, 'reason': reason}, extra={'spider': spider}, ) return None
f7d46d4edcf30b90fd197e37d9f796c82ccae74f
3,644,491
import time
async def graf(request: Request):
    """ Displays a graph of the measured characteristic. """
    localtime = time.asctime(time.localtime(time.time()))
    print("Graph; Time:", localtime)
    return templates.TemplateResponse("graf.html",
                                      {"request": request, "time": localtime})
a09ad4790cfaf71927b2c3e2b371f4089c8f0937
3,644,492
def mapRangeUnclamped(Value, InRangeA, InRangeB, OutRangeA, OutRangeB):
    """Returns Value mapped from one range into another; the Value is NOT clamped to the Input Range.
    (e.g. 0.5 normalized from the range 0->1 to 0->50 would result in 25)"""
    return lerp(OutRangeA, OutRangeB, GetRangePct(InRangeA, InRangeB, Value))
1579359f6585bb17228cf2b94b29ee8cd00e672e
3,644,493
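mapRangeUnclamped above relies on lerp and GetRangePct helpers that are not part of this record; the sketch below supplies conventional definitions for them (an assumption) so the mapping can be exercised.

def GetRangePct(range_a, range_b, value):
    # fraction of the way `value` sits between range_a and range_b (assumed helper)
    return (value - range_a) / (range_b - range_a)

def lerp(a, b, t):
    # linear interpolation between a and b (assumed helper)
    return a + t * (b - a)

# 0.5 in the range 0->1 maps to 25 in the range 0->50
print(mapRangeUnclamped(0.5, 0.0, 1.0, 0.0, 50.0))   # 25.0
# values outside the input range extrapolate, hence "unclamped"
print(mapRangeUnclamped(2.0, 0.0, 1.0, 0.0, 50.0))   # 100.0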
import json def folders(request): """Handle creating, retrieving, updating, deleting of folders. """ if request.method == "GET": q = bookshelf_models.Folder.objects.filter(owner=request.user) data = [[e.guid, e.title] for e in q] if request.method == "POST": if "create" in request.POST: newfolder = bookshelf_models.Folder(owner=request.user, title="New Folder") newfolder.save() data = [[newfolder.guid, "New Folder"]] if "update" in request.POST: guid = request.POST.get("id", "") folder = bookshelf_models.Folder.objects.get(guid=guid) folder.title = request.POST.get("newname", "") folder.save() data = [[folder.guid, folder.title]] if "delete" in request.POST: folderid = request.POST.get("folderid", "") nbids = request.POST.getlist("nbids") folder = bookshelf_models.Folder.objects.get(owner=request.user, guid=folderid) folder.delete() for nbid in nbids: nb = notebook_models.Notebook.objects.get(owner=request.user, guid=nbid) nb.delete() data = {"response":"ok"} jsobj = json.dumps(data) return HttpResponse(jsobj, mimetype='application/json')
29a3c58970188682724e429d4f8a8a244938f54c
3,644,494
import os def reponame(url, name=None): """ Determine a repo's cloned name from its URL. """ if name is not None: return name name = os.path.basename(url) if name.endswith('.git'): name = name[:-4] return name
be8bb47f1fc8be940e469d6a999a2039edb2fa3a
3,644,495
import argparse from typing import Dict def specified_options(opts: argparse.Namespace, exclude=None) -> Dict: """ Cast an argparse Namespace into a dictionary of options. Remove all options that were not specified (equal to None). Arguments: opts: The namespace to cast. exclude: Names of options to exclude from the result. Returns: A dictionary of specified-only options. """ exclude = exclude or set() options = opts.__dict__.items() # noqa: WPS609 return {opt: value for opt, value in options if value is not None and opt not in exclude}
b1200fbedb5edcd8b44fef3e30f644cc582ca23f
3,644,496
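A quick sketch of specified_options above using a hand-built argparse.Namespace; the option names are illustrative.

import argparse

opts = argparse.Namespace(verbose=True, depth=None, output="out.txt", force=False)

# depth is dropped because it is None; output is dropped because it is excluded
print(specified_options(opts, exclude={"output"}))
# {'verbose': True, 'force': False}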
def Sort_list_by_Prism_and_Date(lst):
    """
    Argument:
    - A list containing the prism name, position of recording, decimal year,
      position and meteo corrected position for each prism.
    Return:
    - A list containing lists of prisms sorted by name and date.
    """
    # lst must come from a converted GKA file
    outList = []  # [[Name, [Data]], [], [], ...]

    # Sort by prism name
    for k in lst:
        index = FindIndexByName(k[0], outList)
        if index != None:
            outList[index][1].append(k)
        else:
            outList.append([k[0], [k]])

    # Sort each prism's records by ascending date
    for j in outList:
        j[1] = SortCrescent(j[1], 2)

    return outList
164a4c8b646363b3d8c57068ee785b410cbc3cf7
3,644,497
import numpy as np
import rpy2.robjects as ro

def _convert_to_RVector(value, force_Rvec=True):
    """
    Convert a value or list into an R vector of the appropriate type.

    Parameters
    ----------
    value : numeric or str, or list of numeric or str
        Value to be converted.

    force_Rvec : bool, default True
        If `value` is not a list, force conversion into a R vector?
        False will return an int, float, or str if value is non-list.
        True will always return an R vector.

    Returns
    -------
    int, float, str, an rpy2 R vector
        A value or R vector of an appropriate data type.
    """

    if not isinstance(value, list) and not force_Rvec:
        return value
    elif not isinstance(value, list) and force_Rvec:
        value = [value]
    else:
        pass

    if all(isinstance(x, bool) for x in value):
        return ro.BoolVector(value)
    elif all(isinstance(x, (int, np.integer)) for x in value):
        return ro.IntVector(value)
    elif all(isinstance(x, (int, np.integer, float, np.floating)) for x in value):
        return ro.FloatVector(value)
    else:
        return ro.StrVector(value)
cc71e8c8906084b33c1638a1423944576fb75366
3,644,498
import numpy as np

def softmax(x):
    """Compute softmax values for each set of scores in x."""
    e_x = np.exp(x - np.max(x))
    temp = e_x / e_x.sum(axis=0)
    # fall back to a fixed distribution if the normalisation produced NaNs
    if np.isnan(temp).any():
        return [0.0, 1.0, 0.0]
    else:
        return temp
ed3a4c5e60dbfaf86acec1357e7700492ab3f69d
3,644,499
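A short sketch of the softmax above on a 1-D score vector; the hard-coded fallback [0.0, 1.0, 0.0] is only returned if the normalisation produced NaNs.

import numpy as np

scores = np.array([1.0, 2.0, 3.0])
probs = softmax(scores)
print(probs, np.isclose(probs.sum(), 1.0))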