content
stringlengths
35
762k
sha1
stringlengths
40
40
id
int64
0
3.66M
def vect3_scale(v, f):
    """Scale a 3D vector by the factor f.

    v (3-tuple): 3d vector
    f (float): scale factor
    return (3-tuple): the scaled 3d vector
    """
    return tuple(component * f for component in v)
94902cad0a7743f8e3ed1582bf6402229b8a028d
3,631,500
from typing import Optional


def segment_max(data: Array,
                segment_ids: Array,
                num_segments: Optional[int] = None,
                indices_are_sorted: bool = False,
                unique_indices: bool = False,
                bucket_size: Optional[int] = None,
                mode: Optional[lax.GatherScatterMode] = None) -> Array:
  """Computes the maximum within segments of an array.

  Similar to TensorFlow's `segment_max
  <https://www.tensorflow.org/api_docs/python/tf/math/segment_max>`_

  Args:
    data: an array with the values to be reduced.
    segment_ids: an array with integer dtype that indicates the segments of
      `data` (along its leading axis) to be reduced. Values can be repeated
      and need not be sorted. Values outside of the range [0, num_segments)
      are dropped and do not contribute to the result.
    num_segments: optional, an int with nonnegative value indicating the
      number of segments. The default is set to be the minimum number of
      segments that would support all indices in ``segment_ids``, calculated
      as ``max(segment_ids) + 1``. Since `num_segments` determines the size
      of the output, a static value must be provided to use ``segment_max``
      in a ``jit``-compiled function.
    indices_are_sorted: whether ``segment_ids`` is known to be sorted.
    unique_indices: whether `segment_ids` is known to be free of duplicates.
    bucket_size: size of bucket to group indices into. ``segment_max`` is
      performed on each bucket separately. Default ``None`` means no
      bucketing.
    mode: a :class:`jax.lax.GatherScatterMode` value describing how
      out-of-bounds indices should be handled. By default, values outside of
      the range [0, num_segments) are dropped and do not contribute to the
      sum.

  Returns:
    An array with shape :code:`(num_segments,) + data.shape[1:]` representing
    the segment maximums.

  Examples:
    Simple 1D segment max:

    >>> data = jnp.arange(6)
    >>> segment_ids = jnp.array([0, 0, 1, 1, 2, 2])
    >>> segment_max(data, segment_ids)
    DeviceArray([1, 3, 5], dtype=int32)

    Using JIT requires static `num_segments`:

    >>> from jax import jit
    >>> jit(segment_max, static_argnums=2)(data, segment_ids, 3)
    DeviceArray([1, 3, 5], dtype=int32)
  """
  # Delegate to the shared segment-reduction helper, using scatter-max as the
  # combining primitive and jnp.max as the per-bucket reducer.
  return _segment_update(
      "segment_max", data, segment_ids, lax.scatter_max, num_segments,
      indices_are_sorted, unique_indices, bucket_size, jnp.max, mode=mode)
2e98814bd37be39cc7abadd0cb795471e267d050
3,631,501
from typing import List
from typing import Any
from typing import Optional


def _recursive_pad(nested: List[Any], fill_value: Optional[Any] = None) -> np.ndarray:
    """Pads a jagged nested list of lists with the given value such that a
    proper multi-dimensional array can be formed with rectangular shape.
    The padding appends to the incomplete lists.

    Example::

        >>> _recursive_pad([[], [1], [2, 3], [4]], fill_value=0)  # doctest: +NORMALIZE_WHITESPACE
        array([[0, 0], [1, 0], [2, 3], [4, 0]], dtype=object)
    """
    # code adapted from stackexchange:
    # https://codereview.stackexchange.com/questions/222623/pad-a-ragged-multidimensional-array-to-rectangular-shape
    # Allocate a rectangular object array of the maximal shape, pre-filled
    # with fill_value, then copy every real element into place.
    dimensions = _get_max_shape(nested)
    result = np.full(dimensions, fill_value, dtype=object)
    for index, value in _iterate_nested_array(nested):
        result[index] = value
    return result
2ab19444f0b3e2e865d51f24b590de5a1d814c96
3,631,502
def create_trigger_body(trigger):
    """Strip the trigger-specific (immutable) keys from *trigger* so the
    remaining key/value pairs can be used to clone another trigger.

    https://googleapis.github.io/google-api-python-client/docs/dyn/tagmanager_v2.accounts.containers.workspaces.triggers.html#create

    :param trigger: [description]
    :type trigger: [type]
    """
    # Keys the Tag Manager API assigns itself; they must not be sent back.
    immutable_keys = {
        "accountId",
        "containerId",
        "fingerprint",
        "parentFolderId",
        "path",
        "tagManagerUrl",
        "triggerId",
        "workspaceId",
    }
    return {key: value for key, value in trigger.items()
            if key not in immutable_keys}
3b324407e77c1f17a5f76f82181db4976966e21b
3,631,503
from typing import Union


def is_less_or_equal(hash_1: Union[str, bytes], hash_2: Union[str, bytes]) -> bool:
    """check hash result.

    Both hashes are normalised to bytes, then compared as big integers.
    """
    def _normalise(h):
        # Hex strings are decoded; bytes pass through unchanged.
        return utils.hex_str_to_bytes(h) if isinstance(h, str) else h

    hash_1 = _normalise(hash_1)
    hash_2 = _normalise(hash_2)
    assert isinstance(hash_1, bytes)
    assert isinstance(hash_2, bytes)
    return utils.bytes_to_int(hash_1) <= utils.bytes_to_int(hash_2)
1633aa11587669d67ddd49eae9d73ab061e7e69b
3,631,504
def get_video_id(url):
    """Extract the YouTube video ID from a YouTube URL.

    Args:
        url (str): YouTube URL (watch or embed form).

    Returns:
        str: the video ID, or "" when the URL is falsy or carries no ``v``
        query parameter (the original raised KeyError in that case).
    """
    if not url:
        return ""
    # Embed URLs carry the ID as the last path segment.
    if "embed" in url:
        return url.split("/")[-1]
    parse_result = urlparse(url)
    query = parse_qs(parse_result.query)
    # Fix: guard against URLs without a ?v= parameter instead of raising
    # KeyError on query["v"].
    values = query.get("v")
    return values[0] if values else ""
9253bb3c11a4ed0ceaddfcb5b848de9157d9b290
3,631,505
import sys


def running_under_virtualenv():
    # type: () -> bool
    """
    Return True if we're running inside a virtualenv, False otherwise.
    """
    # pypa/virtualenv sets sys.real_prefix; a PEP 405 venv makes sys.prefix
    # differ from sys.base_prefix.
    in_virtualenv = hasattr(sys, 'real_prefix')
    in_pep405_venv = sys.prefix != getattr(sys, "base_prefix", sys.prefix)
    return in_virtualenv or in_pep405_venv
8266c6d3dd7fe05e51797208fc978c481093f8d0
3,631,506
def _tile_to_image_size(tensor, image_shape):
    """Inserts `image_shape` dimensions after `tensor` batch dimension."""
    trailing_dims = len(tensor.shape) - 1
    expanded = tensor
    # Add one singleton axis per image dimension, right after the batch axis.
    for _ in image_shape:
        expanded = tf.expand_dims(expanded, axis=1)
    # Replicate across the new axes; batch and trailing dims stay untouched.
    multiples = [1] + image_shape + [1] * trailing_dims
    return tf.tile(expanded, multiples)
57b460e58e3e9c705af62c87479ce6d4c81c787b
3,631,507
def icp3d(src, trgt, abs_tol=1e-8, max_iter=500, verbose=True):
    """
    Iterative-closest-point registration of two 3D point clouds.

    Parameters
    ----------
    src : numpy array
        Source object. Each row should be a point with (X, Y, Z) columns.
    trgt : numpy array
        Target object. Each row should be a point with (X, Y, Z) columns.
    abs_tol : float, optional
        Absolute tolerance for convergence. The default is 1e-8.
    max_iter : int, optional
        Maximum number of iterations. The default is 500.
    verbose : boolean, optional
        Whether to enable output. The default is True.

    Returns
    -------
    t : numpy array
        Translation matrix.
    R : numpy array
        Rotation matrix.
    src_trans : numpy array
        Transformed source points.
    nns : tuple
        Result of the last nearest-neighbour query (distances, indices) as
        returned by ``KDTree.query`` — note this fourth value was missing
        from the original docstring.
    """

    def least_squares(p, q, verbose=1):
        # SVD-based optimal rigid transform between paired point sets.
        P = np.mean(p, axis=0)
        Q = np.mean(q, axis=0)
        pp = p-P
        qq = q-Q
        H = np.dot(pp.T, qq)
        U, S, Vt = np.linalg.svd(H)
        X = np.dot(Vt.T, U.T)
        if np.linalg.det(X) < 0:
            # Reflection case: flip the singular vector belonging to a zero
            # singular value to recover a proper rotation.
            S_zero = np.nonzero(S == 0)[0]
            if S_zero.size > 0:
                Vt[S_zero, :] *= -1
                X = np.dot(Vt.T, U.T)
            else:
                if verbose:
                    print('None of the singular values are 0. ' +
                          'Conventional least-squares is probably not appropriate.')
                return np.zeros((3, 1)), np.eye(3)
        return Q.T - np.dot(X, P.T), X

    p = np.copy(src)
    q = np.copy(trgt)
    kd = KDTree(q)
    prev_mse = 0.0
    for i in range(max_iter):
        # Pair every moved source point with its closest target point.
        nns = kd.query(p)
        mse = (nns[0] ** 2).mean()
        if abs(mse - prev_mse) < abs_tol:
            if verbose:
                print('Converged in {} iterations.'.format(i))
            break
        prev_mse = mse
        t, R = least_squares(p, q[nns[1]], verbose=verbose)
        p = np.dot(p, R.T) + t.T
        if verbose and i == max_iter-1:
            print('Maximum number of iterations reached.')
    # Recompute the overall transform mapping the original source onto the
    # final aligned cloud.
    t, R = least_squares(src, p)
    return t, R, np.dot(src, R.T) + t.T, nns
969dc9bc2b0fce2933c7a31d45ab98089619e7d1
3,631,508
def event_type(event):
    """
    .. function:: event_type(event)

    Return the pygame event type constant named by *event*.

    ``event`` is looked up as an attribute name on the ``pygame`` module
    (e.g. ``"QUIT"``); names pygame does not define raise AttributeError.
    """
    return getattr(pygame, event)
de49421703a98df43ac57a5beea7acb042eaa8ff
3,631,509
def ULA(step, N, n):
    """
    MCMC ULA (unadjusted Langevin algorithm).

    Args:
        step: stepsize of the algorithm
        N: burn-in period
        n: number of samples after the burn-in

    Returns:
        traj: a numpy array of size (n, d), where the trajectory is stored
        traj_grad: numpy array of size (n, d), where the gradients of the
            potential U along the trajectory are stored

    Note: relies on the module-level globals ``d`` (dimension) and
    ``potential`` (exposing ``gradpotential``).
    """
    traj = np.zeros((n, d))
    traj_grad = np.zeros((n, d))
    x = np.random.normal(scale=1.0, size=d)  # initial value X_0
    for k in np.arange(N):  # burn-in period
        x = x - step * potential.gradpotential(x) \
            + np.sqrt(2 * step) * np.random.normal(size=d)
    # Fix: removed a leftover debug ``print(x)`` that dumped the chain state
    # to stdout during the burn-in phase.
    for k in np.arange(n):  # samples
        grad = potential.gradpotential(x)
        traj[k, ] = x
        traj_grad[k, ] = grad
        x = x - step * grad + np.sqrt(2 * step) * np.random.normal(size=d)
    return (traj, traj_grad)
36772e2988f35e408fc72da8bb71a78de602aee8
3,631,510
import logging


def get_parameters(parameters):
    """Get parameters from a function definition.

    Builds a mapping of safe parameter name -> {description, defaultValue?,
    type?} from a swagger-style parameter list.
    """
    params_out = {}
    for parameter in parameters:
        param_out = {}
        # TODO
        # Resolve meta refs
        # if "$ref" in param.keys():
        #     meta_path = param["$ref"].split("/")
        #     del meta_path[0]
        #     print(meta_path)
        #     val = definition
        #     for nested_key in meta_path:
        #         val = val[nested_key]
        #     print(val)
        #     param = val
        #     # continue
        if "description" not in parameter.keys():
            logging.warning("No description for %s", parameter["name"])
            param_out["description"] = ""
            # NOTE(review): the assignment above is discarded — this
            # ``continue`` skips adding the parameter to ``params_out``
            # entirely. Confirm whether dropping undescribed parameters
            # is intended.
            continue
        param_out["description"] = parameter["description"]
        name = safe_variable(parameter["name"])
        if "required" in parameter.keys():
            # If not required then add the default of None
            if not parameter["required"]:
                param_out["defaultValue"] = "None"
        if "type" in parameter.keys():
            param_out["type"] = normalise_type(parameter["type"])
        params_out[name] = param_out
    return params_out
f66f3d5f0799cbbec38af57f324671521b38ec32
3,631,511
def openbabel_mol_to_rdkit_mol(obmol: 'openbabel.OBMol',
                               remove_hs: bool = False,
                               sanitize: bool = True,
                               embed: bool = True,
                               ) -> 'RWMol':
    """
    Convert an OpenBabel molecular structure to a Chem.rdchem.RWMol object.

    Args:
        obmol (Molecule): An OpenBabel Molecule object for the conversion.
        remove_hs (bool, optional): Whether to remove hydrogen atoms from the
                                    molecule. Defaults to False.
        sanitize (bool, optional): Whether to sanitize the RDKit molecule.
                                   Defaults to True.
        embed (bool, optional): Whether to embed the 3D conformer from the
                                OBMol. Defaults to True.

    Returns:
        RWMol: A writable RDKit RWMol instance.
    """
    rw_mol = Chem.rdchem.RWMol()
    for obatom in ob.OBMolAtomIter(obmol):
        atom = Chem.rdchem.Atom(obatom.GetAtomicNum())
        isotope = obatom.GetIsotope()
        if isotope != 0:
            atom.SetIsotope(isotope)
        spin = obatom.GetSpinMultiplicity()
        if not remove_hs:
            # Explicit Hs are kept, so suppress implicit-H perception.
            atom.SetNoImplicit(True)
        if spin == 2:  # radical
            atom.SetNumRadicalElectrons(1)
        elif spin in [1, 3]:  # carbene
            # TODO: Not sure if singlet and triplet are distinguished
            atom.SetNumRadicalElectrons(2)
        atom.SetFormalCharge(obatom.GetFormalCharge())
        # Fix: the original guarded this with
        # ``if not (remove_hs and obatom.GetAtomicNum == 1)`` — comparing the
        # *bound method* to 1, which is always False, so the atom was always
        # added. Skipping atoms here would also desynchronise the 0-indexed
        # bond indices below, so the dead guard is removed; hydrogen removal
        # is performed correctly by Chem.RemoveHs further down.
        rw_mol.AddAtom(atom)
    for bond in ob.OBMolBondIter(obmol):
        # Atom indexes in Openbabel are 1-indexed; convert to 0-indexed.
        atom1_idx = bond.GetBeginAtomIdx() - 1
        atom2_idx = bond.GetEndAtomIdx() - 1
        # Get the bond order. For aromatic molecules, the bond order is not
        # 1.5 but 1 or 2. Manually set them to 1.5.
        bond_order = bond.GetBondOrder()
        if bond_order not in [1, 2, 3, 4] or bond.IsAromatic():
            bond_order = 1.5
        rw_mol.AddBond(atom1_idx, atom2_idx, ORDERS[bond_order])
    # Rectify the molecule
    if remove_hs:
        rw_mol = Chem.RemoveHs(rw_mol, sanitize=sanitize)
    elif sanitize:
        Chem.SanitizeMol(rw_mol)
    # If OBMol has 3D information, it can be embedded into the RDKit Mol.
    if embed and obmol.HasNonZeroCoords():
        coords = get_obmol_coords(obmol)
        AllChem.EmbedMolecule(rw_mol)
        set_rdconf_coordinates(rw_mol.GetConformer(), coords)
    return rw_mol
ab30d50013aa7f8d0c45a7d502471afb42f8aa57
3,631,512
def get_display_name(record):
    """Get the display name for a record.

    Args:
        record
            A record returned by AWS.

    Returns:
        A display name for the bucket (the record's "Name" field).
    """
    name_field = "Name"
    return record[name_field]
a34c1c416cc41ae5f0087ba471d75b4bc5c87216
3,631,513
import math


def RadialToTortoise(r, M):
    """
    Convert the radial coordinate to the tortoise coordinate

    r = radial coordinate
    M = ADMMass used to convert coordinate
    return = tortoise coordinate value

    r* = r + 2M ln(r / 2M - 1); only defined outside the horizon (r > 2M).
    """
    horizon_ratio = r / (2. * M) - 1.
    return r + 2. * M * math.log(horizon_ratio)
1bbfad661d360c99683b3c8fbe7a9c0cabf19686
3,631,514
from typing import Set


def extract_leaves(
    tree_dict: StrDict,
) -> Set[str]:
    """
    Extract a set with the SMILES of all the leaf nodes, i.e. starting material

    :param tree_dict: the route
    :return: a set of SMILE strings
    """
    # Iterative depth-first traversal instead of recursion; visit order is
    # irrelevant since the result is a set.
    leaves: Set[str] = set()
    pending = [tree_dict]
    while pending:
        node = pending.pop()
        children = node.get("children", [])
        if children:
            pending.extend(children)
        else:
            leaves.add(node["smiles"])
    return leaves
c932426f8d308a840690347bdd41af402bf6880a
3,631,515
def station_matcher(
        data_stream_df, ses_directory="../data/seattle_ses_data/ses_data.shp"):
    """ Matches Purple Air data with census tracts

    This function reads in the census-tract-level socioeconomic dataset
    and joins it with the input Purple Air DataStreams.

    Args:
        data_stream_df (pandas dataframe): dataframe containing one row
            for each DataStream CSV file. Columns must include "lat" and
            "lon". A suitable dataframe can be generated by
            air.files_to_dataframe().
        ses_directory (str): path to the SES shapefile to read.

    Returns:
        new_ses_data (pandas dataframe): tract-level socioeconomic dataset
            with an added column containing the paths to the CSV files for
            DataStreams contained within each tract. The CSV file paths
            are provided as a single string, with paths separated by
            commas.
    """
    # Load socioeconomic dataset.
    ses_file = ses_directory
    ses_data = gpd.read_file(ses_file)
    # Re-project to WGS84 so it matches the lat/lon points built below.
    new_ses_data = ses_data.to_crs(epsg=4326)
    # Convert input to GeoDataFrame using lat/lon.
    data_stream_gdf = gpd.GeoDataFrame(
        data_stream_df,
        geometry=gpd.points_from_xy(
            data_stream_df["lon"], data_stream_df["lat"]),
        crs="EPSG:4326")
    # Join the two dataframes (must use "inner" to retain sensor file names).
    combined = sjoin(new_ses_data, data_stream_gdf, how="inner",
                     op="intersects")
    # Combine rows from same tract ('NAME10' is census tract name).
    grouped = combined.groupby("NAME10")
    # Make new column containing CSV file names separated by commas.
    aggregate = grouped.agg(
        all_names=pd.NamedAgg(column="file", aggfunc=",".join))
    # Add CSV file names to SES dataset
    new_ses_data["data_stream_file_names"] = new_ses_data.apply(
        lambda row: get_stream_names(aggregate, row["NAME10"]), axis=1)
    # Number of datastream CSV files for each tract.
    new_ses_data["datastream_counts"] = new_ses_data.apply(
        lambda row: count_csv_files(row["data_stream_file_names"]), axis=1)
    # Number of Sensors in each tract (number of CSV files divided by 4).
    new_ses_data["sensor_counts"] = new_ses_data["datastream_counts"] / 4
    # Give SES fields friendlier names.
    new_ses_data = new_ses_data.rename(columns=ses_name_mappings)
    return new_ses_data
9b4c1293d7987138d17c9ec6606fff18d4a6e8c6
3,631,516
def running(pid):
    """
    pid: a process id

    Return: False if the pid is None or if the pid does not match a
    currently-running process.

    Derived from code in http://pypi.python.org/pypi/python-daemon/ runner.py
    """
    if pid is None:
        return False
    try:
        # signal.SIG_DFL is 0: probes for process existence without
        # actually delivering a signal.
        os.kill(pid, signal.SIG_DFL)
    except OSError as exc:  # fix: was Python-2-only "except OSError, exc"
        if exc.errno == errno.ESRCH:
            # The specified PID does not exist
            return False
    return True
622951e6d5c2f832516e00a607e6ac612ce365a9
3,631,517
import requests
import json


def get_pulls_list(project, github_api=3):
    """Get the pull request list for *project*.

    Args:
        project: "owner/repo" slug.
        github_api: version of github api to use (2 or 3).

    Returns:
        The parsed JSON payload: the list of pulls (v3) or the "pulls"
        entry of the v2 envelope.

    Raises:
        requests.HTTPError: on a non-2xx response.
    """
    if github_api == 3:
        url = f"https://api.github.com/repos/{project}/pulls"
    else:
        url = f"http://github.com/api/v2/json/pulls/{project}"
    response = requests.get(url)
    response.raise_for_status()
    # Idiom: requests decodes JSON directly; avoids double json.loads calls.
    payload = response.json()
    if github_api == 2:
        return payload["pulls"]
    return payload
891c99d53faa5fb89960e5bb52c85e42f6003c42
3,631,518
import os
import re


def get_version(*file_paths):
    """Retrieves the version from flexible_reports/__init__.py.

    Args:
        *file_paths: path components relative to this file's directory.

    Returns:
        The version string matched from a ``__version__ = '...'`` line.

    Raises:
        RuntimeError: when no version assignment is found.
    """
    filename = os.path.join(os.path.dirname(__file__), *file_paths)
    # Fix: the original leaked the file handle (open() without close).
    with open(filename) as version_fh:
        version_file = version_fh.read()
    version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
                              version_file, re.M)
    if version_match:
        return version_match.group(1)
    raise RuntimeError('Unable to find version string.')
37439dc2f5aa1c49cd82978c26f4092b21fbad29
3,631,519
import urllib.request
import json


def get_articles(id):
    """
    Function that gets the json response to our url request.

    Fetches the source's articles (via the module-level ``source_url`` and
    ``api_key``) and returns the processed list, or None when the response
    contains no articles.
    """
    get_sources_news_url = source_url.format(id, api_key)
    # Fix: ``import urllib`` alone does not make ``urllib.request``
    # available — the submodule must be imported explicitly.
    with urllib.request.urlopen(get_sources_news_url) as url:
        get_news_data = url.read()
        get_news_response = json.loads(get_news_data)
        news_results = None
        if get_news_response['articles']:
            news_results_list = get_news_response['articles']
            # Fix: removed a leftover debug print of the raw article list.
            news_results = process_articles(news_results_list)
    return news_results
5a2e2410561302b4023746559edd28102ccaa527
3,631,520
def read_chunk(path, start_offset, end_offset, delete_me_entire_func_maybe):
    """
    Read bytes [start_offset, end_offset) from the file at *path*.

    Return only if 100% successful: the bytes read (shorter than requested
    at EOF); FileNotFoundError propagates when *path* does not exist.

    ``delete_me_entire_func_maybe`` is unused and kept only for caller
    compatibility.
    """
    # Fix: dropped the original ``except FileNotFoundError as e: raise e``
    # wrapper — it was a no-op; the exception propagates unchanged anyway.
    with open(path, 'rb') as f:
        f.seek(start_offset)
        return f.read(end_offset - start_offset)
e45c948bcb7f75fdf0eecac8289e4323b2d88dfe
3,631,521
def returnBestAddress(genes, loop):
    """Searches for available genes matching kegg enzyme entry.

    This function searches 'sequentially'. It returns the best available
    model organism genes. Organisms phylogenetically closer to Cricetulus
    griseus are preferred, but they are chosen by approximation. A detailed
    study of the phylogenetic tree has not been done for this project.
    Hopefully going sequentially increases both readability and efficiency.

    Parameters
    ----------
    genes : dict
        key: value pair is organism: address
    loop : string
        Indicates the highest potential group of matching organisms to
        search in ('best', 'mammals', 'vertebrates' or 'csm').

    Returns
    -------
    The gene address for the matched organism ('best'/'csm' branches).
    NOTE(review): the 'mammals' and 'vertebrates' branches instead return
    the *set of matching organism codes* (not addresses), and the function
    implicitly returns None when nothing matches at all — confirm callers
    expect this mixed contract.
    """
    if loop == 'best':
        # Preference order among the closest model organisms.
        if 'CGE' in genes:
            return genes['CGE']
        elif 'MMU' in genes:
            return genes['MMU']
        elif 'RNO' in genes:
            return genes['RNO']
        elif 'HSA' in genes:
            return genes['HSA']
        else:
            loop = 'mammals'
    if loop == 'mammals':
        # ``mammals`` is a module-level collection of organism codes.
        mammal_match = set(genes.keys()).intersection(mammals)
        if bool(mammal_match):
            return mammal_match
        else:
            loop = 'vertebrates'
    if loop == 'vertebrates':
        # ``animals`` is a module-level collection of organism codes.
        animal_match = set(genes.keys()).intersection(animals)
        if bool(animal_match):
            return animal_match
        else:
            loop = 'csm'  # Stands for "common simple models"
    if loop == 'csm':
        if 'DME' in genes:
            return genes['DME']
        elif 'SCE' in genes:
            return genes['SCE']
        elif 'ECO' in genes:
            return genes['ECO']
af38d9456120dbbe99a243764dea20e52d0ba3c1
3,631,522
from typing import Any


def is_name_like_value(
    value: Any, allow_none: bool = True, allow_tuple: bool = True, check_type: bool = False
) -> bool:
    """
    Check the given value is like a name.

    Examples
    --------
    >>> is_name_like_value('abc')
    True
    >>> is_name_like_value(1)
    True
    >>> is_name_like_value(None)
    True
    >>> is_name_like_value(('abc',))
    True
    >>> is_name_like_value(1.0j)
    True
    >>> is_name_like_value(list('abc'))
    False
    >>> is_name_like_value(None, allow_none=False)
    False
    >>> is_name_like_value(('abc',), allow_tuple=False)
    False
    >>> is_name_like_value(1.0j, check_type=True)
    False
    """
    # Guard clauses instead of a single if/elif chain.
    if value is None:
        return allow_none
    if isinstance(value, tuple):
        if not allow_tuple:
            return False
        return is_name_like_tuple(value, allow_none=allow_none, check_type=check_type)
    if is_list_like(value) or isinstance(value, slice):
        return False
    if check_type:
        return as_spark_type(type(value), raise_error=False) is not None
    return True
f465c0e660399c4c330dc08d24cde479dfd0ff47
3,631,523
def get_uptime(then):
    """
    then: datetime instance | string

    Return a string that informs how much time has passed since the
    provided timestamp (e.g. "3 days ago", "just now").
    """
    if isinstance(then, str):
        then = dt.datetime.strptime(then, "%Y-%m-%dT%H:%M:%SZ")
    # Compare naive datetimes; any tzinfo on ``then`` is discarded.
    delta = dt.datetime.now() - then.replace(tzinfo=None)
    days = delta.days
    hours, remainder = divmod(delta.seconds, 3600)
    mins = remainder // 60
    # Report only the largest relevant unit.
    if days > 0:
        age = f"{days} day" if days == 1 else f"{days} days"
    elif hours > 0:
        age = f"{hours} hour" if hours == 1 else f"{hours} hours"
    elif mins == 0:
        return "just now"
    elif mins == 1:
        age = f"{mins} min"
    else:
        age = f"{mins} mins"
    return age + " ago"
b1110c9c3edfd4960405b74da8da57e5f455775d
3,631,524
import subprocess
import click


def _create_kube_config_gcloud_entry(cluster_name, cluster_zone, project):
    """Uses GCloud CLI to create an entry for Kubectl.

    This is needed as we install the charts using kubectl, and it needs the
    correct config.

    Args:
        cluster_name (str): Name of cluster
        cluster_zone (str): Zone of cluster
        project (str): Current used project

    Returns:
        (str): Kube context for the cluster

    Raises:
        click.UsageError: when the gcloud invocation exits non-zero.
    """
    # "gcloud container clusters get-credentials" writes the credentials
    # into the local kubeconfig as a side effect.
    p = subprocess.Popen(
        [
            "gcloud",
            "container",
            "clusters",
            "get-credentials",
            cluster_name,
            "--zone",
            cluster_zone,
            "--project",
            project,
        ],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    output, error = p.communicate()
    if p.returncode != 0:
        raise click.UsageError(
            "Failed to add kube config entry:\n {}".format(error.decode())
        )
    # gcloud names the created context "gke_<project>_<zone>_<cluster>".
    context_name = "gke_{}_{}_{}".format(project, cluster_zone, cluster_name)
    return context_name
b161e397445393bde989b6ae9dec1353fec38329
3,631,525
import torch


def gaussian2kp(heatmap, kp_variance='matrix', clip_variance=None):
    """
    Extract the mean and the variance from a heatmap.

    Args:
        heatmap: keypoint heatmap tensor; the indexing below implies a 5-D
            layout with the two spatial dims last — assumes
            (bs, ?, num_kp, h, w); TODO confirm against callers.
        kp_variance: 'matrix' for a full 2x2 covariance per keypoint,
            'single' for a scalar variance; any other value skips variance.
        clip_variance: optional lower bound applied to the covariance's
            smallest singular value (only with kp_variance == 'matrix').

    Returns:
        dict with 'mean' and (when requested) 'var' tensors.
    """
    shape = heatmap.shape
    # adding small eps to avoid 'nan' in variance
    heatmap = heatmap.unsqueeze(-1) + 1e-7
    grid = make_coordinate_grid(shape[3:], heatmap.type()).unsqueeze_(0).unsqueeze_(0).unsqueeze_(0)
    # Expected coordinate under the (unnormalised) heatmap weighting.
    mean = (heatmap * grid).sum(dim=(3, 4))
    kp = {'mean': mean.permute(0, 2, 1, 3)}
    if kp_variance == 'matrix':
        mean_sub = grid - mean.unsqueeze(-2).unsqueeze(-2)
        # Outer product of the centred coordinates -> per-pixel 2x2 matrix.
        var = torch.matmul(mean_sub.unsqueeze(-1), mean_sub.unsqueeze(-2))
        var = var * heatmap.unsqueeze(-1)
        var = var.sum(dim=(3, 4))
        var = var.permute(0, 2, 1, 3, 4)
        if clip_variance:
            # Rescale covariances whose smallest singular value falls below
            # clip_variance.
            min_norm = torch.tensor(clip_variance).type(var.type())
            sg = smallest_singular(var).unsqueeze(-1)
            var = torch.max(min_norm, sg) * var / sg
        kp['var'] = var
    elif kp_variance == 'single':
        mean_sub = grid - mean.unsqueeze(-2).unsqueeze(-2)
        var = mean_sub ** 2
        var = var * heatmap
        var = var.sum(dim=(3, 4))
        # Average over the two coordinate axes -> isotropic variance.
        var = var.mean(dim=-1, keepdim=True)
        var = var.unsqueeze(-1)
        var = var.permute(0, 2, 1, 3, 4)
        kp['var'] = var
    return kp
5953e9ef4e0717341f01555e227868a5a4b2fc2d
3,631,526
def BRepBlend_HCurve2dTool_Circle(*args):
    """
    SWIG-style wrapper delegating to the native _BRepBlend implementation.

    :param C:
    :type C: Handle_Adaptor2d_HCurve2d &
    :rtype: gp_Circ2d
    """
    return _BRepBlend.BRepBlend_HCurve2dTool_Circle(*args)
e3087d0e9e1505b47d10066b2a4ab25d72b15de2
3,631,527
import inspect
import math


def patchMath():
    """
    Overload various math functions to work element-wise on iterables

        >>> A = Array([[0.0, pi/4.0], [pi/2.0, 3.0*pi/4.0], [pi, 5.0*pi/4.0], [3.0*pi/2.0, 7.0*pi/4.0]])
        >>> print(round(A,2).formated())
        [[0.0, 0.79],
         [1.57, 2.36],
         [3.14, 3.93],
         [4.71, 5.5]]
        >>> print(degrees(A).formated())
        [[0.0, 45.0],
         [90.0, 135.0],
         [180.0, 225.0],
         [270.0, 315.0]]
        >>> print(round(sin(A), 2).formated())
        [[0.0, 0.71],
         [1.0, 0.71],
         [0.0, -0.71],
         [-1.0, -0.71]]
    """
    # Wrap every builtin of the math module with _patchfn and re-export the
    # wrapped version from this module.
    mathfn = inspect.getmembers(math, inspect.isbuiltin)
    for mfn in mathfn:
        fname = mfn[0]
        basefn = mfn[1]
        newfn = _patchfn(basefn)
        _thisModule.__setattr__(fname, newfn)

    def patchable_mathutils_func(f):
        # Only patch plain functions actually defined in pymel.util.mathutils
        # (skip anything re-exported from elsewhere).
        if not inspect.isfunction(f):
            return False
        return f.__module__ == 'pymel.util.mathutils'

    mathutilsfn = inspect.getmembers(mathutils, patchable_mathutils_func)
    for mfn in mathutilsfn:
        fname = mfn[0]
        basefn = mfn[1]
        newfn = _patchfn(basefn)
        _thisModule.__setattr__(fname, newfn)

    # builtins that do not need to be manually redefined, curently only abs
    _thisModule.__setattr__('abs', _patchfn(_abs))
da29f21ee08bfee29d9cd62148384fbd0fa9ede7
3,631,528
def iddr_rid(m, n, matvect, k):
    """
    Compute ID of a real matrix to a specified rank using random
    matrix-vector multiplication.

    :param m:
        Matrix row dimension.
    :type m: int
    :param n:
        Matrix column dimension.
    :type n: int
    :param matvect:
        Function to apply the matrix transpose to a vector, with call
        signature `y = matvect(x)`, where `x` and `y` are the input and
        output vectors, respectively.
    :type matvect: function
    :param k:
        Rank of ID.
    :type k: int

    :return:
        Column index array.
    :rtype: :class:`numpy.ndarray`
    :return:
        Interpolation coefficients.
    :rtype: :class:`numpy.ndarray`
    """
    idx, proj = _id.iddr_rid(m, n, matvect, k)
    # The Fortran backend returns the coefficients flattened; take the first
    # k*(n-k) entries and reshape column-major (Fortran order) into (k, n-k).
    proj = proj[:k*(n-k)].reshape((k, n-k), order='F')
    return idx, proj
7878a49dfa4e0c7c4530e16fb904a6778ee2aa3d
3,631,529
def MediumOverLong(lengths):
    """
    A measure of how needle-like or how plate-like a molecule is.

    0 means perfect needle shape; 1 means perfect plate-like shape.

        MediumOverLong = Medium / Longest

    (The original docstring mislabelled the formula "ShortOverLong" —
    copy-paste from a sibling function.)

    Args:
        lengths: axis lengths sorted ascending as (short, medium, long).

    Returns:
        float in (0, 1].
    """
    return lengths[1]/lengths[2]
48a053b55b39a50d7b0f618f843d370a55220765
3,631,530
import base64
from pathlib import Path


def cbase64(obj, mode: int = 1, to_file: t.Union[str, Path] = None, altchars=None, validate=False):
    """
    Base64 encoding and decoding (for bytes or whole files).

    Usage examples:

        # 1) For byte content
        obj = b'example content'
        cobj = crypto.cbase64(obj)

        # 2) For a file
        obj = 'D:/tmp/t.txt'
        to_file = 'D:/tmp/t.c'
        cobj = crypto.cbase64(obj, to_file=to_file)

        # For decoding, see the ``mode`` parameter.

    :param obj: the object to process (bytes, or an input file path when
        ``to_file`` is given)
    :param mode: mode (1 = encode; 2 = decode)
    :param to_file: output file path (file mode only)
    :param altchars: 2 bytes specifying an alternative alphabet replacing
        '+' and '/'
    :param validate: passed through to ``base64.b64decode`` when decoding
    :return: the encoded/decoded bytes, or ``to_file`` in file mode
    """
    # ``choicer`` validates mode against the allowed choices.
    mode = choicer(mode, choices=[1, 2], lable='mode')
    if to_file:
        # NOTE(review): the file is processed line by line, so encoded
        # output is newline-delimited per input line; binary inputs with
        # embedded newlines round-trip only through this same function.
        with open(obj, 'rb') as f, open(to_file, 'wb') as f2:
            for line in f.readlines():
                if mode == 1:
                    f2.write(base64.b64encode(line, altchars))
                    f2.write(b'\n')
                else:
                    f2.write(base64.b64decode(line, altchars))
        return to_file
    else:
        if mode == 1:
            _obj = base64.b64encode(obj, altchars)
        else:
            _obj = base64.b64decode(obj, altchars, validate)
        return _obj
bb70ddb23185e8934c2c31400f4eb0c0adaee202
3,631,531
from typing import Optional
import os
import re


def change_suffix(fname: str, new_suffix: str, old_suffix: Optional[str] = None) -> str:
    """Change suffix of filename.

    Changes suffix of a filename. If no old suffix is provided, the part
    that is replaced is guessed.

    Args:
        fname: Filename to process.
        new_suffix: Replace old suffix with this.
        old_suffix: (Optional) Old suffix of filename - must be part of
            filename. Default = None.

    Returns:
        Filename with replaced suffix.

    Examples:
        >>> change_suffix("test.txt.gz", "")
        "test.txt"
        >>> change_suffix("test.sorted.txt.gz", ".txt", ".sorted.txt.gz")
        "test.txt"
    """
    if not old_suffix:
        old_suffix = os.path.splitext(fname)[1]
    # Fix: escape the suffix so regex metacharacters in it ("." most
    # notably) match literally instead of acting as patterns.
    return str(re.sub(re.escape(old_suffix) + "$", new_suffix, fname))
ae9f4c05d88d8d293e59d8e8a58b8961a36c568c
3,631,532
def segment_objects(white_cloud):
    """
    Cluster extraction and create cluster mask.

    Runs Euclidean cluster extraction on a point cloud (presumably a
    python-pcl XYZ cloud — confirm against callers).

    Args:
        white_cloud: point cloud exposing make_kdtree() and
            make_EuclideanClusterExtraction().

    Returns:
        Per-cluster lists of point indices into ``white_cloud``.
    """
    tree = white_cloud.make_kdtree()
    # Create a cluster extraction object
    ec = white_cloud.make_EuclideanClusterExtraction()
    # Set tolerances for distance threshold
    # as well as minimum and maximum cluster sizes (in points)
    ec.set_ClusterTolerance(0.025)
    ec.set_MinClusterSize(50)
    ec.set_MaxClusterSize(20000)
    ec.set_SearchMethod(tree)
    cluster_indices = ec.Extract()
    return cluster_indices
590c3d75a1739128d97e998a601e92e335507915
3,631,533
import sys


def new_markers():
    """
    Read names of packages from the script's arguments and create a set of
    those that are installed; report an error for each one that is not.
    """
    # Local renamed so it no longer shadows the function's own name.
    marked = set()
    for package in sys.argv[1:]:
        if not check_package(package):
            error(f"ERROR: Package {package} not installed in system. Can't mark it.")
        else:
            marked.add(package)
    return marked
42bfebc5669e211fdc2c1fa039293bf35c46d9fb
3,631,534
def form():
    """Dummy endpoint for demonstration purposes."""
    fields = []
    fields.append(ActionFormField(
        name='email_address',
        label='Email Address',
        description='Email address to send PowerPoint document',
        required=True,
    ))
    fields.append(ActionFormField(
        name='email_subject',
        label='Subject',
        description='Email subject line',
        required=True,
    ))
    # Body uses a textarea widget rather than a single-line input.
    fields.append(ActionFormField(
        name='email_body',
        label='Body',
        description='Email body text',
        required=True,
        type='textarea'
    ))
    fields.append(ActionFormField(
        name='filename',
        label='Filename',
        description='Filename for the generated Word document',
        required=True,
    ))
    return fields
9b10b3621f39d061c448d3e3d0c512dc0d1639fe
3,631,535
import cmd


def task_pgtune_tune():
    """
    pgtune: Apply Greg Smith's pgtune.

    Returns a doit task dict whose actions run pgtune over a fresh
    postgresql.conf and apply each resulting setting via ALTER SYSTEM.
    """

    def alter_sql():
        # Turn every "key = value" line of the pgtune output into an
        # ALTER SYSTEM SET statement executed through psql.
        with open(PGTUNE_CONF, "r") as f:
            for line in f:
                if "=" in line:
                    key, val = [s.strip() for s in line.split("=")]
                    sql = f"ALTER SYSTEM SET {key}='{val}'"
                    # NOTE(review): this local ``cmd`` shadows the imported
                    # ``cmd`` module, yet the restart lambda below relies on
                    # ``cmd.sudo`` — presumably a plumbum-style helper, not
                    # the stdlib ``cmd``; confirm the intended import.
                    cmd = CmdAction(
                        f'PGPASSWORD={DEFAULT_PASS} psql --host=localhost --dbname={DEFAULT_DB} --username={DEFAULT_USER} --command="{sql}"'
                    )
                    cmd.execute()
                    print(cmd, cmd.out.strip(), cmd.err.strip())

    return {
        "actions": [
            f"touch {BUILD_PATH / 'postgresql.conf'}",
            (
                f"python2 {PGTUNE} "
                f"--input-config={BUILD_PATH / 'postgresql.conf'} "
                f"--output-config={PGTUNE_CONF} "
                # The --version parameter doesn't have support for newer versions of PostgreSQL.
                # f"--version=$(pg_config | grep VERSION | cut -d' ' -f 4) "
                f"--type=Mixed "
            ),
            alter_sql,
            lambda: cmd.sudo["systemctl"]["restart", "postgresql"].run_fg(),
            "until pg_isready ; do sleep 1 ; done",
        ],
        "file_dep": [PGTUNE],
        "uptodate": [False],
        "verbosity": VERBOSITY_DEFAULT,
    }
489e8ab45b139620ca621be5600f6c61dbf81210
3,631,536
def rescale_exchange(exc, value, remove_uncertainty=True):
    """Dummy function to rescale exchange amount and uncertainty.

    This depends on some code being separated from Ocelot, which will take
    a bit of time.

    * ``exc`` is an exchange dataset.
    * ``value`` is a number, to be multiplied by the existing amount.
    * ``remove_uncertainty``: Remove (unscaled) uncertainty data, default
      is ``True``.

    Returns the modified exchange."""
    assert isinstance(exc, dict), "Must pass exchange dictionary"
    assert isinstance(value, Number), "Constant factor ``value`` must be a number"
    exc['amount'] = exc['amount'] * value
    if remove_uncertainty:
        # Reset to "no uncertainty" and centre the location on the new amount.
        exc['uncertainty type'] = 0
        exc['loc'] = exc['amount']
        for field in ('shape', 'size', 'minimum', 'maximum'):
            exc.pop(field, None)
    return exc
b3fee3bc20632563722b624dd35e4fa6a3a5b9c8
3,631,537
def create():
    """
    does setup of Tellor contract on Algorand

    solidity equivalent: constructor()

    args:
        0) governance address
        1) query id
        2) query data
    """
    return Seq(
        [
            # The deploying account becomes the tipper.
            App.globalPut(tipper, Txn.sender()),
            # TODO assert application args length is correct
            App.globalPut(governance_address, Txn.application_args[0]),
            App.globalPut(query_id, Txn.application_args[1]),
            App.globalPut(query_data, Txn.application_args[2]),  # TODO perhaps parse from ipfs
            # 0-not Staked, 1=Staked
            App.globalPut(reporter, Bytes("")),
            App.globalPut(staking_status, Int(0)),
            App.globalPut(num_reports, Int(0)),
            App.globalPut(stake_amount, Int(200000)),  # 200 dollars of ALGO
            Approve(),
        ]
    )
d875d137d66b00d33241c5bd25e977e70d903f45
3,631,538
import json
import os


def task(args):
    """Find the adequate limits for a task.

    Args:
        args: parsed CLI namespace with ``path`` (solution paths),
            ``usecorrect`` (also use the task's correctSolutions) and
            ``taskpath`` (task directory).

    Returns:
        0 on success, 1 when no solutions could be gathered.
    """
    solutionList = args.path[:]
    if args.usecorrect or len(args.path) == 0:
        try:
            # Fix: close the settings file (the original leaked the handle).
            with open(os.path.join(args.taskpath, 'taskSettings.json'), 'r') as settings_file:
                taskSettings = json.load(settings_file)
            correctSolutions = taskSettings['correctSolutions']
            solutionList.extend(map(lambda c: c['path'].replace('$TASK_PATH', args.taskpath), correctSolutions))
        # Fix: narrowed from a bare ``except`` that also swallowed
        # KeyboardInterrupt/SystemExit. json.JSONDecodeError is a ValueError
        # subclass; TypeError covers malformed correctSolutions entries.
        except (OSError, ValueError, KeyError, TypeError):
            print("Warning: unable to add correctSolutions.")
    if len(solutionList) == 0:
        print("Error: no solutionList given on command-line, and no correctSolutions added.")
        return 1
    print("Searching limits with %d solutions." % len(solutionList))
    results = findLimits(args.taskpath, solutionList)
    print("""
Limits found:
Time: %(maxTimeLimit)dms / Memory: %(maxMemLimit)dKb
(maximum time used: %(maxTime)dms, memory used: %(maxMem)dKb)""" % results)
    return 0
6aed71c44ad23e890f1d419239208da9409e8004
3,631,539
def merge_channels(channels):
    """
    Takes a list of channels as input and outputs the image obtained by
    merging the channels. A single channel is returned unchanged.
    """
    if len(channels) == 1:
        return channels[0]
    return cv2.merge(tuple(channels))
7eff099248f40d8c166c711d341834d5db0c1b7f
3,631,540
import warnings


def rng(spec=None, *, legacy=False):
    """
    Get a random number generator.

    This is similar to :func:`sklearn.utils.check_random_seed`, but it
    usually returns a :class:`numpy.random.Generator` instead.

    .. warning::
        This method is deprecated. Use :func:`seedbank.numpy_rng` instead.

    Args:
        spec: The spec for this RNG. Can be an ``int``, ``None``,
            :class:`numpy.random.SeedSequence`,
            :class:`numpy.random.mtrand.RandomState`, or
            :class:`numpy.random.Generator`.
        legacy(bool): If ``True``, return
            :class:`numpy.random.mtrand.RandomState` instead of a new-style
            :class:`numpy.random.Generator`.

    Returns:
        numpy.random.Generator: A random number generator.
    """
    warnings.warn('rng is deprecated, use seedbank.numpy_rng', DeprecationWarning)
    # Delegate to seedbank; only the factory differs between the two modes.
    factory = seedbank.numpy_random_state if legacy else seedbank.numpy_rng
    return factory(spec)
63b6cfa03c336c31f47da7b215888335b37da5e4
3,631,541
def create_sdcard_tar (adb,tarpath):
    """
    Returns the remote path of the tar file containing the whole WhatsApp
    directory from the SDcard

    :param adb: adb wrapper exposing ``shell_command`` (returns command output)
    :param tarpath: path to the ``tar`` binary on the device
    :return: remote path of the created tar file, or None when the tar file
        does not exist afterwards (creation failed)
    """
    # Python 2 code: `print` statements, `string.letters`, `xrange`.
    # Random suffix avoids clashing with an earlier archive on the device.
    tarname = '/sdcard/whatsapp_' + ''.join(random.choice(string.letters) for i in xrange(10)) + '.tar'
    print "\n[+] Creating remote tar file: %s" % tarname
    cmd = "%s -c /sdcard/WhatsApp -f %s" % (tarpath , tarname)
    print "\t+ Command: %s" % cmd
    # NOTE(review): the tar command's output is discarded — presumably run
    # only for its side effect; success is checked via `ls` below. Verify.
    adb.shell_command(cmd).strip()
    # Probe for the archive: `ls` echoes the path on success, an error
    # message on failure.
    res = adb.shell_command("ls %s" % tarname).strip()
    if res == "ls: %s: No such file or directory" % tarname:
        return None
    else:
        return tarname
4d01bedc86c18cb43d53ce4c53be06e1eb7b1232
3,631,542
import pickle
import os


def table_coverage(class_name, name, root, Xsamp, burnin=500, MC=None, plot=False,
                   movie=False, thinning=1, loadDict=False, CI='quant'):
    """Compute credible-interval coverage of a mixture-of-experts model.

    For each sample in ``Xsamp``, builds a credible interval of mass 0.95 from
    the model's conditional predictive density and checks whether the
    data-generating sample falls inside it.

    :param class_name: model class name passed to the model loader
    :param name: model name
    :param root: directory containing the model and output artefacts
    :param Xsamp: covariate samples, shape (n, d) — column 0 is used for plots
    :param burnin: MCMC burn-in discarded before density estimation
    :param MC: number of Monte-Carlo samples (model-specific)
    :param plot: False, True (show), 'save', or a subfolder name to save into
    :param movie: False, 'save', or a subfolder name; renders per-point frames
    :param thinning: MCMC thinning factor
    :param loadDict: load a previously saved coverage dictionary instead of
        recomputing densities
    :param CI: interval type: 'quant', 'hpd' or 'hpd_union'
    :return: (coverage, mean credible-interval width)
    """
    plt.close('all')
    interval = 0.95

    model = MixtureOfExperts.models.load_model(class_name, name, root)
    ybins = np.linspace(np.min(model.y) * 3, np.max(model.y) * 3, 500)

    # Make/get conditional predictions for expectation/density over (latent) response
    if loadDict is True:
        # NOTE(review): the load path omits the '/' that the save path below
        # inserts before the filename — confirm `root` ends with a separator.
        try:
            with open(root + 'coverage_dictionary.pkl', 'rb') as f:
                covDict = pickle.load(f)
        except (OSError, pickle.UnpicklingError, EOFError) as exc:
            raise RuntimeError('Unable to load coverage dictionary') from exc
    else:
        sampDict = plots_sample(model, Xsamp, ybins, burnin=burnin, MC=MC, thinning=thinning)
        y_samp = data_generating_function_sample(Xsamp[:, 0], mu1=3., mu2=5., tau1=0.8, tau2=0.8,
                                                 coef1=-0.2, coef2=-0.2, factor1=0.6, factor2=0.4)
        normaliser = np.sum(sampDict['D_samp'][0], axis=0)
        covDict = {**sampDict, 'y_samp': y_samp, 'normaliser': normaliser}
        with open(root + '/coverage_dictionary.pkl', 'wb') as f:
            pickle.dump(covDict, f, pickle.HIGHEST_PROTOCOL)

    # Unpack
    density_samp = covDict['D_samp'][0]
    y_samp = covDict['y_samp']
    normaliser = covDict['normaliser']
    Xsamp = covDict['Xsamp']

    # Bug fix: the original referenced an undefined name `Xtest` throughout;
    # the intended array is the (possibly reloaded) `Xsamp`.
    n_points = Xsamp.shape[0]
    bounds = [None] * n_points
    if movie is not False:
        frame_counter = 0

    for xi in range(n_points):
        norm_dens_xi = density_samp[:, xi] / normaliser[xi]
        cum_norm_dens_xi = np.cumsum(norm_dens_xi)

        if CI == 'quant':
            bounds[xi] = MixtureOfExperts.utils.quant(ybins, norm_dens_xi, interval=interval)
        elif CI == 'hpd':
            bounds[xi] = MixtureOfExperts.utils.hpd(ybins, norm_dens_xi, interval=interval)
        elif CI == 'hpd_union':
            bounds[xi] = MixtureOfExperts.utils.hpd_union(ybins, norm_dens_xi, interval=interval)
        else:
            raise ValueError('invalid interval')

        if movie is not False:
            # Placeholder condition kept from the original: every frame is
            # rendered (a miss-only filter could be reinstated here).
            if True:
                scaled_norm_dens_xi = (norm_dens_xi / np.max(norm_dens_xi)) / 2
                plt.plot(ybins, scaled_norm_dens_xi)
                plt.plot(ybins, cum_norm_dens_xi)
                plt.scatter(y_samp[xi], 0)
                for bound in range(len(bounds[xi])):
                    plt.plot([bounds[xi][bound][0], bounds[xi][bound][0]], [0, 1], c='k')
                    plt.plot([bounds[xi][bound][1], bounds[xi][bound][1]], [0, 1], c='k')
                plt.title(Xsamp[xi, 0])
                if movie is not False:  # TODO: fix conditioning of movie
                    fname = root + name + '/_tmp%06d.png' % frame_counter
                    frame_counter += 1
                    plt.savefig(fname)
                    plt.close()
                else:
                    plt.show()
                    plt.close()

    if movie is not False:
        # Bug fix: `is`/`is not` with a string literal is undefined behaviour
        # (and a SyntaxWarning on modern Python) — use equality instead.
        if movie != 'save':
            subfolder = movie
        else:
            subfolder = ''
        try:
            os.system("rm " + root + name + subfolder + "/coverage_movie_" + CI + ".mp4")
            print('Deleted old movie')
        except OSError:
            pass
        os.system("ffmpeg -r " + str(5) + " -i " + root + name + "/_tmp%06d.png " +
                  root + name + subfolder + "/coverage_movie_" + CI + ".mp4")
        os.system("rm " + root + name + "/_tmp*.png")
        print('Removed temporary frame files')

    count_in = 0
    widths = np.zeros((n_points))
    for xi in range(n_points):
        c = 'r'
        for bound in range(len(bounds[xi])):
            # coverage count
            if bounds[xi][bound][0] <= y_samp[xi] <= bounds[xi][bound][1]:
                c = 'b'
                count_in += 1
            # credible interval width (summed over disjoint hpd_union pieces)
            widths[xi] += bounds[xi][bound][1] - bounds[xi][bound][0]
        if plot is not False:
            for bound in range(len(bounds[xi])):
                plt.plot(bounds[xi][bound] - y_samp[xi], [Xsamp[xi, 0], Xsamp[xi, 0]], c=c)

    if plot is not False:
        plt.xlim((-2, 2))
        if plot is True:
            plt.show()
        elif plot == 'save':
            plt.savefig(root + "/coverage_" + CI + ".eps", format='eps')
        elif isinstance(plot, str):
            plt.savefig(root + plot + "/coverage_" + CI + ".eps", format='eps')
        else:
            raise ValueError

    coverage = count_in / n_points
    plt.close('all')
    print('{0} out of {1}: {2}\\%'.format(count_in, n_points, coverage))
    print('Average credible interval width {0}'.format(np.mean(widths)))

    return coverage, np.mean(widths)
1b2998769dce95c35f9041d0da3ead564941ed06
3,631,543
def roll_zeropad(a, shift, axis=None):
    """Shift array elements along an axis, padding the vacated slots with zeros.

    Unlike :func:`numpy.roll`, elements pushed off one end are discarded and
    zeros appear at the other end instead of wrapping around.

    Args:
        a: array_like input array.
        shift: int, number of places by which elements are shifted.
        axis (int): optional axis to shift along. When omitted, the array is
            flattened before shifting and the original shape restored after.

    Returns:
        ndarray: output array with the same shape as `a`.

    Examples:
        >>> roll_zeropad(np.arange(10), 2)
        array([0, 0, 0, 1, 2, 3, 4, 5, 6, 7])
        >>> roll_zeropad(np.arange(10), -2)
        array([2, 3, 4, 5, 6, 7, 8, 9, 0, 0])
    """
    arr = np.asanyarray(a)
    if shift == 0:
        return arr

    flattened = axis is None
    length = arr.size if flattened else arr.shape[axis]

    if np.abs(shift) > length:
        # Every element shifted out: the result is all zeros.
        shifted = np.zeros_like(arr)
    elif shift < 0:
        shift += length
        kept = arr.take(np.arange(length - shift, length), axis)
        pad = np.zeros_like(arr.take(np.arange(length - shift), axis))
        shifted = np.concatenate((kept, pad), axis)
    else:
        kept = arr.take(np.arange(length - shift), axis)
        pad = np.zeros_like(arr.take(np.arange(length - shift, length), axis))
        shifted = np.concatenate((pad, kept), axis)

    return shifted.reshape(arr.shape) if flattened else shifted
97d29b4aff48580367d6c0ed474ca1ba020e2cf8
3,631,544
from typing import Any, Callable, Coroutine


def callable_to_coroutine(func: Callable, *args: Any, **kwargs: Any) -> Coroutine:
    """Wrap a sync or async callable into a coroutine.

    Arguments:
        func: callable that may be either synchronous or asynchronous.
        args: positional arguments forwarded to ``func``.
        kwargs: keyword arguments forwarded to ``func``.

    Returns:
        A coroutine: the direct call for async callables, otherwise the
        callable scheduled on the thread pool.
    """
    if is_awaitable_object(func):
        return func(*args, **kwargs)
    return run_in_threadpool(func, *args, **kwargs)
44fc48295f61ac0b7c74cfa9d9724473afb272ea
3,631,545
import six
import warnings


def construct_engine(engine, **opts):
    """.. versionadded:: 0.5.4

    Construct and return a SQLAlchemy engine.

    Options for ``create_engine`` may be supplied two ways:

    :param engine: connection string or an existing engine
    :param engine_dict: python dictionary of options to pass to `create_engine`
    :param engine_arg_*: keyword parameters to pass to `create_engine`
        (evaluated with :func:`migrate.versioning.util.guess_obj_type`)
    :type engine_dict: dict
    :type engine: string or Engine instance
    :type engine_arg_*: string
    :returns: SQLAlchemy Engine

    .. note:: keyword parameters override ``engine_dict`` values.
    """
    if isinstance(engine, Engine):
        return engine
    if not isinstance(engine, six.string_types):
        raise ValueError("you need to pass either an existing engine or a database uri")

    # Collect options for create_engine; reuse the caller's dict so
    # engine_arg_* overrides land in it, exactly as before.
    engine_dict = opts.get('engine_dict')
    kwargs = engine_dict if engine_dict and isinstance(engine_dict, dict) else dict()

    # DEPRECATED: handle the legacy echo flag.
    echo = asbool(opts.get('echo', False))
    if echo:
        warnings.warn('echo=True parameter is deprecated, pass '
                      'engine_arg_echo=True or engine_dict={"echo": True}',
                      exceptions.MigrateDeprecationWarning)
        kwargs['echo'] = echo

    # engine_arg_<name>=<value> keyword parameters.
    prefix = 'engine_arg_'
    for key, value in six.iteritems(opts):
        if key.startswith(prefix):
            kwargs[key[len(prefix):]] = guess_obj_type(value)

    log.debug('Constructing engine')
    # TODO: return create_engine(engine, poolclass=StaticPool, **kwargs)
    # seems like 0.5.x branch does not work with engine.dispose and staticpool
    return create_engine(engine, **kwargs)
10acf0bbee55391d5bdc63038d4f43f451c78818
3,631,546
def array_to_binary(array, start=None, end=None):
    """Build a balanced binary search tree from sorted `array` values via recursion.

    The middle element becomes the root; the left and right halves are built
    recursively into the left and right subtrees.

    :param array: sorted sequence of values
    :param start: inclusive start index of the slice to build (defaults to 0)
    :param end: inclusive end index of the slice (defaults to len(array) - 1)
    :return: root :class:`Node`, or ``None`` for an empty slice

    Fix: empty subtrees are now represented by ``None`` instead of the empty
    string ``''``, so leaf children have a consistent (non-str) type. Both
    values are falsy, so truthiness-based traversals keep working.
    """
    start = 0 if start is None else start
    end = len(array) - 1 if end is None else end
    if start > end:
        return None
    mid = (start + end) // 2
    node = Node(array[mid])
    node.left = array_to_binary(array, start, mid - 1)
    node.right = array_to_binary(array, mid + 1, end)
    return node
263fc8869961b3412d61288bd5aa562b8221ae37
3,631,547
async def handle_slack_command(*, db_session, client, request, background_tasks): """Handles slack command message.""" # We fetch conversation by channel id channel_id = request.get("channel_id") conversation = conversation_service.get_by_channel_id_ignoring_channel_type( db_session=db_session, channel_id=channel_id ) # We get the name of command that was run command = request.get("command") incident_id = 0 if conversation: incident_id = conversation.incident_id else: if command not in [SLACK_COMMAND_REPORT_INCIDENT_SLUG, SLACK_COMMAND_LIST_INCIDENTS_SLUG]: # We let the user know that incident-specific commands # can only be run in incident conversations return create_command_run_in_nonincident_conversation_message(command) # We get the list of public and private conversations the Slack bot is a member of ( public_conversations, private_conversations, ) = await dispatch_slack_service.get_conversations_by_user_id_async( client, SLACK_APP_USER_SLUG ) # We get the name of conversation where the command was run conversation_id = request.get("channel_id") conversation_name = await dispatch_slack_service.get_conversation_name_by_id_async( client, conversation_id ) if ( not conversation_name or conversation_name not in public_conversations + private_conversations ): # We let the user know in which public conversations they can run the command return create_command_run_in_conversation_where_bot_not_present_message( command, public_conversations ) user_id = request.get("user_id") user_email = await dispatch_slack_service.get_user_email_async(client, user_id) # some commands are sensitive and we only let non-participants execute them allowed = check_command_restrictions( command=command, user_email=user_email, incident_id=incident_id, db_session=db_session ) if not allowed: return create_command_run_by_non_privileged_user_message(command) for f in command_functions(command): background_tasks.add_task(f, incident_id, command=request) return 
INCIDENT_CONVERSATION_COMMAND_MESSAGE.get(command, f"Running... Command: {command}")
890db0570da2782482c4c1a2aa2772fbada48278
3,631,548
def load_cert_files(
    common_name, key_file, public_key_file, csr_file, certificate_file, crl_file
):
    """Loads the certificate, keys and revoked list files from storage

    :param common_name: Common Name for CA
    :type common_name: str, required when there is no CA
    :param key_file: key file full path
    :type key_file: str, required
    :param public_key_file: public key file full path
    :type public_key_file: str, required
    :param csr_file: certificate signing request file full path
    :type csr_file: str, required
    :param certificate_file: certificate file full path
    :type certificate_file: str, required
    :param crl_file: certificate revocation list file full path
    :type key_file: str, required

    :return: ``OwncaCertData``
    :raises: ``OwnCAInconsistentData``
    """
    # certificate signing request (if ICA); absent file -> None fields
    try:
        with open(csr_file, "rb") as csr_f:
            csr_data = csr_f.read()
        csr = x509.load_pem_x509_csr(csr_data, default_backend())
        csr_bytes = csr.public_bytes(encoding=serialization.Encoding.PEM)
    except FileNotFoundError:
        csr = None
        csr_bytes = None

    # certificate; the CN is extracted from the subject's RFC 4514 string
    try:
        with open(certificate_file, "rb") as cert_f:
            cert_data = cert_f.read()
        certificate = x509.load_pem_x509_certificate(
            cert_data, default_backend()
        )
        current_cn_name = (
            certificate.subject.rfc4514_string().split("CN=")[-1].split(",")[0]
        )
        certificate_bytes = certificate.public_bytes(
            encoding=serialization.Encoding.PEM
        )
    except FileNotFoundError:
        certificate = None
        certificate_bytes = None

    # NOTE(review): when the certificate file is missing, `current_cn_name`
    # is never bound and this comparison raises NameError, not
    # OwnCAInconsistentData — confirm whether that path can occur.
    if common_name is not None and common_name != current_cn_name:
        raise OwnCAInconsistentData(
            "Initialized CN name does not match with current existent "
            + f"common_name: {current_cn_name}"
        )

    # private key (PKCS8, unencrypted); absent file -> None fields
    try:
        with open(key_file, "rb") as key_f:
            key_data = key_f.read()
        key = serialization.load_pem_private_key(
            key_data, password=None, backend=default_backend()
        )
        key_bytes = key.private_bytes(
            serialization.Encoding.PEM,
            serialization.PrivateFormat.PKCS8,
            serialization.NoEncryption(),
        )
    except FileNotFoundError:
        key = None
        key_bytes = None

    # public key in OpenSSH format (no FileNotFoundError fallback here:
    # a missing public key file propagates)
    with open(public_key_file, "rb") as pub_key_f:
        pub_key_data = pub_key_f.read()
        public_key = serialization.load_ssh_public_key(
            pub_key_data, backend=default_backend()
        )
        public_key_bytes = public_key.public_bytes(
            serialization.Encoding.OpenSSH, serialization.PublicFormat.OpenSSH
        )

    # certificate revocation list (crl)
    # if there is not crl file it is created (backward compatible)
    try:
        with open(crl_file, "rb") as crl_f:
            crl_data = crl_f.read()
        crl = x509.load_pem_x509_crl(crl_data, default_backend())
        crl_bytes = crl.public_bytes(encoding=serialization.Encoding.PEM)
    except FileNotFoundError:
        if certificate is None:
            crl = None
            crl_bytes = None
        else:
            # Generate a fresh (empty) CRL signed by the loaded CA material.
            crl = ca_crl(
                ca_cert=certificate, ca_key=key, common_name=common_name
            )
            crl_bytes = crl.public_bytes(encoding=serialization.Encoding.PEM)

    return OwncaCertData(
        {
            "cert": certificate,
            "cert_bytes": certificate_bytes,
            "csr": csr,
            "csr_bytes": csr_bytes,
            "key": key,
            "key_bytes": key_bytes,
            "public_key": public_key,
            "public_key_bytes": public_key_bytes,
            "crl": crl,
            "crl_bytes": crl_bytes
        }
    )
fe8a3765e020e91880f6b44791e37b59002eb13e
3,631,549
def parse_note(note: Note) -> MetaEvent:
    """Convert a single non-system note into a :class:`MetaEvent`."""
    attributes = {
        "event": "note",
        "note_id": note["id"],
        "content": note["body"],
        "event_id": note["id"],
        "noteable_id": note["noteable_id"],
        "noteable_iid": note["noteable_iid"],
        "noteable_type": note["noteable_type"],
        "attachment": note["attachment"],
    }
    initiator = Initiator.from_note(note)
    return MetaEvent.create(initiator, note["created_at"], attributes)
8105ca5da84a1fc85fae0351d446b9d1dd9fae4b
3,631,550
def get_mvdr_vector(atf_vector, noise_psd_matrix):
    """Return the MVDR beamforming vector.

    :param atf_vector: acoustic transfer function vector,
        shape (..., bins, sensors)
    :param noise_psd_matrix: noise PSD matrix, shape (bins, sensors, sensors)
    :return: beamforming vectors, shape (..., bins, sensors)
    """
    assert noise_psd_matrix is not None

    # Prepend singleton dims so the PSD matrix broadcasts against the
    # (possibly batched) ATF vector.
    while atf_vector.ndim > noise_psd_matrix.ndim - 1:
        noise_psd_matrix = np.expand_dims(noise_psd_matrix, axis=0)

    # Enforce Hermitian symmetry before solving.
    conj_transposed = np.conj(noise_psd_matrix.swapaxes(-1, -2))
    noise_psd_matrix = 0.5 * (noise_psd_matrix + conj_transposed)

    try:
        numerator = solve(noise_psd_matrix, atf_vector)
    except np.linalg.LinAlgError:
        # Singular PSD matrix: fall back to a per-bin least-squares solve.
        bins = noise_psd_matrix.shape[0]
        numerator = np.empty_like(atf_vector)
        for f in range(bins):
            numerator[f], *_ = np.linalg.lstsq(noise_psd_matrix[f], atf_vector[..., f, :])

    denominator = np.einsum('...d,...d->...', atf_vector.conj(), numerator)
    return numerator / np.expand_dims(denominator, axis=-1)
70249a7795c07ed15f351b158cbf6dc1b83895ec
3,631,551
def get_numpy(required=True):
    """Try to import numpy through the package installer shim.

    If `required` is False, don't ask again if the user already declined;
    return None if numpy is not available. If `required` is True, do ask to
    install, and raise ImportError if numpy can't be set up.
    """
    global _numpy
    # _numpy is False until the first attempt; None afterwards means the
    # import failed (or was declined) and should not be retried.
    if _numpy is False:
        try:
            _numpy = py_import(
                'numpy',
                {'pip': 'numpy',
                 'linux-debian': 'python-numpy',
                 'linux-ubuntu': 'python-numpy',
                 'linux-fedora': 'numpy'},
                store_in_config=not required)
        except ImportError:
            _numpy = None
    if required and _numpy is None:
        raise ImportError("No module named numpy")
    return _numpy
1cb4486de4231f93b73f1bc649a1a05454faf530
3,631,552
def norm_pdf(x, mu, sigma):
    """Evaluate the normal (Gaussian) probability density at ``x``.

    :param x: point(s) at which to evaluate the density
    :param mu: mean of the distribution
    :param sigma: standard deviation of the distribution
    :return: density value(s)
    """
    z_score = (x - mu) / sigma
    inv_sqrt_2pi = 1.0 / np.sqrt(2 * np.pi)
    return np.exp(-0.5 * z_score ** 2) * inv_sqrt_2pi / sigma
12db092dad01331b15366b4819d4fde9e631b8de
3,631,553
import sqlite3


def delete_diagnosis(request):
    """Delete a row from the diagnosis table.

    On POST, removes the diagnosis whose id is given in ``request.POST['ID']``
    and renders the home page; otherwise renders the delete form.

    Query Explanation:
    - Delete data in diagnosis table (parameterized, so no SQL injection).

    :param request: Django request object
    :return: rendered response
    """
    if request.method == 'POST':
        con = sqlite3.connect("Hospital.db")
        # Fix: the connection was never closed, leaking a file handle per
        # request; try/finally guarantees cleanup even when execute fails.
        try:
            con.row_factory = dict_factory
            cur = con.cursor()
            cur.execute("""delete from diagnosis where id = :num""",
                        {'num': request.POST['ID']})
            con.commit()
            cur.close()
        finally:
            con.close()
        return render(request, 'hospital/home.html')
    else:
        return render(request, 'hospital/delete_diagnosis.html')
53cf92845c2df00f0fced044bb8faf95530deba6
3,631,554
def chip_calibration(
    data,
    mol="O2",
    F_cal=None,
    primary=None,
    tspan=None,
    tspan_bg=None,
    t_bg=None,
    gas="air",
    composition=None,
    chip="SI-3iv1",
):
    """
    Returns obect of class EC_MS.Chip, given data for a given gas (typically
    air) for which one component (typically O2 at M32) has a trusted
    calibration. The chip object has a capillary length (l_cap) that is set so
    that the capillary flux matches the measured signal for the calibrated gas.

    :param data: measurement dataset passed to ``Molecule.get_flux``
    :param mol: molecule name or Molecule instance with the trusted calibration
    :param F_cal: optional calibration factor override
    :param primary: optional primary mass override
    :param tspan: time span over which to average the measured flux
    :param tspan_bg: time span for background flux (``t_bg`` is an alias)
    :param t_bg: deprecated alias for ``tspan_bg``
    :param gas: gas used for the calibration measurement
    :param composition: mole fraction of ``mol`` in ``gas``; looked up from
        ``air_composition`` when ``gas == "air"``
    :param chip: Chip instance or chip model name
    :return: the Chip with ``l_cap`` rescaled to match the measured flux
    """
    # Accept either a molecule name or a Molecule object.
    if type(mol) is str:
        m = Molecule(mol)
    else:
        m = mol
        mol = mol.name
    if F_cal is not None:
        m.F_cal = F_cal
    if primary is not None:
        m.primary = primary
    if gas == "air" and composition is None:
        composition = air_composition[mol]

    # Measured molar flux of the calibrated molecule.
    x, y = m.get_flux(data, tspan=tspan, unit="mol/s")
    if tspan_bg is None and t_bg is not None:
        tspan_bg = t_bg
    if tspan_bg is not None:
        x_bg, y_bg = m.get_flux(data, tspan=tspan_bg, unit="mol/s")
        y0 = np.mean(y_bg)
    else:
        y0 = 0
    # Background-corrected mean flux.
    n_dot = np.mean(y) - y0

    if type(chip) is str:
        chip = Chip(chip)
    # Expected molar flux of `mol` through the capillary at the current l_cap.
    n_dot_0 = chip.capillary_flow(gas=gas) / Chem.NA * composition

    # Capillary flux scales inversely with length, so rescale l_cap by the
    # expected/measured flux ratio.
    l_eff = chip.l_cap * n_dot_0 / n_dot
    chip.l_cap = l_eff
    chip.parameters["l_cap"] = l_eff
    return chip
321ccc5a229c4a9ebf4be80614340e32aef6231c
3,631,555
import string


def tamper(payload, **kwargs):
    """
    Unicode-escapes non-encoded characters in a given payload (not processing
    already encoded) (e.g. SELECT -> \\u0053\\u0045\\u004C\\u0045\\u0043\\u0054)

    Notes:
        * Useful to bypass weak filtering and/or WAFs in JSON contexts

    An existing ``%XX`` URL-encoded byte is rewritten as ``\\u00XX`` instead
    of being escaped character by character.
    """
    if not payload:
        return payload

    pieces = []
    index = 0
    length = len(payload)
    while index < length:
        is_pct_encoded = (
            payload[index] == '%'
            and index < length - 2
            and payload[index + 1:index + 2] in string.hexdigits
            and payload[index + 2:index + 3] in string.hexdigits
        )
        if is_pct_encoded:
            pieces.append("\\u00%s" % payload[index + 1:index + 3])
            index += 3
        else:
            pieces.append('\\u%.4X' % ord(payload[index]))
            index += 1
    return "".join(pieces)
ef293a5be9698dea8f01186a38794ff9c3482c94
3,631,556
def to_axis_aligned_ras_space(image):
    """Transform the image to the closest axis-aligned approximation of RAS
    (i.e. Nifti) space.
    """
    ras_system = medipy.base.coordinate_system.RAS
    return to_axis_aligned_space(image, ras_system)
c7bb77a1e141672f2d8ea4ecb76c3b3dd0a00d66
3,631,557
def _fixParagraphs(element):
    """Move a paragraph that immediately follows a section (without being its
    child) into that section, so paragraphs become children of the last
    section. Returns True as soon as one move was performed.
    """
    is_stray_paragraph = (
        isinstance(element, advtree.Paragraph)
        and isinstance(element.previous, advtree.Section)
        and element.previous is not element.parent
    )
    if is_stray_paragraph:
        section = element.previous
        element.moveto(section.getLastChild())
        return True  # tree changed
    # Iterate over a copy: moveto() mutates child lists during recursion.
    for child in element.children[:]:
        if _fixParagraphs(child):
            return True
0875e08afe27171a0bd8773298a320acd93a0382
3,631,558
def dice_loss(label, target):
    """Soft Dice coefficient loss

    TP, FP, and FN are true positive, false positive, and false negative.

    .. math::
        dice &= \\frac{2 \\times TP}{ 2 \\times TP + FN + FP} \\\\
        dice &= \\frac{2 \\times TP}{(TP + FN) + (TP + FP)}

    objective is to maximize the dice, thus the loss is negate of dice
    for numerical stability (+1 in denominator) and fixing the loss range
    (+1 in numerator and +1 to the negated dice). The final Dice loss is
    formulated as

    .. math::
        dice \ loss = 1 - \\frac{2 \\times TP + 1}{(TP + FN) + (TP + FP ) + 1}

    it is soft as each components of the confusion matrix (TP, FP, and FN) are
    estimated by dot product of probability instead of hard classification

    NOTE(review): the implementation below smooths with ``eps = 1e-6`` rather
    than the "+1" the formula above describes — confirm which is intended.

    Args:
        label: 4D or 5D label tensor
        target: 4D or 5D target tensor

    Returns:
        dice loss
    """
    eps = 1e-6
    # Softmax logits into probabilities; clip away from 0/1 for stability.
    target = tf.clip_by_value(tf.nn.softmax(target), eps, 1 - eps)
    # Drop channel 0 (presumably background — TODO confirm) from both tensors.
    target, label = target[..., 1:], label[..., 1:]
    yy = tf.multiply(target, target)  # soft (TP + FP) contribution
    ll = tf.multiply(label, label)    # soft (TP + FN) contribution
    yl = tf.multiply(target, label)   # soft TP
    # Reduce over the spatial axes, i.e. everything between batch and channel.
    axis_ = tf.range(1, tf.rank(label) - 1)
    return tf.reduce_mean(1 - (2 * tf.reduce_sum(yl, axis=axis_, keepdims=True)) /
                          (tf.reduce_sum(ll, axis=axis_, keepdims=True) +
                           tf.reduce_sum(yy, axis=axis_, keepdims=True) + eps),
                          axis=-1)
526104e7ba1fd974444b1141913d593e4ee4efb1
3,631,559
from typing import List, Union


def no_subseqs(x_tokens: Union[List[str], str]) -> bool:
    """
    Checks to see whether a string lacks the subsequences ab, bc, cd, and dc.

    :param x_tokens: A string
    :return: True iff x_tokens does not have any subsequences
    """
    seen = set()
    for token in x_tokens:
        if not _check_subseqs(seen, token):
            return False
        seen.add(token)
    return True
434dade2ca1801bed0895a79ba281f90e6b78177
3,631,560
import torch


def _get_random_R():
    """Sample a random angle-axis vector and convert it to a rotation matrix
    via the Rodrigues formula."""
    angle_axis = torch.tensor(np.random.rand(1, 3))
    rotation = RodriguesBlock()(angle_axis)
    return rotation.numpy()[0]
69fd6cfe7a8338941b67b77448941d17cc2c16d0
3,631,561
from datetime import datetime


def timestamp_to_iso(timestamp):
    """
    Converts an ISO 8601 timestamp (in the format `YYYY-mm-dd HH:MM:SS`)
    to :class:`datetime`

    Example:
        >>> timestamp_to_iso(timestamp='2020-02-02 02:02:02')
        datetime.datetime(2020, 2, 2, 2, 2, 2)

    :param timestamp: timestamp to convert
    :return: datetime representation of the timestamp
    """
    # Bug fix: the old strptime pattern ('%Y-%m-%dT%H:%M:%S') only accepted a
    # 'T' separator, so the documented space-separated format — including the
    # docstring's own example — raised ValueError. fromisoformat accepts both
    # separators, so previously-valid inputs keep working.
    return datetime.fromisoformat(timestamp)
7de7ea8b1fd5bd4d854c43b9818bf6f8f58da279
3,631,562
def rename_category_for_flattening(category, category_parent=""):
    """
    Tidy name of passed category by removing extraneous characters such as
    '_' and '-'.

    :param category: string to be renamed (namely, a category of crime)
    :param category_parent: optional string to insert at the beginning of the
        string (in addition to other edits)
    :return: new string name for category passed
    """
    cleaned = (category.lower()
               .replace(" ", "_")
               .replace("/", "")
               .replace("(", "")
               .replace(")", "")
               .replace(",", ""))
    if category_parent == "":
        # NOTE(review): standalone names map ';' to '_' while parented names
        # drop it entirely — preserved as-is, but this asymmetry looks
        # unintentional; confirm with the callers.
        return cleaned.replace(";", "_").replace("-", "")
    return category_parent + "_" + cleaned.replace(";", "").replace("-", "")
360e87da0a8a778f32c47adc58f33a2b92fea801
3,631,563
import math


def billing_bucket(t):
    """
    Returns billing bucket for AWS Lambda.

    :param t: An elapsed time in ms.
    :return: Nearest 100ms, rounding up, as int.
    """
    buckets = math.ceil(t / 100.0)
    return int(buckets) * 100
87b9963c1a2ef5ad7ce1b2fac67e563dcd763f73
3,631,564
import hashlib


def filename_to_int_hash(text):
    """
    Returns the sha1 hash of the text passed in, as an integer.
    """
    digest_hex = hashlib.sha1(text.encode("utf-8")).hexdigest()
    return int(digest_hex, 16)
b5cb53b921146d4ae124c20b0b267acc80f6de43
3,631,565
def export():
    """Export all components and connected nets, as a netlist in KiCad
    pcbnew compatible format.

    This also saves a database with all captured internal information about
    schematic, components and nets. These information are used in subsequent
    runs to ensure stable designators.
    """
    schematic = _current_schematic
    return export_(schematic)
141c670b43f831cc4374692370f5f651445486fb
3,631,566
def dropout(x, rate, training=None):
    """Simple dropout layer; a no-op when not training or when rate is 0."""
    if not training or rate == 0:
        return x
    # TF2's dropout takes the drop rate directly; TF1's takes the keep
    # probability.
    drop_arg = rate if compat.is_tf2() else 1.0 - rate
    return tf.nn.dropout(x, drop_arg)
77ba40883e76366de27d15fc03f601d7efdcae0b
3,631,567
import re


def readFastQ(fastq_path):
    """
    Reads a fastq file and returns a dictionary mapping each read header
    (pair suffix '/1' or '/2' stripped, truncated at the first space) to a
    (sequence, quality) tuple.
    """
    read_dict = {}
    with open(fastq_path, 'r') as FASTQ:
        for header, seq, qual in FastqGeneralIterator(FASTQ):
            read_id = re.sub('/[1-2]', '', header).split(' ')[0]
            read_dict[read_id] = (seq, qual)
    return read_dict
4dbcbb8d7ba8a6b5d77c2477c2b97d00d4a9a19c
3,631,568
def dilation(args) -> list:
    """Compute dilation of a given object in a segmentation mask.

    Args:
        args: (mask, obj, kernel) — the segmentation mask, the object label,
            and the dilation structuring element.

    Returns:
        Edge tuples (obj, neighbour, {EDGE_WEIGHT: 1}) for every label
        touched by the dilated object, excluding background and the object
        itself.
    """
    mask, obj, kernel = args
    grown = binary_dilation(mask == obj, kernel)
    neighbours = np.unique(mask[grown])
    # Drop the object itself and the background label.
    neighbours = neighbours[(neighbours != obj) & (neighbours != 0)]
    return [(obj, neighbour, {EDGE_WEIGHT: 1}) for neighbour in neighbours]
f9edc59e4db7e8774916542be887e0ad3a82ec78
3,631,569
def adjust_lr_on_plateau(optimizer):
    """Decrease learning rate by factor 10 if validation loss reaches a
    plateau. Mutates the optimizer's param groups in place and returns it."""
    for group in optimizer.param_groups:
        group['lr'] = group['lr'] / 10
    return optimizer
615631fd4853e7f0c0eae59a3336eb4c4794d3a3
3,631,570
import os


def generate_key():
    """Generate a node key pair.

    :return: (address, private_key_hex) — the account address and the private
        key as a hex string without the '0x' prefix.
    """
    extra_entropy = ''
    extra_key_bytes = text_if_str(to_bytes, extra_entropy)
    # 32 bytes of OS randomness (plus optional extra entropy) hashed into the
    # private key material.
    key_bytes = keccak(os.urandom(32) + extra_key_bytes)
    private_key = keys.PrivateKey(key_bytes)
    public_key = keys.private_key_to_public_key(private_key)
    address = public_key.to_address()
    return address, private_key.to_hex()[2:]
fc04640b04bf316c160fc21f0482a6ffd33d72cc
3,631,571
from typing import List
from typing import Dict
from typing import Any
from typing import Union


def get_partial_match_metrics(
    preds: List[List[str]], labels: List[List[str]]
) -> Dict[Any, Any]:
    """
    Suppose there are N <term,defn> pairs in the gold data and the system
    predicts M such pairs. A 'partial match' happens when the system predicts
    a pair and there is some overlap (at least one token) between the
    predicted and gold term spans AND some overlap between the predicted and
    gold definition spans. Let P be the number of partial matches.

    Partial match precision = P/M
    Partial match recall = P/N

    Exact-match metrics are computed analogously over token-identical spans.
    """
    assert len(preds) == len(labels)

    both_in_preds, both_in_labels = [], []
    partial_matches, exact_matches = [], []
    for pred_sent, label_sent in zip(preds, labels):
        simple_pred_sent = simplify_tokens(pred_sent)
        simple_label_sent = simplify_tokens(label_sent)

        # check whether term/def exist together
        both_in_pred = "TERM" in simple_pred_sent and "DEF" in simple_pred_sent
        both_in_label = "TERM" in simple_label_sent and "DEF" in simple_label_sent
        both_in_preds.append(both_in_pred)
        both_in_labels.append(both_in_label)

        partial_match = False
        exact_match = False
        match: List[Union[str, bool]] = []
        if both_in_pred and both_in_label:
            # Position-wise comparison: keep the tag where they agree, mark
            # False where they differ.
            for p, l in zip(simple_pred_sent, simple_label_sent):
                if p == l:
                    match.append(p)
                else:
                    match.append(False)
            # At least one overlapping TERM token and one overlapping DEF
            # token -> partial match; no mismatch at all -> exact match.
            if "TERM" in match and "DEF" in match:
                partial_match = True
            if False not in match:
                exact_match = True

        partial_matches.append(partial_match)
        exact_matches.append(exact_match)

    count_both_in_preds = sum(both_in_preds)  # M (predicted pairs)
    count_both_in_labels = sum(both_in_labels)  # N (gold pairs)
    count_partial_matches = sum(partial_matches)  # P
    count_exact_matches = sum(exact_matches)  # E

    # NOTE(review): raises ZeroDivisionError when no pair exists in the
    # predictions, the labels, or when a precision+recall sum is 0 — confirm
    # whether callers guarantee non-empty pair sets.
    partial_precision = count_partial_matches / count_both_in_preds
    partial_recall = count_partial_matches / count_both_in_labels
    partial_fscore = (
        2 * partial_precision * partial_recall / (partial_precision + partial_recall)
    )

    exact_precision = count_exact_matches / count_both_in_preds
    exact_recall = count_exact_matches / count_both_in_labels
    exact_fscore = 2 * exact_precision * exact_recall / (exact_precision + exact_recall)

    return {
        "partial_match_precision": partial_precision,
        "partial_match_recall": partial_recall,
        "partial_match_f1": partial_fscore,
        "exact_match_precision": exact_precision,
        # NOTE(review): 'excat' key typos kept verbatim — downstream consumers
        # may rely on the misspelled keys.
        "excat_match_recall": exact_recall,
        "excat_match_f1": exact_fscore,
    }
05eaf9fce152e6266698e9b5613a2770a000c48d
3,631,572
def find_submission_id_command(
    client: Client,
    limit: int = 50,
    filter: str = "",
    offset: str = "",
    sort: str = "",
) -> CommandResults:
    """Find submission IDs for uploaded files via an FQL filter with paging.

    :param client: the client object with an access token
    :param limit: maximum number of report IDs to return
    :param filter: optional filter and sort criteria in the form of an FQL query
    :param offset: the offset to start retrieving reports from
    :param sort: sort order: asc or desc
    :return: Demisto outputs when entry_context and responses are lists
    """
    raw_response = client.find_submission_id(limit, filter, offset, sort)
    parsed = parse_outputs(
        raw_response, reliability=client.reliability, resources_fields=('id',)
    )
    return CommandResults(
        outputs_key_field='id',
        outputs_prefix=OUTPUTS_PREFIX,
        outputs=parsed.output,
        readable_output=tableToMarkdown("CrowdStrike Falcon X response:", parsed.output),
        raw_response=parsed.response,
    )
e7e285c8d2b10af6ab7d0337cb0db7bea2664478
3,631,573
def average(aggregation, discard_zeros=False): """Perform the average aggregation""" # This method take the data from the inmediate lower granularity and computes the # average, then it insert the new average try: # Calculate the inmediate lower granularity: LOGGER.debug('Requested granularity is %s', aggregation.granularity) gran_value = GRANULARITY_TEXT_VALUE[aggregation.granularity] LOGGER.debug('Converted to granularity with value %s', gran_value) tbl_lower_gran_name = get_granularity_table(gran_value - 1) LOGGER.debug('Will query table %s', tbl_lower_gran_name) # Need to compute the period to query start, end = get_interval(gran_value, aggregation.item_time, aggregation.timezone) LOGGER.debug('Compute period: %s %s', start, end) except KeyError, e: LOGGER.error('Exception configuring tables: %s', e) raise e low_gran_count, low_gran_values, __ = get_data_from_table(tbl_lower_gran_name, aggregation, start, end) if discard_zeros: without_zeros = [value for value in low_gran_values if value != 0] low_gran_values = without_zeros low_gran_count = len(without_zeros) if low_gran_count and low_gran_values: LOGGER.debug('Going to perform average on %s', low_gran_values) # Compute the average avg_value = sum(low_gran_values) / float(low_gran_count) LOGGER.debug('Average value: %s', avg_value) # Set the computed average response = aggregation.table.update_item( Key={ 'timeserie': aggregation.timeserie, 'time': str(aggregation.item_time), }, UpdateExpression="SET #value = :value, #ttl = :ttl", ExpressionAttributeNames={'#value': 'value', '#ttl': 'ttl'}, ExpressionAttributeValues={ ':value': Decimal(str(avg_value)), ':ttl': long(aggregation.ttl) }, ReturnValues="UPDATED_NEW" ) LOGGER.debug('Updating item %s-%s adding value %s', aggregation.timeserie, aggregation.item_time, avg_value) LOGGER.debug('Response : %s', response) return True else: LOGGER.debug('No data from lower granularity')
f6e7d7338c2ada52c7ec6f8abd30dcdeba07e1ff
3,631,574
def chain(node1, node2, include_ids=False, only_ids=False):
    """
    Walk the dependency chain from `node2` up to `node1` (if possible).

    :param node1: the ancestor node the walk stops at
    :type node1: udon2.Node
    :param node2: the descendant node the walk starts from
    :type node2: udon2.Node
    :param include_ids: append "#<id>" to every dependency relation label
    :param only_ids: collect node ids instead of dependency relation labels
    :return: list of chain elements ordered from `node1` down to `node2`
    """
    links = []
    current = node2
    while not current.is_identical(node1, ""):
        if only_ids:
            links.append(current.id)
        elif include_ids:
            links.append(f"{current.deprel}#{int(current.id)}")
        else:
            links.append(current.deprel)
        current = current.parent
    return links[::-1]
bcfe1497ea731ad902bc5760542c8ce6f3286b60
3,631,575
def parallax(sc, d2p=True, **kw):
    """Parallax conversion dispatcher.

    Parameters
    ----------
    sc : SkyCoord
        ** warning: check if skycoord frame centered on Earth
    d2p : bool
        True  -> argument is a distance, return the parallax angle
        False -> argument is a parallax angle, return the distance

    Returns
    -------
    parallax_angle or distance : scalar, array
    """
    # Delegate to the matching sibling helper; extra keyword arguments are
    # accepted but ignored, matching the original contract.
    return parallax_angle(sc) if d2p else parallax_distance(sc)
d2d79dd67a07e71ef6a411fd4567c591335cbe83
3,631,576
def dense(x, output_dim, reduced_dims=None, expert_dims=None, use_bias=True,
          activation=None, name=None):
    """Dense layer computing activation(x @ kernel + bias).

    Args:
      x: a mtf.Tensor of shape [..., reduced_dims].
      output_dim: a mtf.Dimension for the output.
      reduced_dims: optional list of mtf.Dimensions of x to contract over;
        defaults to the last dimension of x.
      expert_dims: optional list of mtf.Dimension representing different
        experts; each expert gets its own weights.
      use_bias: a boolean, whether to add a bias term.
      activation: optional function from mtf.Tensor to mtf.Tensor applied
        to the result.
      name: a string, the variable scope name.

    Returns:
      a mtf.Tensor of shape [..., output_dim].
    """
    expert_dims = [] if expert_dims is None else expert_dims
    reduced_dims = x.shape.dims[-1:] if reduced_dims is None else reduced_dims
    kernel_shape = mtf.Shape(expert_dims + reduced_dims + [output_dim])
    kept_dims = [d for d in x.shape.dims if d not in reduced_dims]
    result_shape = mtf.Shape(kept_dims + [output_dim])
    with tf.variable_scope(name, default_name="dense"):
        # Scale initial weights by 1/sqrt(fan_in) over the contracted dims.
        init_stddev = mtf.list_product(d.size for d in reduced_dims) ** -0.5
        kernel = mtf.get_variable(
            x.mesh, "kernel", kernel_shape,
            initializer=tf.random_normal_initializer(stddev=init_stddev),
            activation_dtype=x.dtype)
        result = mtf.matmul(x, kernel, output_shape=result_shape)
        if use_bias:
            bias = mtf.get_variable(
                x.mesh, "bias", mtf.Shape(expert_dims + [output_dim]),
                initializer=tf.zeros_initializer(),
                activation_dtype=x.dtype)
            result += bias
        if activation is not None:
            result = activation(result)
        return result
1303b164c266759f617f6abf3cfba07fdeff5ccd
3,631,577
import os
import os.path as osp


def record_CT(dataset_path):
    """Collect the CT series layout of a DICOM-style folder tree.

    Walks ``dataset_path`` expecting the layout patient/study/series and
    returns a JSON-serializable nested dict:
    {patient: {study: [series names whose modality is 'CT']}}.

    Fix: the original used ``osp.join`` but only imported ``os``, so every
    call raised NameError; ``os.path`` is now bound as ``osp``.

    :param dataset_path: root folder containing one sub-folder per patient
    :return: nested dict of CT image folds
    """
    patients_dict = dict()
    for p in os.listdir(dataset_path):
        print(p)
        patient_path = osp.join(dataset_path, p)
        studies_list = dict()
        for study in os.listdir(patient_path):
            study_path = osp.join(patient_path, study)
            series_list = list()
            for series in os.listdir(study_path):
                series_path = osp.join(study_path, series)
                # series_modality is provided elsewhere; keep only CT series.
                if series_modality(series_path) != 'CT':
                    continue
                series_list.append(series)
            studies_list[study] = series_list
        patients_dict[p] = studies_list
    return patients_dict
8b1ecf09f0b22ec4901e9844cf896a9321401489
3,631,578
import select


def requires_cuda_enabled():
    """Returns constraint_setting that is not satisfied unless :is_cuda_enabled.

    Add to 'target_compatible_with' attribute to mark a target incompatible
    when @rules_cuda//cuda:enable_cuda is not set. Incompatible targets are
    excluded from bazel target wildcards and fail to build if requested
    explicitly.
    """
    # NOTE(review): this body is Bazel Starlark, where `select` is a builtin
    # configuration function; the stdlib `import select` above looks
    # misleading (the Python `select` module is not callable) -- confirm
    # against the original .bzl source.
    return select({
        "@rules_cuda//cuda:is_cuda_enabled": [],
        "//conditions:default": ["@platforms//:incompatible"],
    })
aec9d4c9ed55c44aaf0f6d3e6862bf6b0c24471e
3,631,579
from typing import Union
from typing import Any
from typing import Callable
import warnings


def add_activated_handler(parent : Union[int, str], *, label: str =None, user_data: Any =None, use_internal_label: bool =True, tag: Union[int, str] =0, callback: Callable =None, show: bool =True) -> Union[int, str]:
    """Deprecated alias: adds a handler which runs a given callback when the
    specified item is activated.

    Args:
        parent (Union[int, str]):
        **label (str): Overrides 'name' as label.
        **user_data (Any): User data for callbacks.
        **use_internal_label (bool): Use generated internal label instead of user specified (appends ### uuid).
        **tag (Union[int, str]): Unique id used to programmatically refer to the item. If label is unused this will be the label.
        **callback (Callable): Registers a callback.
        **show (bool): Attempt to render widget.
    Returns:
        Union[int, str]
    """
    warnings.warn("'add_activated_handler' is deprecated. Use: `add_item_activated_handler(...)`", DeprecationWarning, 2)
    # Forward everything unchanged to the replacement API.
    forwarded = dict(label=label, user_data=user_data,
                     use_internal_label=use_internal_label, tag=tag,
                     callback=callback, show=show)
    return internal_dpg.add_item_activated_handler(parent, **forwarded)
d3b107b0cd1d1fef195590a01d923adf8e84ee24
3,631,580
import logging
import sys
import csv
import configparser
import toml
import json


def main(argv=None, abort=False, debug=None):
    """Drive the validator.

    This function acts as the command line interface backend.
    There is some duplication to support testability.

    :param argv: paths/folders to validate (defaults to sys.argv[1:])
    :param abort: return immediately on the first validation failure
    :param debug: enable debug-level logging
    :return: (exit_code, message) tuple
    """
    init_logger(level=logging.DEBUG if debug else None)
    forest = argv if argv else sys.argv[1:]
    if not forest:
        print("Usage: gelee paths-to-files")
        return 0, "USAGE"
    num_trees = len(forest)
    LOG.debug("Guarded dispatch forest=%s, num_trees=%d", forest, num_trees)
    LOG.info(
        "Starting validation visiting a forest with %d tree%s",
        num_trees, '' if num_trees == 1 else 's')
    failure_path_reason = "Failed validation for path %s with error: %s"
    total, folders, ignored, csvs, inis, jsons, tomls, xmls, yamls = 0, 0, 0, 0, 0, 0, 0, 0, 0
    failures = 0
    for tree in forest:
        for path in visit(tree):
            LOG.debug(" - path=%s, total=%d", path, total)
            total += 1
            if not path.is_file():
                folders += 1
                continue
            final_suffix = '' if not path.suffixes else path.suffixes[-1].lower()
            if final_suffix == ".csv":
                if not path.stat().st_size:
                    LOG.error(failure_path_reason, path, "ERROR: Empty CSV file")
                    if abort:
                        return 1, "ERROR: Empty CSV file"
                    failures += 1
                    continue
                with open(path, newline='') as handle:
                    try:
                        try:
                            dialect = csv.Sniffer().sniff(handle.read(1024), ",\t; ")
                            handle.seek(0)
                        except csv.Error as err:
                            if "could not determine delimiter" in str(err).lower():
                                # Single-column CSV: fall back to a permissive dialect.
                                dialect = csv.Dialect()
                                dialect.delimiter = ','
                                dialect.quoting = csv.QUOTE_NONE
                                dialect.strict = True
                            else:
                                LOG.error(failure_path_reason, path, slugify(err))
                                if abort:
                                    return 1, str(err)
                                failures += 1
                                # NOTE(review): `dialect` stays undefined on this
                                # branch; the NameError below is absorbed by the
                                # outer handler and counted as a second failure --
                                # confirm this double count is intended.
                        try:
                            reader = csv.reader(handle, dialect)
                            for _ in reader:
                                pass
                            csvs += 1
                        except csv.Error as err:
                            LOG.error(failure_path_reason, path, slugify(err))
                            if abort:
                                return 1, str(err)
                            failures += 1
                    except Exception as err:
                        # Fix: `(Exception, csv.Error)` was redundant --
                        # csv.Error already subclasses Exception.
                        LOG.error(failure_path_reason, path, slugify(err))
                        if abort:
                            return 1, str(err)
                        failures += 1
            elif final_suffix == ".ini":
                config = configparser.ConfigParser()
                try:
                    config.read(path)
                    inis += 1
                except configparser.Error as err:
                    # Fix: the original listed eleven configparser exceptions
                    # (NoSectionError, Duplicate*, NoOptionError, Interpolation*,
                    # MissingSectionHeaderError, ParsingError) with identical
                    # handlers; all subclass configparser.Error.
                    LOG.error(failure_path_reason, path, slugify(err))
                    if abort:
                        return 1, str(err)
                    failures += 1
            elif final_suffix in (".geojson", ".json", ".toml"):
                loader = toml.load if final_suffix == ".toml" else json.load
                with open(path, "rt", encoding="utf-8") as handle:
                    try:
                        _ = loader(handle)
                        if final_suffix == ".toml":
                            tomls += 1
                        else:
                            jsons += 1
                    except Exception as err:
                        LOG.error(failure_path_reason, path, slugify(err))
                        if abort:
                            return 1, str(err)
                        failures += 1
            elif final_suffix == ".xml":
                if not path.stat().st_size:
                    LOG.error(failure_path_reason, path, "ERROR: Empty XML file")
                    if abort:
                        return 1, "ERROR: Empty XML file"
                    failures += 1
                    continue
                xml_tree, message = load_xml(path)
                if xml_tree:
                    xmls += 1
                else:
                    LOG.error(failure_path_reason, path, slugify(message))
                    if abort:
                        return 1, str(message)
                    failures += 1
            elif final_suffix in (".yaml", ".yml"):
                with open(path, "rt", encoding="utf-8") as handle:
                    try:
                        _ = load_yaml(handle, Loader=LoaderYaml)
                        yamls += 1
                    except Exception as err:
                        LOG.error(failure_path_reason, path, slugify(err))
                        if abort:
                            return 1, str(err)
                        failures += 1
            else:
                ignored += 1
                continue
    success = "Successfully validated"
    pairs = (
        (csvs, "CSV"),
        (inis, "INI"),
        (jsons, "JSON"),
        (tomls, "TOML"),
        (xmls, "XML"),
        (yamls, "YAML"),
    )
    for count, kind in pairs:
        if count:
            LOG.info(
                "- %s %d total %s file%s.",
                success, count, kind, "" if count == 1 else "s")
    configs = csvs + inis + jsons + tomls + xmls + yamls
    LOG.info(  # TODO remove f-strings also here
        f"Finished validation of {configs} configuration file{'' if configs == 1 else 's'}"
        f" with {failures} failure{'' if failures == 1 else 's'}"
        f" visiting {total} path{'' if total == 1 else 's'}"
        f" (ignored {ignored} non-config file{'' if ignored == 1 else 's'}"
        f" in {folders} folder{'' if folders == 1 else 's'})"
    )
    print(f"{'OK' if not failures else 'FAIL'}")
    # NOTE(review): the exit code is 0 even when failures > 0 (only the
    # printed OK/FAIL differs) -- confirm this is intended.
    return 0, ""
9ad9f8f7a4666c093f908cf3514ec2528a1214ff
3,631,581
import os
import fnmatch
import time


def filter_files(files, search_settings):
    """Filter a list of files based on the search settings.

    :param files: iterable of path strings or objects exposing a ``.path``
        attribute
    :param search_settings: dict with keys 'patterns' (fnmatch globs; an
        empty list accepts every file) and 'paths' (search roots)
    :return: sorted, de-duplicated list of matching file paths
    """
    ret_val = []
    patterns = search_settings['patterns']
    for f in files:
        try:
            file_path = f.path
        except AttributeError:
            file_path = f
        for path in search_settings['paths']:
            path += os.sep
            # NOTE(review): `path` is never compared against `file_path`, so
            # the "file is in scope" check the original comment implies is
            # absent -- every file is considered for every search root.
            if patterns:
                for pattern in patterns:
                    # Fix: the original called the `fnmatch` *module*
                    # (TypeError: 'module' object is not callable); the
                    # intended call is fnmatch.fnmatch().
                    if fnmatch.fnmatch(file_path, pattern):
                        ret_val.append(file_path)
                        break
                    time.sleep(0)  # yield to other threads
            else:
                ret_val.append(file_path)
                time.sleep(0)
            time.sleep(0)
    return sorted(list(set(ret_val)))
4f33968ad37ebbb0b18d1440ce14c40af7a32d79
3,631,582
def application(service, custom_app_plan, custom_application, request):
    """First application bound to the account and service_plus"""
    # Create the application plan first, then bind a new application to it.
    app_plan = custom_app_plan(rawobj.ApplicationPlan(blame(request, "aplan")), service)
    app_payload = rawobj.Application(blame(request, "app"), app_plan)
    return custom_application(app_payload)
dc27ecd53a276bf194e92c6ee716b94fa2cf1445
3,631,583
def printf_line(*args):
    """printf_line(int indent, char format, v(...) ?) -> bool

    Thin SWIG-generated wrapper: forwards all positional arguments to the
    native IDA API implementation in the `_idaapi` extension module.
    """
    return _idaapi.printf_line(*args)
a1a9214f6c4013d3654187724839b6aeed1c4220
3,631,584
import logging def baseline_correction_using_plane(coh_ab,uw_phase,kz): """ Baseline correction based on a plane WARNINGS: - From choi idl code - We should really check with TAXI the baseline correction for a better processing Parameters ---------- coh_ab : 2D numpy array absolute value of the cohrece uw_phase : 2D numpy array unwrapped phase kz : 2D numpy array vertical wavenumber Returns ------- plane : 2D numpy array Plane with the correction to be applyed to the interferogram Notes ------- Author : Victor Cazcarra-Bes (victor.cazcarrabes@dlr.de) Date : February 2021 """ log = logging.getLogger('baseline_correction_using_plane') log.info('Compute the baseline correction using a plane ...') z_res = uw_phase / kz cal_points = np.where(coh_ab > 0.95) residual = z_res[cal_points[0], cal_points[1]] HH = np.asarray(np.vstack([cal_points[0], cal_points[1], np.ones(len(cal_points[0]))]), 'float64') cc1 = np.matmul(HH, np.transpose(HH)) cc2 = np.linalg.inv(cc1) cc3 = np.matmul(cc2, HH) coef = np.matmul(cc3, residual) rgmesh,azmesh = np.meshgrid(range(coh_ab.shape[1]), range(coh_ab.shape[0])) #plane = coef[0] * rgmesh + coef[1] * azmesh + coef[2] plane = coef[0] *azmesh + coef[1] * rgmesh + coef[2] return plane
f8d812bb019d0d96d23440d51315aaa40448a5b2
3,631,585
import os


def load_example_asos() -> DataFrame:
    """Fixture to load example data"""
    # Resolve <repo>/staticdata relative to this test module.
    static_dir = os.path.abspath(
        os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, "staticdata")
    )
    csv_path = os.path.join(static_dir, "AMW_example_data.csv")
    return meteogram.download_asos_data(csv_path)
fb411893a5e40c03d451c0cd4a6e707bf85653e6
3,631,586
def variable(default=None, dependencies=(), holds_data=True):
    """
    Required decorator for data_dict of custom structs.
    The enclosing class must be decorated with struct.definition().

    :param default: default value passed to validation if no other value is specified
    :param dependencies: other items (string or reference for inherited constants_dict)
    :param holds_data: determines whether the variable is considered by data-related functions
    :return: read-only property
    """
    def decorator(validation_fn):
        # Build the Item under the validator's own name and register it in
        # place of the decorated function.
        registered = Item(validation_fn.__name__, validation_fn, True,
                          default, dependencies, holds_data)
        _register_item(validation_fn, registered)
        return registered
    return decorator
fdc11425beddaf47985cea7729244ac073922794
3,631,587
def define_circle(p1, p2, p3):
    """Return the radius of the circle passing through the given 3 points.

    In case the 3 points form a line, returns ``np.inf``.

    Fix: the original docstring promised "(center, radius)" and
    "(None, infinity)", but the function has always returned only the
    radius (or ``np.inf``); the documentation now matches the behavior.

    :param p1: first point as an (x, y) pair
    :param p2: second point as an (x, y) pair
    :param p3: third point as an (x, y) pair
    :return: circumcircle radius (float), or ``np.inf`` when the points are
        (nearly) collinear (|determinant| < 1e-6)
    """
    temp = p2[0] * p2[0] + p2[1] * p2[1]
    bc = (p1[0] * p1[0] + p1[1] * p1[1] - temp) / 2
    cd = (temp - p3[0] * p3[0] - p3[1] * p3[1]) / 2
    det = (p1[0] - p2[0]) * (p2[1] - p3[1]) - (p2[0] - p3[0]) * (p1[1] - p2[1])
    if abs(det) < 1.0e-6:
        # Degenerate (collinear) case: no finite circumcircle exists.
        return np.inf
    # Circumcenter coordinates (intermediate values only; not returned).
    cx = (bc * (p2[1] - p3[1]) - cd * (p1[1] - p2[1])) / det
    cy = ((p1[0] - p2[0]) * cd - (p2[0] - p3[0]) * bc) / det
    radius = np.sqrt((cx - p1[0]) ** 2 + (cy - p1[1]) ** 2)
    return radius
aedb4bc6173df09962ab81a4124acbd245af7612
3,631,588
def create_upload_token(self, request, form):
    """ Create a new upload token. """
    layout = ManageUploadTokensLayout(self, request)
    # Render the confirmation form until it has been submitted.
    if not form.submitted(request):
        return {
            'layout': layout,
            'form': form,
            'message': _("Create a new upload token?"),
            'button_text': _("Create"),
            'title': _("Create token"),
            'cancel': layout.manage_model_link
        }
    self.create()
    request.message(_("Upload token created."), 'success')
    return morepath.redirect(layout.manage_model_link)
feba475900a01e3984fed7d49b3c6709e8b82bd8
3,631,589
def vertical_channel_region_detection(image):
    """Detect the vertical extent of the channel region in *image*.

    Recursively bisects the image along the vertical (row) axis and marks
    the fragments whose horizontal-mean spectrum peaks at the same dominant
    frequency as the whole image.

    :param image: 2D image array (rows x columns)
    :return: the largest contiguous matching region, as returned by
        find_insides (pair-like, widest first)
    """
    # Frequency axis matching the image width.
    f = spectrum_bins_by_length(image.shape[1])
    ft_h_s = tunable('channels.vertical.recursive.fft_smoothing_width', 3, description="For channel detection (recursive, vertical), spectrum smoothing width.")

    def horizontal_mean_frequency(img_frag, clean_around=None, clean_width=0.0):
        """Return (peak power, peak frequency) of the fragment's horizontal-mean spectrum.

        :param img_frag: image fragment to analyse
        :param clean_around: if given, zero all bins further than
            clean_width from this frequency before taking the peak
        :param clean_width: half-width of the band kept around clean_around
        :return: (maximum power, frequency of the maximum-power bin)
        """
        ft = np.absolute(spectrum_fourier(horizontal_mean(img_frag)))
        # Normalize by half the DC component, then drop DC before smoothing.
        ft /= 0.5 * ft[0]
        ft[0] = 0
        ft = hamming_smooth(ft, ft_h_s)
        if clean_around:
            ft[np.absolute(f - clean_around) > clean_width] = 0.0
        return ft.max(), f[np.argmax(ft)]

    # Dominant frequency of the whole image; fragments are compared to it.
    power_overall_f, overall_f = horizontal_mean_frequency(image)
    d = tunable('channels.vertical.recursive.maximum_delta', 2.0, description="For channel detection (recursive, vertical), maximum delta.")
    power_min_quotient = tunable('channels.vertical.recursive.power_min_quotient', 0.005, description="For channel detection (recursive, vertical), minimum power quotient")
    break_condition = tunable('channels.vertical.recursive.break_condition', 2.0, description="For channel detection (recursive, vertical), recursive break condition.")
    current_clean_width = overall_f / 2.0

    def matches(img_frag):
        """Return True when the fragment's spectral peak agrees with the overall peak.

        :param img_frag: image fragment to test
        :return: bool -- peak frequency within `d` of the overall frequency
            and relative peak power above `power_min_quotient`
        """
        power_local_f, local_f = horizontal_mean_frequency(
            img_frag, clean_around=overall_f, clean_width=current_clean_width)
        return (abs(overall_f - local_f) < d) and ((power_local_f / power_overall_f) > power_min_quotient)

    height = image.shape[0]
    # collector[row] != 0 marks rows that belong to a matching fragment.
    collector = np.zeros(height)

    def recursive_check(top, bottom, orientation=FIRST_CALL):
        """Bisect rows [top, bottom) and record matching halves in `collector`.

        :param top: first row of the examined slice
        :param bottom: one past the last row of the examined slice
        :param orientation: refinement direction (FIRST_CALL / FROM_TOP / FROM_BOTTOM)
        """
        if (bottom - top) < break_condition:
            return
        mid = (top + bottom) // 2
        upper = matches(image[top:mid, :])
        lower = matches(image[mid:bottom, :])
        collector[top:mid] = upper
        collector[mid:bottom] = lower
        if orientation is FIRST_CALL:
            if upper:
                recursive_check(top, mid, FROM_TOP)
            if lower:
                # NOTE(review): literal 1 passed here where the sibling calls
                # use the FROM_* constants -- presumably 1 == FROM_BOTTOM;
                # confirm against the constant definitions.
                recursive_check(mid, bottom, 1)
        elif orientation is FROM_TOP:
            if upper and lower:
                recursive_check(top, mid, FROM_TOP)
            elif not upper and lower:
                recursive_check(mid, bottom, FROM_TOP)
        elif orientation is FROM_BOTTOM:
            if lower and upper:
                recursive_check(mid, bottom, FROM_BOTTOM)
            elif not lower and upper:
                recursive_check(top, mid, FROM_BOTTOM)

    recursive_check(0, height)
    # The widest contiguous marked region wins.
    return sorted(find_insides(collector), key=lambda pair: pair[1] - pair[0], reverse=True)[0]
f70b702df3ab52c1d0c538c55cb1df180b003df2
3,631,590
import os


def file_str(f):
    """Return a short display name for a path.

    Directories are wrapped in angle brackets; files keep their base name:
    a/b ==> <b> ; a/b.txt ==> b.txt

    :param f: full path of a folder or file
    :return: simplified display name
    """
    base = os.path.basename(f)
    return '<' + base + '>' if os.path.isdir(f) else base
60ea019dc5bf2145b85d15e4c58ae9c08a588c38
3,631,591
def image_output_size(input_shape, size, stride, padding):
    """Calculate the resulting output shape for an image layer with the specified options.

    :param input_shape: NHWC input shape (batch, height, width, depth)
    :param size: filter shape; (h, w) keeps the input depth, while
        (h, w, in_depth, out_depth) sets the output depth to out_depth
        (a mismatching in_depth only prints a warning)
    :param stride: (vertical, horizontal) stride
    :param padding: "VALID" shrinks the spatial dims by (filter - 1);
        anything else behaves like "SAME"
    :return: NHWC output shape tuple
    """
    if len(size) > 2 and input_shape[3] != size[2]:
        print("Matrix size incompatible!")
    filter_h, filter_w = size[0], size[1]
    depth_out = size[3] if len(size) > 2 else int(input_shape[3])
    rows = input_shape[1]
    cols = input_shape[2]
    if padding == "VALID":
        # No padding: each spatial dim loses (filter - 1) positions.
        rows -= filter_h - 1
        cols -= filter_w - 1
    return (
        int(input_shape[0]),
        (rows + stride[0] - 1) // stride[0],
        (cols + stride[1] - 1) // stride[1],
        depth_out,
    )
77665f8304570bd5ba805241131a96d5d6908587
3,631,592
import re from typing import Tuple def _info_from_match(match: re.Match, start: int) -> Tuple[str, int]: """Returns the matching text and starting location if none yet available""" if start == -1: start = match.start() return match.group(), start
3599c6345db5ce2e16502a6e41dda4684da2f617
3,631,593
def otherICULegacyLinks():
    """Manual record of IANA link changes missing from ICU's icuzones file.

    The file `icuTzDir`/tools/tzcode/icuzones contains all ICU legacy time
    zones except those removed by IANA after an ICU release (e.g. ICU 67
    ships tzdata2018i, but tzdata2020b removed the "US/Pacific-New" ->
    "America/Los_Angeles" link). Standalone ICU tzdata updates don't refresh
    icuzones, so such IANA modifications must be recorded here by hand.
    After an ICU update, entries that appear in icuzones again can be
    removed from this function.
    """
    # Current ICU is up-to-date with IANA, so there are no extra links.
    return dict()
bfacf0d8b5a31c5edbd69f93c4d55d8857599e1a
3,631,594
def _postprocess_gif(gif: np.ndarray): """Process provided gif to a format that can be logged to Tensorboard.""" gif = np.clip(255 * gif, 0, 255).astype(np.uint8) B, T, C, H, W = gif.shape frames = gif.transpose((1, 2, 3, 0, 4)).reshape((1, T, C, H, B * W)) return frames
c9adb9c2d56dc437ee0e6b0aa7482da4e430aa2e
3,631,595
def format_meta(metadictionary):
    """Return a human-readable string of the metadata entries.

    :param metadictionary: dict with 'size', 'user' and 'date' entries
    :return: multi-line string showing size, user and formatted date
    """
    parts = [
        EMPTYCHAR,
        'SIZE' + BLANK + COLON + BLANK + str(metadictionary['size']) + EOL,
        'USER' + BLANK + COLON + BLANK + str(metadictionary['user']) + EOL,
        'DATE' + BLANK + COLON + BLANK + format_keys(metadictionary['date']) + EOL,
    ]
    return ''.join(parts)
e24f6846a8d9470899e74099a56780b0a16a7e76
3,631,596
import os


def look_in_directory(directory, file_to_find):
    """Recursively search `directory` for a file named `file_to_find`.

    Loops over the entries of the directory; files are compared by name and
    sub-directories are searched recursively.

    Fixes vs. the original:
      - the isdir() test checked `file_to_find` instead of the entry `f`,
      - the recursive call dropped the required `file_to_find` argument
        (TypeError whenever a sub-directory existed),
      - an explicit False is returned instead of falling through with None.

    :param directory: folder to search
    :param file_to_find: bare file name to look for
    :return: True when found (the match path is printed), False otherwise
    """
    for f in os.listdir(directory):
        # Uncomment the line below to see how the files/folders are searched
        # print("Looking in " + directory)
        entry = os.path.join(directory, f)
        if os.path.isfile(entry):
            if f == file_to_find:
                print("Found file: " + entry)
                return True
        elif os.path.isdir(entry):
            if look_in_directory(entry, file_to_find):
                return True
    return False
2023281e743227d3ba0172cc38fa9e9fe14bcdb2
3,631,597
def accumulating_income():
    """
    Real Name: Accumulating Income
    Original Eqn: Income
    Units: Month/Month
    Limits: (None, None)
    Type: component
    Subs: None

    Auto-generated system-dynamics component: simply forwards the current
    value of the `income` component.
    """
    return income()
9753c68223f351629deefbb46266b051952e31e5
3,631,598
import sys


def createConfig(namespace=None, updateEnv=None, updateEnvMap=None, updateEnvHelp=None, updateEnvDefaults=None, updateProperties=None, config=Config, initFromEnv=True, **settings):
    """Creates a base configuration class.

    :param namespace: module (or importable module name) used as the new
        class's configurations namespace
    :param updateEnv: dict merged into the class-level env mapping
    :param updateEnvMap: dict merged into the class-level envMap mapping
    :param updateEnvHelp: dict merged into the class-level envHelp mapping
    :param updateEnvDefaults: dict merged into the class-level envDefaults mapping
    :param updateProperties: properties applied via updateProperties()
    :param config: base class to derive the new configuration class from
    :param initFromEnv: initialize the new class from the environment
    :param settings: extra settings applied via update()
    :return: the freshly created Config subclass (its own root)
    """
    # Accept either a module object or an importable module name.
    if isinstance(namespace, str):
        namespace = sys.modules[namespace]

    class Config(config):
        configurationsNamespace = namespace

    Config.root = Config
    Config.update(settings)
    if updateEnv:
        Config.env.update(updateEnv)
    if updateEnvMap:
        Config.envMap.update(updateEnvMap)
    if updateEnvHelp:
        Config.envHelp.update(updateEnvHelp)
    if updateEnvDefaults:
        Config.envDefaults.update(updateEnvDefaults)
    if updateProperties:
        Config.updateProperties(updateProperties)
    if initFromEnv:
        Config.initFromEnv()
    return Config
b4966623c6ea20ae26335e51f279b0d2f9521005
3,631,599