content
stringlengths
35
762k
sha1
stringlengths
40
40
id
int64
0
3.66M
def _get_inner_text(html_node):
    """Return the plaintext content of an HTML node.

    Serializing with ``method='text'`` turns out to do exactly what we
    want:
    - strips out <br>s and other markup
    - replaces <a> tags with just their text
    - converts HTML entities like &nbsp; and smart quotes into their
      unicode equivalents
    """
    raw = lxml.html.tostring(
        html_node,
        encoding='utf-8',
        method='text',
        with_tail=False,
    )
    return raw.decode('utf-8')
36697baa3ad4bb8b2f33d37109dfbe8513517c13
3,635,600
from typing import Iterable
from typing import Type
import pkgutil
from typing import cast
from typing import Any
import os
import importlib


def get_all_commands() -> Iterable[Type[Cog]]:
    """
    List all applications.

    Imports every module under ``figtag.apps`` so each ``Cog`` subclass
    is registered, then returns the registered subclasses.
    """
    # Locate the figtag.apps package directory on disk.
    apps_loader = pkgutil.get_loader('figtag.apps')
    pkg_dir = os.path.dirname(cast(Any, apps_loader).get_filename())
    # Importing each module triggers subclass registration via normal
    # class creation.
    for _module_loader, mod_name, _ispkg in pkgutil.iter_modules([pkg_dir]):
        importlib.import_module('.apps.' + mod_name, __package__)
    return Cog.__subclasses__()
caeed4994d739194eb87c7f97ed881dd936da417
3,635,601
def zern_normalisation(nmodes=30):
    """
    Calculate normalisation vector.

    This function calculates a **nmodes** element vector with
    normalisation constants for Zernike modes that have not already
    been normalised.

    @param [in] nmodes Size of normalisation vector.
    @see <http://research.opt.indiana.edu/Library/VSIA/VSIA-2000_taskforce/TOPS4_2.html>
    and <http://research.opt.indiana.edu/Library/HVO/Handbook.html>.
    """
    # BUG FIX: xrange() is Python 2 only; range() works on Python 3.
    # Noll indices are 1-based, hence j+1.
    nolls = (noll_to_zern(j + 1) for j in range(nmodes))
    # sqrt(2(n+1) / (1 + delta_{m,0})) for each (n, m) pair; true
    # division is intended here.
    norms = [(2*(n+1)/(1+(m == 0)))**0.5 for n, m in nolls]
    return np.asanyarray(norms)
badd1d6f7ec185edcac42e4ec0cc8be748557fdf
3,635,602
def sample_without_replacement(n, N, dtype=np.int64):
    """Returns uniform samples in [0, N-1] without replacement.

    It will use Knuth sampling or rejection sampling depending on the
    parameters n and N.

    .. note:: the values 0.6 and 100 are based on empirical tests of
       the functions and would need to be changed if the functions are
       changed
    """
    # Rejection sampling wins when the population is large and we want
    # only a modest fraction of it.
    use_rejection = N > 100 and n / float(N) < 0.6
    if use_rejection:
        return rejection_sampling(n, N, dtype)
    return Knuth_sampling(n, N, dtype)
d044e09a910f543adb754a6dacd661e985e5bf0f
3,635,603
def bustypes(bus, gen):
    """Builds index lists of each type of bus (C{REF}, C{PV}, C{PQ}).

    Generators with "out-of-service" status are treated as L{PQ} buses
    with zero generation (regardless of C{Pg}/C{Qg} values in gen).
    Expects C{bus} and C{gen} have been converted to use internal
    consecutive bus numbering.

    @param bus: bus data
    @param gen: generator data
    @return: index lists of each bus type

    @author: Ray Zimmerman (PSERC Cornell)
    @author: Richard Lincoln

    changes by Uni Kassel (Florian Schaefer): If new ref bus is chosen
    -> Init as numpy array
    """
    # NOTE: generator-status bookkeeping (connection matrix and per-bus
    # ON counts) is not needed here; types come straight from BUS_TYPE.
    ref = find(bus[:, BUS_TYPE] == REF)  # slack bus index
    pv = find(bus[:, BUS_TYPE] == PV)    # PV bus indices
    pq = find(bus[:, BUS_TYPE] == PQ)    # PQ bus indices
    return ref, pv, pq
41f3fd23c217e113f6395475a7920f27199d6dd9
3,635,604
from typing import Union
from typing import Mapping
from typing import Iterable
from typing import Tuple
from typing import Any


def update_and_return_dict(
    dict_to_update: dict,
    update_values: Union[Mapping, Iterable[Tuple[Any, Any]]]
) -> dict:
    """Update ``dict_to_update`` in place and return that same dict.

    Handy for chaining, since ``dict.update`` itself returns None.

    Args:
        dict_to_update (dict): the dict to update
        update_values: a mapping or an iterable of (key, value) pairs
            to merge in

    Returns:
        dict: the very object that was passed in, now updated.
    """
    result = dict_to_update
    result.update(update_values)
    return result
8622f96a9d183c8ce5c7f260e97a4cb4420aecc7
3,635,605
def get_max_value_key(dic):
    """Gets the key for the maximum value in a dict.

    Ties are broken by returning the smallest key among the maxima,
    which keeps the result deterministic.
    """
    values = np.array(list(dic.values()))
    keys = np.array(list(dic.keys()))
    max_positions = np.where(values == np.max(values))[0]
    if len(max_positions) == 1:
        return keys[max_positions[0]]
    # Multiple maxima: pick the entry whose key is smallest.
    tied_keys = keys[max_positions]
    return keys[max_positions[np.argmin(tied_keys)]]
56e9c6d54547b16a881bdb110187f36b9812c178
3,635,606
def main(event, context):
    """Linearly interpolate a time series and compute a linearity-based
    confidence score for each interpolated point.

    (Docstring translated from Chinese.)

    ``event`` is expected to carry:
      - ``timeAxis``: original sample timestamps (parseable date strings)
      - ``valueAxis``: values at ``timeAxis``
      - ``timeAxisNew``: timestamps to interpolate onto
      - ``reliable_distance``: presumably the max distance from an
        original sample for a new point to count as reliable -- TODO
        confirm against exam_reliability's definition.
    """
    timeAxis = event["timeAxis"]
    valueAxis = event["valueAxis"]
    timeAxisNew = event["timeAxisNew"]
    reliable_distance = event["reliable_distance"]
    # Convert date strings to POSIX timestamps for numeric interpolation.
    timeAxis = [totimestamp(parser.parse(i)) for i in timeAxis]
    timeAxisNew = [totimestamp(parser.parse(i)) for i in timeAxisNew]
    valueAxisNew = linear_interpolate(timeAxis, valueAxis, timeAxisNew)
    reliabAxis = exam_reliability(timeAxis, timeAxisNew, reliable_distance)
    result = {
        "valueAxisNew": valueAxisNew.tolist(),
        "reliabAxis": reliabAxis,
    }
    return result
a5c67afd9f7c9b197c4847394869c27ae145f0bf
3,635,607
import xbmcaddon


def Addon_Info(id='', addon_id=''):
    """
    Retrieve details about an add-on; lots of built-in values are
    available such as path, version, name etc.

    CODE: Addon_Setting(id, [addon_id])

    AVAILABLE PARAMS:

        (*) id - Name of the property to retrieve. Built-in ids
        (current as of 15th April 2017): author, changelog,
        description, disclaimer, fanart, icon, id, name, path,
        profile, stars, summary, type, version

        addon_id - Defaults to the calling add-on's id, but any
        add-on can be queried by passing its id here.

    EXAMPLE CODE:
        version = koding.Addon_Info(id='version')
        name = koding.Addon_Info(id='name')
    """
    # Fall back to whichever add-on invoked this helper.
    if addon_id == '':
        addon_id = Caller()
    ADDON = xbmcaddon.Addon(id=addon_id)
    if id == '':
        # No property requested: warn the developer instead of raising.
        dialog.ok('ENTER A VALID ID', 'You\'ve called the Addon_Info function but forgot to add an ID. Please correct your code and enter a valid id to pull info on (e.g. "version")')
        return None
    return ADDON.getAddonInfo(id=id)
2fbdca3e5b4486c7fd702db3673433fe17dab4fe
3,635,608
import hashlib


def _hash_string_to_color(string):
    """
    Hash a string to a color (using hashlib rather than the built-in
    hash, for consistency between runs).
    """
    digest = hashlib.sha1(string.encode("utf-8")).hexdigest()
    return COLOR_ARRAY[int(digest, 16) % len(COLOR_ARRAY)]
5539fba65f5d4c3cf245faea678f33d05c164aac
3,635,609
def get_build(id):
    """Show metadata for a single build.

    Handles ``GET /builds/(int:id)`` and returns a JSON document with:

    - ``bucket_name``: S3 bucket hosting the built documentation.
    - ``bucket_root_dir``: path prefix of this build in the bucket.
    - ``date_created`` / ``date_ended``: UTC timestamps; ``date_ended``
      is ``null`` while the build is not deprecated.
    - ``git_refs``: Git refs describing the documentation version
      (usually one ref, e.g. ``['master']``; several for multi-package
      builds with ltd-mason).
    - ``github_requester``: GitHub handle of the person who triggered
      the build (null when not available).
    - ``product_url`` / ``self_url``: URLs of the parent product and of
      this build entity.
    - ``slug``: URL-safe slug of the build.
    - ``surrogate_key``: key attached to all of the build's S3 files so
      Fastly can be purged when an Edition is re-pointed; clients must
      upload files with it as ``x-amz-meta-surrogate-key``.
    - ``uploaded``: True once the documentation is in the S3 bucket
      (set via :http:patch:`/builds/(int:id)`).

    :param id: ID of the Build.
    :statuscode 200: No error.
    :statuscode 404: Build not found.
    """
    build = Build.query.get_or_404(id)
    return jsonify(build.export_data())
3903188dad4236ec3675de893c1c7444fe8322a9
3,635,610
def get_unity_snapshotschedule_parameters():
    """Return the Ansible argument spec for the Unity snapshot schedule
    module, mapping each option name to its type/choices/default."""
    return {
        'name': {'type': 'str'},
        'id': {'type': 'str'},
        'type': {'type': 'str',
                 'choices': ['every_n_hours', 'every_day', 'every_n_days',
                             'every_week', 'every_month']},
        'interval': {'type': 'int'},
        'hours_of_day': {'type': 'list', 'elements': 'int'},
        'day_interval': {'type': 'int'},
        'days_of_week': {'type': 'list', 'elements': 'str',
                         'choices': ['SUNDAY', 'MONDAY', 'TUESDAY',
                                     'WEDNESDAY', 'THURSDAY', 'FRIDAY',
                                     'SATURDAY']},
        'day_of_month': {'type': 'int'},
        'hour': {'type': 'int'},
        'minute': {'type': 'int'},
        'desired_retention': {'type': 'int'},
        'retention_unit': {'type': 'str', 'choices': ['hours', 'days'],
                           'default': 'hours'},
        'auto_delete': {'type': 'bool'},
        'state': {'required': True, 'type': 'str',
                  'choices': ['present', 'absent']},
    }
a25cb6c62a0a69f2586135677802309e033d86bc
3,635,611
import io
import zipfile
import os


def create_bootloader_win(interpreter_zip, executable, argv):
    """
    Prepares executable for execution on target machine.

    Appends client code to `interpreter_zip` archive. Embeds new
    archive into `executable`.

    :param interpreter_zip: Zip file containing python runtime, stdlib
        and essential dependencies.
    :param executable: Loader executable.
    :param argv: list of arguments passed to client.
    :return: binary string containing payload ready for execution.
    """
    # Load the base interpreter archive fully into memory so it can be
    # extended without touching the original file.
    with open(interpreter_zip, 'rb') as fp:
        zip_data = io.BytesIO(fp.read())
    with zipfile.ZipFile(zip_data, 'a', zipfile.ZIP_DEFLATED, False) as fp:
        # Client arguments ship inside the archive as a text file.
        fp.writestr('argv.txt', '\n'.join(argv))
        # Compile and add shared 'common' modules as common/*.pyc.
        base_path = settings.SOURCE_DIR / 'common'
        for archive_path in enumerate_files(base_path, '.py'):
            file_path = base_path / archive_path
            archive_path = 'common/' + archive_path
            code = compile_file(file_path, archive_path)
            fp.writestr(archive_path + 'c', code)
        # Compile and add client modules; main.py becomes __main__.pyc
        # so the resulting zip is directly runnable by the interpreter.
        base_path = settings.SOURCE_DIR / 'client'
        for archive_path in enumerate_files(base_path, '.py'):
            file_path = base_path / archive_path
            if archive_path == 'main.py':
                archive_path = '__main__.py'
            else:
                archive_path = 'client/' + archive_path
            code = compile_file(file_path, archive_path)
            fp.writestr(archive_path + 'c', code)
    zip_data.seek(0, os.SEEK_SET)
    zip_data = zip_data.read()
    # Embed the finished archive into the loader executable as a new
    # PE section named '.py'.
    pe = pefile.PE(executable)
    pe_add_section(pe, zip_data, '.py')
    return pe.write()
5c72deb7022682b63fbc9304910b0b8c34eaa124
3,635,612
def gaussian_kernel(X, kernel_type="gaussian", sigma=3.0, k=5):
    """gaussian_kernel: Build an adjacency matrix for data using a Gaussian kernel

    Args:
        X (N x d np.ndarray): Input data
        kernel_type: "gaussian" or "adaptive". Controls bandwidth
        sigma (float): Scalar kernel bandwidth (fixed mode); any
            positive real number is accepted
        k (integer): nearest neighbor kernel bandwidth (adaptive mode)

    Returns:
        W (N x N np.ndarray): Weight/adjacency matrix induced from X

    Raises:
        ValueError: if `kernel_type` is neither "gaussian" nor "adaptive".
    """
    _g = "gaussian"
    _a = "adaptive"

    kernel_type = kernel_type.lower()
    D = squareform(pdist(X))  # pairwise Euclidean distance matrix

    if kernel_type == "gaussian":  # fixed gaussian bandwidth
        print("fixed bandwidth specified")
        # BUG FIX: the old check `type(sigma) is float` rejected valid
        # integer bandwidths, and the non-short-circuiting
        # `all([type(...), sigma > 0])` raised TypeError for
        # non-numeric sigma instead of falling back.
        if not (isinstance(sigma, (int, float))
                and not isinstance(sigma, bool) and sigma > 0):
            print("invalid gaussian bandwidth, using sigma = max(min(D)) as bandwidth")
            # Largest nearest-neighbour distance keeps every point
            # connected; mask the diagonal before taking row minima.
            D_find = D + np.eye(np.size(D, 1)) * 1e15
            sigma = np.max(np.min(D_find, 1))
            del D_find
        sigma = np.ones(np.size(D, 1)) * sigma
    elif kernel_type == "adaptive":  # adaptive bandwidth
        print("adaptive bandwidth specified")
        # k must be a positive integer smaller than the sample count
        # (numpy integer types are accepted too).
        if not (isinstance(k, (int, np.integer)) and not isinstance(k, bool)
                and 0 < k < np.size(D, 1)):
            print("invalid adaptive bandwidth, using k=5 as bandwidth")
            k = 5
        knnDST = np.sort(D, axis=1)  # sorted neighbor distances
        sigma = knnDST[:, k]  # k-nn neighbor. 0 is self.
        del knnDST
    else:
        # BUG FIX: previously raised a bare ValueError with no message.
        raise ValueError(
            f"unknown kernel_type {kernel_type!r}; "
            "expected 'gaussian' or 'adaptive'"
        )

    W = ((D**2) / sigma[:, np.newaxis]**2).T
    W = np.exp(-1 * (W))
    W = (W + W.T) / 2  # symmetrize
    W = W - np.eye(W.shape[0])  # remove the diagonal
    return W
6d541e5d1faa12d3b61aa1eb5dba416efb303253
3,635,613
def get_all_camera_shapes(full_path=True):
    """
    Returns all camera shapes available in the current scene
    :param full_path: bool, Whether to return full paths to camera
        nodes or short ones
    :return: list(str)
    """
    cameras = maya.cmds.ls(type='camera', long=full_path)
    # ls() can return None; always hand back a list.
    return cameras if cameras else list()
513207a51bce4ec74ff6bbb08357a0b0b975fffd
3,635,614
def CreateVGGishNetwork(hop_size=0.96):   # Hop size is in seconds.
    """Define VGGish model, load the checkpoint, and return a dictionary
    that points to the different tensors defined by the model.

    NOTE(review): relies on a module-level TF session `sess` plus
    `vggish_slim`/`vggish_params`, and assumes 'vggish_model.ckpt'
    exists in the working directory -- confirm at the call site.
    """
    vggish_slim.define_vggish_slim()
    checkpoint_path = 'vggish_model.ckpt'
    vggish_params.EXAMPLE_HOP_SECONDS = hop_size
    vggish_slim.load_vggish_slim_checkpoint(sess, checkpoint_path)
    # Input/output tensors of the VGGish graph.
    features_tensor = sess.graph.get_tensor_by_name(
        vggish_params.INPUT_TENSOR_NAME)
    embedding_tensor = sess.graph.get_tensor_by_name(
        vggish_params.OUTPUT_TENSOR_NAME)
    # Friendly layer names mapped to op names in the graph.
    layers = {'conv1': 'vggish/conv1/Relu',
              'pool1': 'vggish/pool1/MaxPool',
              'conv2': 'vggish/conv2/Relu',
              'pool2': 'vggish/pool2/MaxPool',
              'conv3': 'vggish/conv3/conv3_2/Relu',
              'pool3': 'vggish/pool3/MaxPool',
              'conv4': 'vggish/conv4/conv4_2/Relu',
              'pool4': 'vggish/pool4/MaxPool',
              'fc1': 'vggish/fc1/fc1_2/Relu',
              'fc2': 'vggish/fc2/Relu',
              'embedding': 'vggish/embedding',
              'features': 'vggish/input_features',
              }
    g = tf.get_default_graph()
    # Resolve each op name to its tensor (':0' = first output).
    for k in layers:
        layers[k] = g.get_tensor_by_name(layers[k] + ':0')
    return {'features': features_tensor,
            'embedding': embedding_tensor,
            'layers': layers,
            }
dc7725524ede7b02fc9afdf63f8aeecde5c9c092
3,635,615
def estimate_mpk_parms_1d(
        pk_pos_0, x, f,
        pktype='pvoigt', bgtype='linear',
        fwhm_guess=0.07, center_bnd=0.02
):
    """
    Generate function-specific estimate for multi-peak parameters.

    Parameters
    ----------
    pk_pos_0 : array_like
        Initial peak positions (presumably 2theta values) -- TODO
        confirm units with caller.
    x : array_like
        Ordinate values (same length as `f`).
    f : array_like
        Measured intensities at `x`.
    pktype : str, optional
        Peak model: 'gaussian', 'lorentzian', 'pvoigt' or
        'split_pvoigt'. The default is 'pvoigt'.
    bgtype : str, optional
        Background model: 'linear', 'constant' or 'quadratic'.
        The default is 'linear'.
    fwhm_guess : float, optional
        Initial FWHM guess for every peak. The default is 0.07.
    center_bnd : float, optional
        Half-width of the bound placed around each peak center.
        The default is 0.02.

    Returns
    -------
    p0 : np.ndarray
        Flattened initial parameter vector (peak params + background).
    bnds : tuple of np.ndarray
        (lower, upper) bound vectors matching `p0`.
    """
    npts = len(x)
    assert len(f) == npts, "ordinate and data must be same length!"

    num_pks = len(pk_pos_0)
    min_val = np.min(f)

    # estimate background with SNIP1d
    bkg = snip1d(np.atleast_2d(f), w=int(np.floor(0.25*len(f)))).flatten()

    # fit linear bg and grab params
    bp, _ = optimize.curve_fit(lin_fit_obj, x, bkg, jac=lin_fit_jac)
    bg0 = bp[-1]
    bg1 = bp[0]

    # Per-peak parameter layout depends on the peak model:
    # 3 params (amp, center, fwhm) for gaussian/lorentzian,
    # +1 mixing param for pvoigt, 6 for split_pvoigt.
    if pktype == 'gaussian' or pktype == 'lorentzian':
        p0tmp = np.zeros([num_pks, 3])
        p0tmp_lb = np.zeros([num_pks, 3])
        p0tmp_ub = np.zeros([num_pks, 3])

        # x is just 2theta values
        # make guess for the initital parameters
        for ii in np.arange(num_pks):
            pt = np.argmin(np.abs(x - pk_pos_0[ii]))
            p0tmp[ii, :] = [
                (f[pt] - min_val),
                pk_pos_0[ii],
                fwhm_guess
            ]
            p0tmp_lb[ii, :] = [
                (f[pt] - min_val)*0.1,
                pk_pos_0[ii] - center_bnd,
                fwhm_guess*0.5
            ]
            p0tmp_ub[ii, :] = [
                (f[pt] - min_val)*10.0,
                pk_pos_0[ii] + center_bnd,
                fwhm_guess*2.0
            ]
    elif pktype == 'pvoigt':
        p0tmp = np.zeros([num_pks, 4])
        p0tmp_lb = np.zeros([num_pks, 4])
        p0tmp_ub = np.zeros([num_pks, 4])

        # x is just 2theta values
        # make guess for the initital parameters
        for ii in np.arange(num_pks):
            pt = np.argmin(np.abs(x - pk_pos_0[ii]))
            p0tmp[ii, :] = [
                (f[pt] - min_val),
                pk_pos_0[ii],
                fwhm_guess,
                0.5
            ]
            p0tmp_lb[ii, :] = [
                (f[pt] - min_val)*0.1,
                pk_pos_0[ii] - center_bnd,
                fwhm_guess*0.5,
                0.0
            ]
            p0tmp_ub[ii, :] = [
                (f[pt] - min_val+1.)*10.0,
                pk_pos_0[ii] + center_bnd,
                fwhm_guess*2.0,
                1.0
            ]
    elif pktype == 'split_pvoigt':
        p0tmp = np.zeros([num_pks, 6])
        p0tmp_lb = np.zeros([num_pks, 6])
        p0tmp_ub = np.zeros([num_pks, 6])

        # x is just 2theta values
        # make guess for the initital parameters
        for ii in np.arange(num_pks):
            pt = np.argmin(np.abs(x - pk_pos_0[ii]))
            p0tmp[ii, :] = [
                (f[pt] - min_val),
                pk_pos_0[ii],
                fwhm_guess,
                fwhm_guess,
                0.5,
                0.5
            ]
            p0tmp_lb[ii, :] = [
                (f[pt] - min_val)*0.1,
                pk_pos_0[ii] - center_bnd,
                fwhm_guess*0.5,
                fwhm_guess*0.5,
                0.0,
                0.0
            ]
            p0tmp_ub[ii, :] = [
                (f[pt] - min_val)*10.0,
                pk_pos_0[ii] + center_bnd,
                fwhm_guess*2.0,
                fwhm_guess*2.0,
                1.0,
                1.0
            ]

    # Append background parameters and their bounds.
    # NOTE(review): `minf` and `inf` are module-level constants
    # (presumably -inf/+inf) -- confirm where they are defined.
    if bgtype == 'linear':
        num_pk_parms = len(p0tmp.ravel())
        p0 = np.zeros(num_pk_parms+2)
        lb = np.zeros(num_pk_parms+2)
        ub = np.zeros(num_pk_parms+2)
        p0[:num_pk_parms] = p0tmp.ravel()
        lb[:num_pk_parms] = p0tmp_lb.ravel()
        ub[:num_pk_parms] = p0tmp_ub.ravel()
        p0[-2] = bg0
        p0[-1] = bg1
        lb[-2] = minf
        lb[-1] = minf
        ub[-2] = inf
        ub[-1] = inf
    elif bgtype == 'constant':
        num_pk_parms = len(p0tmp.ravel())
        p0 = np.zeros(num_pk_parms+1)
        lb = np.zeros(num_pk_parms+1)
        ub = np.zeros(num_pk_parms+1)
        p0[:num_pk_parms] = p0tmp.ravel()
        lb[:num_pk_parms] = p0tmp_lb.ravel()
        ub[:num_pk_parms] = p0tmp_ub.ravel()
        p0[-1] = np.average(bkg)
        lb[-1] = minf
        ub[-1] = inf
    elif bgtype == 'quadratic':
        num_pk_parms = len(p0tmp.ravel())
        p0 = np.zeros(num_pk_parms+3)
        lb = np.zeros(num_pk_parms+3)
        ub = np.zeros(num_pk_parms+3)
        p0[:num_pk_parms] = p0tmp.ravel()
        lb[:num_pk_parms] = p0tmp_lb.ravel()
        ub[:num_pk_parms] = p0tmp_ub.ravel()
        p0[-3] = bg0
        p0[-2] = bg1
        lb[-3] = minf
        lb[-2] = minf
        lb[-1] = minf
        ub[-3] = inf
        ub[-2] = inf
        ub[-1] = inf

    return p0, (lb, ub)
d1d92548e4d3125bb1df9cea7f5037a262a5594c
3,635,616
from typing import Tuple
from typing import Optional
import json


async def _parse_collection_from_search(
    request: Request,
) -> Tuple[Optional[str], Optional[str]]:
    """
    Parse the collection id from a search request.

    The search endpoint is a bit of a special case. If it's a GET, the
    collection and item ids are in the querystring. If it's a POST, the
    collection and item may be in either a CQL-JSON or CQL2-JSON filter
    body, or a query/stac-ql body.
    """
    method = request.method.lower()
    if method == "get":
        return (
            request.query_params.get("collections"),
            request.query_params.get("ids"),
        )
    if method == "post":
        try:
            body = await request.json()
            if "collections" in body:
                return _parse_queryjson(body)
            if "filter" in body:
                return _parse_cqljson(body["filter"])
        except json.JSONDecodeError:
            logger.warning(
                "Unable to parse search body as JSON. Ignoring collection parameter."
            )
    return (None, None)
52d392e13dce549905e357dc7a09b592e45d6c9e
3,635,617
import itertools


def make_cnf_clauses_by_group(N_, board_group, varboard_group):
    """
    Build CNF clauses constraining one group of the puzzle board.

    :param board_group: e.g. a row of sudoku board, of shape (M...)
    :param varboard_group: e.g. a row of sudoku variable id, of shape (M..., N_)
    """
    cclauses_local = []
    # Flatten the group: one row per cell, N_ boolean variables each.
    board_group = board_group.reshape(-1)
    varboard_group = varboard_group.reshape((board_group.shape[0], -1))
    # inv_oh presumably derives, from the already-filled cells, which of
    # the N_ digit slots remain available -- TODO confirm against
    # inv_oh's definition.
    oh = inv_oh(N_, board_group[board_group > 0])
    # Variable ids for the still-empty cells only.
    vidx = varboard_group[np.where(board_group == 0)[0]]
    poh = (oh > 0)
    # Split variables by whether their digit slot is still possible.
    ohpvidx = vidx[:, np.where(poh)[0]].T
    ohnvidx = vidx[:, np.where(~poh)[0]].reshape(-1)
    # Possible slots get precomputed exactly-one (XOR) clause sets.
    cclauses_local.extend(itertools.chain.from_iterable(
        (cnf.load_precomputed_xorcnf(x.tolist()) for x in ohpvidx)))
    # Impossible slots become unit clauses forcing the variable false.
    cclauses_local.extend((-ohnvidx[:, np.newaxis]).tolist())
    return cclauses_local
e3dc103a5a1674141780aed4d9af1e1d3eea0927
3,635,618
from typing import Tuple
from typing import Optional


def get_operations(
    archive_action: str, archive_type: str, compression_type: str
) -> Tuple[Operation, Optional[Operation]]:
    """
    A function to fetch relevant operations based on type of archive
    and compression if any.

    :param archive_action: 'archive' or 'extract'
    :param archive_type: 'zip' or 'tar'
    :param compression_type: 'gzip', 'xz', 'bzip2', or None for no
        compression
    :return: ``(archive_op, compression_op)``; ``compression_op`` is
        None when no compression was requested.

    Note: the return annotation previously read ``Tuple[Operation]``
    (a 1-tuple) even though a 2-tuple is returned; fixed.
    """
    operations = {
        "archive_ops": {
            "zip": {
                "extract": extract_zip_archive,
                "archive": make_zip_archive,
            },
            "tar": {
                "extract": extract_tar_archive,
                "archive": make_tar_archive,
            },
        },
        "compression_ops": {
            "gzip": {"compress": gz_compress, "decompress": gz_decompress},
            "xz": {"compress": xz_compress, "decompress": xz_decompress},
            "bzip2": {"compress": bz2_compress, "decompress": bz2_decompress},
        },
    }
    archive_op = operations["archive_ops"][archive_type][archive_action]
    compression_op = None
    if compression_type is not None:
        # Archiving implies compressing; extracting implies decompressing.
        compression_action = (
            "compress" if archive_action == "archive" else "decompress"
        )
        compression_op = operations["compression_ops"][compression_type][
            compression_action
        ]
    return archive_op, compression_op
39e03759f565a2969a3c50cbe5d314a131d498b4
3,635,619
import os


def import_spyview_dat(data_dir, filename):
    """
    Returns a np.array in the same shape as the raw .dat file
    """
    full_path = os.path.join(data_dir, filename)
    with open(full_path) as handle:
        return np.loadtxt(handle)
2fe12eba01c2fa4f779366bb5232c4ef8b8065c8
3,635,620
def Norm(norm, *args, **kwargs):
    """
    Return an arbitrary `~matplotlib.colors.Normalize` instance. Used to
    interpret the `norm` and `norm_kw` arguments when passed to any plotting
    method wrapped by `~proplot.axes.cmap_changer`. See
    `this tutorial \
<https://matplotlib.org/tutorials/colors/colormapnorms.html>`__
    for more info.

    Parameters
    ----------
    norm : str or `~matplotlib.colors.Normalize`
        The normalizer specification. If a `~matplotlib.colors.Normalize`
        instance already, the input argument is simply returned. Otherwise,
        `norm` should be a string corresponding to one of the "registered"
        colormap normalizers (see below table). If `norm` is a list or
        tuple and the first element is a "registered" normalizer name,
        subsequent elements are passed to the normalizer class as
        positional arguments.

        .. _norm_table:

        ==========================  =====================================
        Key(s)                      Class
        ==========================  =====================================
        ``'null'``, ``'none'``      `~matplotlib.colors.NoNorm`
        ``'diverging'``, ``'div'``  `~proplot.colors.DivergingNorm`
        ``'segmented'``             `~proplot.colors.LinearSegmentedNorm`
        ``'linear'``                `~matplotlib.colors.Normalize`
        ``'log'``                   `~matplotlib.colors.LogNorm`
        ``'power'``                 `~matplotlib.colors.PowerNorm`
        ``'symlog'``                `~matplotlib.colors.SymLogNorm`
        ==========================  =====================================

    Other parameters
    ----------------
    *args, **kwargs
        Passed to the `~matplotlib.colors.Normalize` initializer.

    Returns
    -------
    `~matplotlib.colors.Normalize`
        A `~matplotlib.colors.Normalize` instance.
    """
    # Already a Normalize instance: pass it through untouched.
    if isinstance(norm, mcolors.Normalize):
        return norm

    # Pull out extra args
    # A ('name', arg1, arg2, ...) tuple supplies positional args.
    if np.iterable(norm) and not isinstance(norm, str):
        norm, args = norm[0], (*norm[1:], *args)
    if not isinstance(norm, str):
        raise ValueError(f'Invalid norm name {norm!r}. Must be string.')

    # Get class
    if norm not in NORMS:
        raise ValueError(
            f'Unknown normalizer {norm!r}. Options are: '
            + ', '.join(map(repr, NORMS.keys())) + '.'
        )
    if norm == 'symlog' and not args and 'linthresh' not in kwargs:
        kwargs['linthresh'] = 1  # special case, needs argument
    return NORMS[norm](*args, **kwargs)
e3018d77dbd629a367c8cdfb10d13318e388ddd3
3,635,621
def run_trajectory(
        model, time_stop, time_step, initial_state, seed,
        n_points=500, docker=None):
    """
    Run one trajectory using the given model and initial state

    Parameters
    ----------
    model: str
        smoldyn model description
    time_stop: float
        Simulation duration
    time_step: Float
        Interval between two timesteps
    initial_state: [Mol]
        list of molecules at t=0
    seed: int
        seed used to run smoldyn (random when None)
    n_points: int
        number of time samples
    docker: str
        name of docker container to be used
    """
    if seed is None:
        seed = npr.randint(10**9)
    input_string = fill_model(
        model, time_stop, time_step, initial_state, seed, n_points)
    raw_data = run_smoldyn(input_string, docker)
    # Smoldyn prints the running history, a sentinel line, then the
    # final state; split on the sentinel and parse each half.
    history_lines, last_state_lines = [
        part.strip().split("\n")
        for part in raw_data.split("--Simulation ends--\n")
    ]
    return (parse_history(history_lines), parse_last_state(last_state_lines))
38f497f93fdeb978de420d85fc910cd674c0faf4
3,635,622
from pathlib import Path


def get_project_root_dir() -> Path:
    """
    Gets the Root path of Project

    Returns:
        Path: of Root project
    """
    # The project root is the directory holding the current script.
    return _get_script_file().parent
30de0b9a91770e201cf921a56bb512a67e0dd486
3,635,623
from typing import Dict


def get_deployment_statuses() -> Dict[str, DeploymentStatusInfo]:
    """Returns a dictionary of deployment statuses.

    A deployment's status is one of {UPDATING, UNHEALTHY, and HEALTHY}.

    Example:
        >>> from ray.serve.api import get_deployment_statuses
        >>> statuses = get_deployment_statuses()  # doctest: +SKIP
        >>> status_info = statuses["deployment_name"]  # doctest: +SKIP
        >>> status = status_info.status  # doctest: +SKIP
        >>> message = status_info.message  # doctest: +SKIP

    Returns:
        Dict[str, DeploymentStatus]: This dictionary maps the running
            deployment's name to a DeploymentStatus object containing its
            status and a message explaining the status.
    """
    client = get_global_client()
    return client.get_deployment_statuses()
00fc586f36256b2a73d1b78dff8d5af79b3f5e8e
3,635,624
def dumps_tikz(g, scale='0.5em'):
    """Return TikZ code as `str` for `networkx` graph `g`.

    Node attributes read: pos (required), label, angle, shape, loop,
    loop_width. Edge attributes read: edge_text, handed, dist, label,
    color, bend, suppress.
    """
    s = []
    s.append(padding_remove(r"""
    \begin{{tikzpicture}}[
        signal flow,
        pin distance=1pt,
        label distance=-2pt,
        x={scale},
        y={scale},
        baseline=(current bounding box.center),
    ]""").format(scale=scale))

    def fix(n):
        # Braces protect multi-char names; '.' is invalid in TikZ names.
        n = str(n)
        return "{" + n.replace('.', '/') + "}"

    for n, d in g.nodes(data=True):
        n = fix(n)
        # label
        label = d.get('label', None)
        angle = d.get('angle', '-45')
        X, Y = d['pos']
        if label is not None:
            label = 'pin={{{ang}: {label}}}'.format(ang=angle, label=label)
        # geometry (an unused `color` lookup was removed here)
        shape = d.get('shape', 'nodeS')
        # style
        style = r', '.join(filter(None, [shape, label]))
        s.append(
            r'\node[{style}] ({n}) at ({X}, {Y}) {{}};'.format(
                style=style, n=n, X=X, Y=Y))
    s.append('')
    s.append(r'\path')
    for u, v, d in g.edges(data=True):
        u2 = fix(u)
        v2 = fix(v)
        edge_text = d.get('edge_text', None)
        handed = d.get('handed', 'l')
        # BUG FIX: previously `dist = d.get('handed', 0.5)`, so the
        # arrow-label distance silently took the handedness value
        # instead of the 'dist' edge attribute.
        dist = d.get('dist', 0.5)
        label = d.get('label', '')
        color = d.get('color', '')
        bend = d.get('bend', 0)
        suppress = d.get('suppress', False)
        if suppress:
            continue
        if edge_text is None:
            if label:
                label = ' node {{{label}}}'.format(label=label)
            if handed == 'l':
                etype = "sflow={}".format(dist)
            elif handed == 'r':
                etype = "sflow'={}".format(dist)
            else:
                raise NotImplementedError("unknown handedness")
            if bend != 0:
                bend = 'bend right={}'.format(bend)
            else:
                bend = None
            if u == v:
                # Self-loop: direction/width come from the node itself.
                loop = g.nodes[u].get('loop', 70)
                loop_width = g.nodes[u].get('loop_width', 70)
                loop = (
                    'min distance=5mm, in={i}, out={o}, looseness=25'.format(
                        i=loop + loop_width/2, o=loop - loop_width/2))
                bend = None
            else:
                loop = None
            style = r', '.join(filter(None, [etype, bend, loop, color]))
            s.append(
                r'({u}) edge[{style}]{label} ({v})'.format(
                    style=style, label=label, u=u2, v=v2))
        else:
            s.append("({u}) {etext} ({v})".format(u=u2, v=v2, etext=edge_text))
    s.append(';')
    s.append(r'\end{tikzpicture}')
    return '\n'.join(s)
7bca58ded761992d455029055b167568414ef759
3,635,625
def drawModel(ax, model):
    """Visualize the model's decision boundary on the given axes.

    (Docstring translated from Chinese: "visualize the model's
    separating hyperplane".)

    Shades the region where the classifier's positive-class probability
    is at most 0.5 and returns the axes.
    """
    # Sample a 100x100 grid spanning the current axis limits.
    x1 = np.linspace(ax.get_xlim()[0], ax.get_xlim()[1], 100)
    x2 = np.linspace(ax.get_ylim()[0], ax.get_ylim()[1], 100)
    X1, X2 = np.meshgrid(x1, x2)
    # Positive-class probability at every grid point.
    Y = model.predict_proba(np.c_[X1.ravel(), X2.ravel()])[:, 1]
    Y = Y.reshape(X1.shape)
    # Fill the 0 <= P <= 0.5 band in translucent gray.
    ax.contourf(X1, X2, Y, levels=[0, 0.5], colors=["gray"], alpha=0.4)
    return ax
4e4e464682c970ed90e0db8a06696891af51280a
3,635,626
import sys
import functools


def keyboard_interrupt(func):
    """Decorator to be used on a method to check if there was a keyboard
    interrupt error that was raised.

    On KeyboardInterrupt the wrapper calls ``self.close()`` (closing the
    visualizer if necessary) and exits with status 0 instead of letting
    the traceback propagate.
    """
    # functools.wraps preserves the wrapped method's name/docstring
    # (previously lost, which hurt debugging and introspection).
    @functools.wraps(func)
    def wrap(self, *args, **kwargs):
        try:
            return func(self, *args, **kwargs)
        except KeyboardInterrupt:
            self.close()  # this will close the visualizer if necessary
            sys.exit(0)
    return wrap
1914924986c278bb919274b746ce13fb718268e8
3,635,627
import warnings


def correct_mpl(obj):
    """
    This procedure corrects MPL data:
    1.) Throw out data before laser firing (heights < 0).
    2.) Remove background signal.
    3.) Afterpulse Correction - Subtraction of (afterpulse-darkcount).
        NOTE: Currently the Darkcount in VAPS is being calculated as the
        afterpulse at ~30km. But that might not be absolutely correct
        and we will likely start providing darkcount profiles ourselves
        along with other corrections.
    4.) Range Correction.
    5.) Overlap Correction (Multiply).
    Note: Deadtime and darkcount corrections are not being applied yet.

    Parameters
    ----------
    obj : Dataset object
        The ACT object.

    Returns
    -------
    obj : Dataset object
        The ACT Object containing the corrected values.
    """
    # Get some variables before processing begins
    act = obj.act

    # Overlap Correction Variable
    op = obj['overlap_correction'].values[0, :]
    op_height = obj['overlap_correction_heights'].values[0, :]

    # 1 - Remove negative height data
    obj = obj.where(obj.height > 0, drop=True)
    height = obj['height'].values

    # The drop strips out the ACT data so re-populating
    obj.act = act

    # Get indices for calculating background
    var_names = ['signal_return_co_pol', 'signal_return_cross_pol']
    ind = [obj.height.shape[1] - 50, obj.height.shape[1] - 2]

    # Subset last gates into new dataset
    dummy = obj.isel(range_bins=xr.DataArray(np.arange(ind[0], ind[1])))

    # Turn off warnings
    warnings.filterwarnings("ignore")

    # Run through co and cross pol data for corrections
    # (-9999 flags missing data, hence the > -9998 mask)
    co_bg = dummy[var_names[0]]
    co_bg = co_bg.where(co_bg > -9998.)
    co_bg = co_bg.mean(dim='dim_0').values

    x_bg = dummy[var_names[1]]
    x_bg = x_bg.where(x_bg > -9998.)
    x_bg = x_bg.mean(dim='dim_0').values

    # Seems to be the fastest way of removing background signal at the moment
    co_data = obj[var_names[0]].where(obj[var_names[0]] > 0).values
    x_data = obj[var_names[1]].where(obj[var_names[1]] > 0).values
    for i in range(len(obj['time'].values)):
        co_data[i, :] = co_data[i, :] - co_bg[i]
        x_data[i, :] = x_data[i, :] - x_bg[i]

    # After Pulse Correction Variable
    co_ap = obj['afterpulse_correction_co_pol'].values
    x_ap = obj['afterpulse_correction_cross_pol'].values
    for j in range(len(obj['range_bins'].values)):
        # Afterpulse Correction
        co_data[:, j] = co_data[:, j] - co_ap[:, j]
        x_data[:, j] = x_data[:, j] - x_ap[:, j]

        # R-Squared Correction (range correction: multiply by height^2)
        co_data[:, j] = co_data[:, j] * height[:, j] ** 2.
        x_data[:, j] = x_data[:, j] * height[:, j] ** 2.

        # Overlap Correction (nearest overlap-table height to this gate)
        idx = (np.abs(op_height - height[0, j])).argmin()
        co_data[:, j] = co_data[:, j] * op[idx]
        x_data[:, j] = x_data[:, j] * op[idx]

    # Create the co/cross ratio variable
    ratio = (x_data / co_data) * 100.
    obj['cross_co_ratio'] = obj[var_names[0]].copy(data=ratio)

    # Convert data to decibels
    co_data = 10. * np.log10(co_data)
    x_data = 10. * np.log10(x_data)

    # Write data to object
    obj[var_names[0]].values = co_data
    obj[var_names[1]].values = x_data

    return obj
4d1da14d35e26dcd5ebc56fc333969e593ca02f8
3,635,628
def cutoff_depth(d: int):
    """A cutoff function that searches to depth d."""
    def cutoff(game, state, depth):
        # Stop expanding once the search depth exceeds d.
        return depth > d
    return cutoff
af7396a92f1cd234263e8448a6d1d22b56f4a12c
3,635,629
from typing import Dict


def create_contributor_node(d: Dict, label: str = "Contributor") -> Node:
    """
    Using the k, v pairs in `d`, create a Node object with those
    properties.

    Takes k, v as-is except for 'uuid', which is cast to int
    (defaulting to -1 when absent).

    Args:
        d (dict): property k, v pairs
        label (str): The py2neo.Node.__primarylabel__ to assign

    Returns:
        (Node): py2neo.Node instance of type `label`
    """
    uuid = int(d.get('uuid', -1))
    # BUG FIX: the `label` parameter was previously ignored and the
    # label hard-coded to "Contributor".
    contributor = Node(label,
                       uuid=uuid,
                       name=d.get('name'),
                       github_id=d.get('github_id'),
                       login=d.get('login'),
                       host_type=d.get('host_type'))
    return contributor
bbb83ec8a8c76678c91ea8b009c9e137c03f025b
3,635,630
def extractLipsHaarCascade(haarDetector, frame):
    """Extract the mouth region from a frame using a Haar face detector.

    Crops the lower third of the first detected face; when no face is
    found, the whole grayscale frame is used instead. Always returns a
    150x100 grayscale image.
    """
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = haarDetector.detectMultiScale(gray, 1.3, 5)
    if len(faces) == 0:
        # No face detected: fall back to the resized full frame.
        return cv2.resize(gray, (150, 100))
    x, y, w, h = faces[0]
    # Lower third of the face bounding box approximates the lip region.
    mouth = gray[y + (2 * h // 3):y + h, x:x + w]
    return cv2.resize(mouth, (150, 100))
f1d220df5af2dc2d6905aba05344ca04a96de8d5
3,635,631
import sys


def we_are_frozen():
    """Return True when running as a py2exe-frozen executable.

    This affects how we find out where we are located on disk.
    """
    # Equivalent to hasattr(sys, "frozen"): a unique sentinel distinguishes
    # "attribute missing" from any real value (including None/False).
    missing = object()
    return getattr(sys, "frozen", missing) is not missing
98d9a5b8e304a4e615ab0ffe2548a8e10a348783
3,635,632
def rgb_to_hex(red, green, blue):
    """Return color as #rrggbb for the given RGB color values."""
    # Each channel is truncated to int and rendered as two lowercase hex digits.
    return '#{:02x}{:02x}{:02x}'.format(int(red), int(green), int(blue))
7523bcb4b7a033655c9f5059fcf8d0ed656502c8
3,635,633
from typing import Callable


def rescue(
    function: Callable[
        [_SecondType],
        KindN[_RescuableKind, _FirstType, _UpdatedType, _ThirdType],
    ],
) -> Kinded[Callable[
    [KindN[_RescuableKind, _FirstType, _SecondType, _ThirdType]],
    KindN[_RescuableKind, _FirstType, _UpdatedType, _ThirdType],
]]:
    """
    Turns function's input parameter from a regular value to a container.

    In other words, it modifies the function
    signature from: ``a -> Container[b]``
    to: ``Container[a] -> Container[b]``

    Similar to :func:`returns.pointfree.bind`, but works for failed containers.

    This is how it should be used:

    .. code:: python

      >>> from returns.pointfree import rescue
      >>> from returns.result import Success, Failure, Result

      >>> def example(argument: int) -> Result[str, int]:
      ...     return Success(argument + 1)

      >>> assert rescue(example)(Success('a')) == Success('a')
      >>> assert rescue(example)(Failure(1)) == Success(2)

    Note, that this function works for all containers with ``.rescue`` method.
    See :class:`returns.interfaces.rescuable.Rescuable` for more info.

    """
    # ``@kinded`` preserves the concrete container kind of the argument in
    # the return type, so callers keep precise typing through the curry.
    @kinded
    def factory(
        container: KindN[_RescuableKind, _FirstType, _SecondType, _ThirdType],
    ) -> KindN[_RescuableKind, _FirstType, _UpdatedType, _ThirdType]:
        # Delegates to the library's internal rescue implementation, which
        # dispatches to the container's own ``.rescue`` method.
        return internal_rescue(container, function)
    return factory
c5474129e260729c61f5ecc80e3c1bb714195b25
3,635,634
def try_get_resource(_xmlroot, parent_node: str, child_node: str, _lang: str):
    """Find a LaTeX resource (solution / problem statement) in the XML tree.

    Scans ``child_node`` elements under ``parent_node`` for the first one
    whose ``language`` attribute equals ``_lang`` and whose ``type`` is
    'application/x-tex'.

    Args:
        _xmlroot: parsed XML root element.
        parent_node: tag name of the container element.
        child_node: tag name of the resource elements to scan.
        _lang: language code to match against the ``language`` attribute.

    Returns:
        ResourceSearchResult(found, path, encoding). When no resource
        matches, ``found`` is False and ``path``/``encoding`` are None.
    """
    # Bug fix: previously these names were only assigned inside the match
    # branch, so a miss raised UnboundLocalError instead of reporting
    # "not found".
    found = False
    path = None
    encoding = None
    for tutorial in _xmlroot.find(parent_node).iter(child_node):
        lang = tutorial.attrib['language']
        _type = tutorial.attrib['type']
        if lang == _lang and _type == 'application/x-tex':
            found = True
            path = tutorial.attrib['path']
            encoding = tutorial.attrib['charset']
            break
    return ResourceSearchResult(found, path, encoding)
7cb362ec0c1e8fb7926b67b3790b8c5bc9539a67
3,635,635
import os


def download_song(file_name, content):
    """ Download the audio file from YouTube. """
    extension = os.path.splitext(file_name)[1]
    # Only webm/m4a audio containers are supported.
    if extension not in ('.webm', '.m4a'):
        log.debug('No audio streams available for {} type'.format(extension))
        return False
    link = content.getbestaudio(preftype=extension[1:])
    if not link:
        log.debug('No audio streams available')
        return False
    log.debug('Downloading from URL: ' + link.url)
    filepath = os.path.join(const.args.folder, file_name)
    log.debug('Saving to: ' + filepath)
    link.download(filepath=filepath)
    return True
df9fcf9b6fd1942aa616f5ebb261ba9a1d3f30d2
3,635,636
def load_distributed_dataset(split,
                             batch_size,
                             name,
                             drop_remainder,
                             use_bfloat16,
                             normalize=False,
                             with_info=False,
                             proportion=1.0):
  """Loads CIFAR dataset for training or testing.

  Args:
    split: tfds.Split.
    batch_size: The global batch size to use.
    name: A string indicates whether it is cifar10 or cifar100.
    drop_remainder: A boolean indicates whether to drop the remainder of the
      batches. If True, the batch dimension will be static.
    use_bfloat16: data type, bfloat16 precision or float32.
    normalize: Whether to apply mean-std normalization on features.
    with_info: bool.
    proportion: float, the proportion of dataset to be used.

  Returns:
    Tuple of (tf.data.Dataset, tf.data.DatasetInfo) if with_info else only
    the dataset.
  """
  if use_bfloat16:
    dtype = tf.bfloat16
  else:
    dtype = tf.float32
  if proportion == 1.0:
    dataset, ds_info = tfds.load(name,
                                 split=split,
                                 with_info=True,
                                 as_supervised=True)
  else:
    # Pin the TFDS major version so percentage sub-splits stay stable.
    name = '{}:3.*.*'.format(name)
    # TODO(ywenxu): consider the case where we have splits of train, val, test.
    if split == tfds.Split.TRAIN:
      split_str = 'train[:{}%]'.format(int(100 * proportion))
    else:
      split_str = 'test[:{}%]'.format(int(100 * proportion))
    dataset, ds_info = tfds.load(name,
                                 split=split_str,
                                 with_info=True,
                                 as_supervised=True)
  # Disable intra-op parallelism to optimize for throughput instead of
  # latency.
  options = tf.data.Options()
  options.experimental_threading.max_intra_op_parallelism = 1
  dataset = dataset.with_options(options)
  # Prefetches a batch at a time to smooth out the time taken to load input
  # files for shuffling and processing.
  if split == tfds.Split.TRAIN:
    # Buffer the entire split so shuffling is uniform; repeat() makes the
    # training stream infinite.
    dataset_size = ds_info.splits['train'].num_examples
    dataset = dataset.shuffle(buffer_size=dataset_size).repeat()
  image_shape = ds_info.features['image'].shape

  def preprocess(image, label):
    """Image preprocessing function."""
    if split == tfds.Split.TRAIN:
      # Standard CIFAR augmentation: pad by 4px, random crop back to the
      # original shape, then random horizontal flip.
      image = tf.image.resize_with_crop_or_pad(
          image, image_shape[0] + 4, image_shape[1] + 4)
      image = tf.image.random_crop(image, image_shape)
      image = tf.image.random_flip_left_right(image)
    image = tf.image.convert_image_dtype(image, dtype)
    if normalize:
      # Per-channel mean/std — these look like CIFAR-10 statistics;
      # TODO confirm they are intended for cifar100 as well.
      mean = tf.constant([0.4914, 0.4822, 0.4465])
      std = tf.constant([0.2023, 0.1994, 0.2010])
      image = (image - mean) / std
    label = tf.cast(label, dtype)
    return image, label

  dataset = dataset.map(preprocess,
                        num_parallel_calls=tf.data.experimental.AUTOTUNE)
  dataset = dataset.batch(batch_size, drop_remainder=drop_remainder)
  # Operations between the final prefetch and the get_next call to the
  # iterator will happen synchronously during run time. We prefetch here again
  # to background all of the above processing work and keep it out of the
  # critical training path. Setting buffer_size to tf.contrib.data.AUTOTUNE
  # allows DistributionStrategies to adjust how many batches to fetch based on
  # how many devices are present.
  dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
  if with_info:
    return dataset, ds_info
  return dataset
6d2652fdae9acff9cbe3e134dfac6d0df5fbd048
3,635,637
def get_sample_media():
    """Serve the media file named by the ``path`` query parameter.

    Returns:
        bytes
    """
    media_path = request.args.get("path")
    # `conditional=True` enables HTTP range requests (partial content).
    return send_file(media_path, conditional=True)
9e3d98928096b4261bf3451564791384423524ef
3,635,638
def _is_swiftmodule(path): """Predicate to identify Swift modules/interfaces.""" return path.endswith((".swiftmodule", ".swiftinterface"))
085fa4f8735ce371927f606239d51c44bcca5acb
3,635,639
import itertools


def _unpack_array(fmt, buff, offset, count):
    """Unpack an array of items.

    :param fmt: The struct format string
    :type fmt: str
    :param buff: The buffer into which to unpack
    :type buff: buffer
    :param offset: The offset at which to start unpacking
    :type offset: int
    :param count: The number of items in the array
    :type count: int
    """
    items = []
    index = 0
    while index < count:
        item, offset = _unpack(fmt, buff, offset)
        items.append(item)
        index += 1
    if len(fmt) == 1:
        # Single-field format: each item is a 1-tuple, so flatten the
        # result into a plain list of values.
        items = list(itertools.chain.from_iterable(items))
    return items, offset
4aad2b38a332a9c57e4bb412990b4ba8ffd282dd
3,635,640
def _add_column_and_sort_table(sources, pointing_position):
    """Sort the table and add the column separation (offset from the source)
    and phi (position angle from the source)

    Parameters
    ----------
    sources : `~astropy.table.Table`
        Table of excluded sources.
    pointing_position : `~astropy.coordinates.SkyCoord`
        Coordinates of the pointing position

    Returns
    -------
    sources : `~astropy.table.Table`
        given sources table sorted with extra column "separation" and "phi"
    """
    # Work on a copy so the caller's table is left untouched.
    result = sources.copy()
    coords = SkyCoord(result["RA"], result["DEC"], unit="deg")
    result["separation"] = pointing_position.separation(coords)
    result["phi"] = pointing_position.position_angle(coords)
    result.sort("separation")
    return result
780fa4cd5ebed99b556cb283f41a52594921db39
3,635,641
def jsmin(content):
    """
    Minify your JavaScript code.

    Use `jsmin <https://pypi.python.org/pypi/jsmin>`_ to compress JavaScript.
    You must manually install jsmin if you want to use this processor.

    Args:
        content: your JavaScript code
    Returns:
        the minified version of your JavaScript code, or the original content
        if the Flask application is in Debug mode
    Raises:
        CompressorProcessorException: if jsmin is not installed.
    """
    # Bug fix: the guarded import had been hoisted to module level, leaving
    # the `try:` body empty (a SyntaxError) and making the documented
    # ImportError handling unreachable. Import lazily inside the guard so a
    # missing optional dependency raises the intended exception.
    try:
        from jsmin import jsmin as jsmin_processor
    except ImportError:
        raise CompressorProcessorException("'jsmin' is not installed. Please"
                                           " install it if you want to use "
                                           "the 'jsmin' processor.")
    if current_app.debug is True:
        # do not minify
        return content
    return jsmin_processor(content)
9ab9b29ed74cf798ec868e4fbdf8d5df697e3339
3,635,642
from typing import IO
import sys
import tempfile


def generate_server_config() -> IO[bytes]:
    """Returns a temporary generated file for use as the server config."""
    boards = stm32f429i_detector.detect_boards()
    if not boards:
        _LOG.critical('No attached boards detected')
        sys.exit(1)
    config_file = tempfile.NamedTemporaryFile()
    _LOG.debug('Generating test server config at %s', config_file.name)
    _LOG.debug('Found %d attached devices', len(boards))
    for board in boards:
        # One runner entry per attached board, addressed by ST-Link serial
        # and serial port device name.
        runner_args = [
            '--stlink-serial', board.serial_number, '--port', board.dev_name
        ]
        entry = generate_runner(_TEST_RUNNER_COMMAND, runner_args)
        config_file.write(entry.encode('utf-8'))
    config_file.flush()
    return config_file
d61177fde609be4bfc4803cb5f0aaf92a1bdc040
3,635,643
import torch


def smooth_l1_loss_detectron2(input, target, beta: float, reduction: str = "none"):
    """Smooth L1 loss as defined in the Fast R-CNN paper.

    With x = input - target:

        smoothl1(x) = 0.5 * x**2 / beta   if abs(x) < beta
                      abs(x) - 0.5 * beta  otherwise

    This equals huber(x) / beta; as beta -> 0 it converges to L1 loss, and
    its L1 segment always has slope 1 (unlike Huber, whose slope is beta).
    Note that PyTorch's builtin SmoothL1Loss only implements the beta=1
    special case.

    Args:
        input (Tensor): input tensor of any shape
        target (Tensor): target value tensor with the same shape as input
        beta (float): L1 to L2 change point. For beta values < 1e-5,
            plain L1 loss is computed.
        reduction: 'none' | 'mean' | 'sum'

    Returns:
        The loss with the reduction option applied.
    """
    diff = torch.abs(input - target)
    if beta < 1e-5:
        # torch.where with beta == 0 yields nan gradients: the unselected
        # "0.5 * n ** 2 / 0" branch still receives an incoming gradient of
        # zeros rather than "no gradient". Treat tiny beta as exact L1.
        loss = diff
    else:
        quadratic = 0.5 * diff ** 2 / beta
        linear = diff - 0.5 * beta
        loss = torch.where(diff < beta, quadratic, linear)

    if reduction == "mean":
        return loss.mean()
    if reduction == "sum":
        return loss.sum()
    return loss
d6e9264e8de9acf3fec59cb70207c1aa4075ece6
3,635,644
def png_to_jpg(png_path, jpg_path):
    """ convert image format: png -> jpg, then save picture with jpg
    Args:
        png_path (str)
        jpg_path (str)
    Return:
        True or False (bool)
    """
    img = Image.open(png_path)
    try:
        if len(img.split()) == 4:
            # 4 bands means RGBA: drop the alpha channel first, since the
            # JPEG/BMP writers reject RGBA input.
            r, g, b, _alpha = img.split()
            img = Image.merge("RGB", (r, g, b))
        img.convert('RGB').save(jpg_path, quality=100)
        return True
    except Exception:
        # Best-effort conversion: any failure is reported as False.
        return False
255f5ed67d8929c05cbf573fcc64c45ff019ece1
3,635,645
import warnings
import io


def split_lines_to_df(in_lines_trunc_df):
    """
    For a column of strings that each represent the line of a CSV (and each
    line may have a different number of separators), read them into a
    DataFrame.

    in_lines_trunc_df: Assumes that the relevant column is `0`

    Returns:
        The resulting DataFrame
    """
    with warnings.catch_warnings():
        # Ignore dtype warnings at this point, because we check them later on (after casting)
        warnings.filterwarnings(
            "ignore",
            message='.*Specify dtype option on import or set low_memory=False',
            category=pd.errors.DtypeWarning,
        )
        # Re-join the lines into a single in-memory CSV stream so pandas can
        # parse the ragged rows in one pass.
        with io.StringIO('\n'.join(in_lines_trunc_df[0])) as in_lines_trunc_stream:
            # `names=range(max_separators + 1)` sizes the widest row; shorter
            # rows are padded with NaN. The first field becomes the index,
            # renamed to ROW_ID_NAME.
            df_trimmed = pd.read_csv(
                in_lines_trunc_stream,
                header=None,
                index_col=0,
                sep=INPUT_SEPARATOR,
                names=range(in_lines_trunc_df[0].str.count(INPUT_SEPARATOR).max() + 1),
            ).rename_axis(index=ROW_ID_NAME)
    return df_trimmed
eec9624a88f0758d4db2ecafc6df3eaa9e3a3eb2
3,635,646
def generate_full_vast_beleg_ids_request_xml(form_data, th_fields=None, use_testmerker=False):
    """
    Generates the full xml for the Verfahren "ElsterDatenabholung" and the
    Datenart "ElsterVaStDaten", including "Anfrage" field. An example xml can
    be found in the Eric documentation under
    common/Schnittstellenbeschreibungen/Sonstige/ElsterDatenabholung/Beispiele/1_ElsterDatenabholung_Liste_Anfrage.xml
    """
    # Fall back to the default Beleg-ID request header fields when none given.
    th_fields = th_fields or get_vast_beleg_ids_request_th_fields(use_testmerker)
    return generate_full_xml(th_fields,
                             _add_vast_xml_nutzdaten_header,
                             _add_vast_beleg_ids_request_nutzdaten,
                             form_data)
88dd6e5e374deec62ce1229525bc936e2fb2ac79
3,635,647
def get_q_vocab(ques, count_thr=0, insert_unk=False):
    """Build a word vocabulary from tokenized questions.

    Ported from Python 2 (``iteritems``/``itervalues``) to Python 3; also
    guards the percentage denominators so empty input no longer divides by
    zero.

    Args:
        ques: mapping qid -> {'tokenized_question': [word, ...], ...}
        count_thr: int, words occurring <= count_thr times are dropped
            (not included)
        insert_unk: bool, append the special '<UNK>' token to the vocab

    Return:
        vocab: list of vocab
    """
    counts = {}
    for content in ques.values():
        for word in content['tokenized_question']:
            counts[word] = counts.get(word, 0) + 1

    cw = sorted([(count, w) for w, count in counts.items()], reverse=True)
    print('top words and their counts:')
    print('\n'.join(map(str, cw[:20])))

    total_words = sum(counts.values())
    print('total words:', total_words)
    bad_words = [w for w, n in counts.items() if n <= count_thr]
    vocab = [w for w, n in counts.items() if n > count_thr]
    bad_count = sum(counts[w] for w in bad_words)
    # max(..., 1) keeps the stats printable for an empty corpus.
    print('number of bad words: %d/%d = %.2f%%'
          % (len(bad_words), len(counts),
             len(bad_words) * 100.0 / max(len(counts), 1)))
    print('number of words in vocab would be %d' % (len(vocab),))
    print('number of UNKs: %d/%d = %.2f%%'
          % (bad_count, total_words, bad_count * 100.0 / max(total_words, 1)))

    if insert_unk:
        print('inserting the special UNK token')
        vocab.append('<UNK>')
    return vocab
4926a595d2d4bff50db986ad012a21357fb9b8ec
3,635,648
def get_desc_dist(descriptors1, descriptors2):
    """Compute a pairwise descriptor "distance" matrix.

    The score for each pair is the *negated* dot product of the two
    descriptors, so smaller values indicate more similar descriptors. For
    L2-normalized descriptors this ranks identically to the squared
    Euclidean distance 2 - 2 * (d1 . d2), just shifted by a constant.

    Args:
        descriptors1: (N, D) array of descriptors.
        descriptors2: (M, D) array of descriptors.

    Returns:
        (N, M) array of negated dot-product similarities.
    """
    # (Previous experiments with exp-weighted similarities and explicit L2
    # distances were removed as dead code; the negated dot product is what
    # ships.)
    return -descriptors1 @ descriptors2.transpose()
2baea3bfa01b77765ec3ce95fd9a6be742783420
3,635,649
def _parse_cells_icdar(xml_table):
    """ Gets the table cells from a table in ICDAR-XML format. """
    cells = []
    # Cell ids are assigned sequentially in document order.
    for cell_id, xml_cell in enumerate(xml_table.findall(".//cell")):
        cells.append(Cell(cell_id,
                          get_text(xml_cell),
                          get_attribute(xml_cell, "start-row"),
                          get_attribute(xml_cell, "start-col"),
                          get_attribute(xml_cell, "end-row"),
                          get_attribute(xml_cell, "end-col")))
    return cells
fdfca5c77f1122ae14bc8b72a676ab5cafab6c63
3,635,650
from typing import Optional
from typing import BinaryIO
import requests
import time


def download(link: str, method: str = "GET",
             to_file: Optional[BinaryIO] = None,
             headers: Optional[dict] = None,
             allow_redirects: bool = True,
             max_retries: int = 3) -> "Response":
    """
    Return Response named tuple
    Response.response - requests.Response object
    Response.size - size of downloaded file, 0 if to_file is None
    Response.hash - md5 hash of the downloaded file, empty string if to_file is None

    Retries up to `max_retries` times with exponential backoff on HTTP
    errors (other than 404) and timeouts. Connection errors are treated as
    client-side and are not retried. Re-raises the underlying requests
    exception once retries are exhausted.
    """
    # Backoff schedule: 2, 4, 8, ... seconds — one entry per retry.
    exp_delay = [2**(x+1) for x in range(max_retries)]
    retry_count = 0
    query = requests.Request(method, link)
    query = TWITTER_SESSION.prepare_request(query)
    LOGGER.debug("Making %s request to %s", method, link)
    if headers:
        query.headers.update(headers)
    while True:
        try:
            response = TWITTER_SESSION.send(query, allow_redirects=allow_redirects, stream=True, timeout=15)
            response.raise_for_status()
            if to_file:
                # Stream in 3 MiB chunks, hashing and counting as we write.
                size = 0
                md5_hash = md5()
                for chunk in response.iter_content(chunk_size=(1024**2)*3):
                    to_file.write(chunk)
                    md5_hash.update(chunk)
                    size += len(chunk)
                #LOGGER.info("left=%s right=%s", size, response.headers["content-length"])
                # NOTE(review): `assert` is stripped under `python -O`, and
                # this assumes the server always sends Content-Length —
                # confirm, or raise an explicit exception instead.
                assert size == int(response.headers["content-length"])
                return Response(response=response, size=size, hash=md5_hash.hexdigest())
            return Response(response)
        except requests.HTTPError:
            LOGGER.error("Received HTTP error code %s", response.status_code)
            # 404 is permanent — give up immediately; otherwise retry below.
            if response.status_code in [404] or retry_count >= max_retries:
                raise
        except requests.Timeout:
            LOGGER.error("Connection timed out")
            if retry_count >= max_retries:
                raise
        except requests.ConnectionError:
            LOGGER.error("Could not establish a new connection")
            #most likely a client-side connection error, do not retry
            raise
        except requests.RequestException as err:
            # Unknown failure mode: dump the full prepared request to aid
            # debugging, then propagate.
            LOGGER.error("Unexpected request exception")
            LOGGER.error("request url = %s", query.url)
            LOGGER.error("request method = %s", query.method)
            LOGGER.error("request headers = %s", query.headers)
            LOGGER.error("request body = %s", query.body)
            raise err
        # Falls through only for retryable HTTPError/Timeout cases.
        retry_count += 1
        delay = exp_delay[retry_count-1]
        print(f"Retrying ({retry_count}/{max_retries}) in {delay}s")
        LOGGER.error("Retrying (%s/%s) in %ss", retry_count, max_retries, delay)
        time.sleep(delay)
e6f62914f89b0de314ce158a5e62c4a42af904f4
3,635,651
import os


def get_mem_usage():
    """returns percentage and vsz mem usage of this script"""
    pid = os.getpid()
    ps_output = os.popen("ps -p %s u" % pid).read()
    # Skip the header row, then split the data row into whitespace fields.
    fields = ps_output.split("\n")[1].split()
    # In `ps u` output, field 3 is %MEM and field 4 is VSZ (0-indexed).
    return float(fields[3]), int(fields[4])
9d0060f435a1fb0d77a31ce946d7e46ffb7b4762
3,635,652
import os
import requests


def download_file(url, local_folder=None):
    """Downloads file pointed to by `url`.

    If `local_folder` is not supplied, downloads to the current folder.
    """
    filename = os.path.basename(url)
    if local_folder:
        filename = os.path.join(local_folder, filename)

    print("Downloading: " + url)
    response = requests.get(url, stream=True)
    if response.status_code != 200:
        raise Exception("download file failed with status code: %d, fetching url '%s'" % (response.status_code, url))

    # Persist the body to disk, then hand back the local path.
    with open(filename, "wb") as handle:
        handle.write(response.content)
    return filename
2229239b4c54c9ef7858b3013cb78d00e0ea2ae0
3,635,653
def gist_ncar(range, **traits):
    """ Generator for the 'gist_ncar' colormap from GIST.

    Parameters
    ----------
    range
        Value range the color map should span (forwarded to the mapper).
    **traits
        Extra trait keyword arguments forwarded to the ColorMapper factory.

    Returns
    -------
    A ColorMapper built from the hard-coded segment table below.
    """
    # Hard-coded segment data sampled from GIST's gist_ncar table.  Each
    # channel maps to a list of (x, y0, y1) control points in the usual
    # matplotlib/segmentdata convention; values are interpolated between
    # successive x positions.
    _data = dict( red = [(0.0, 0.0, 0.0), (0.0050505050458014011, 0.0, 0.0), (0.010101010091602802, 0.0, 0.0), (0.015151515603065491, 0.0, 0.0), (0.020202020183205605, 0.0, 0.0), (0.025252524763345718, 0.0, 0.0), (0.030303031206130981, 0.0, 0.0), (0.035353533923625946, 0.0, 0.0), (0.040404040366411209, 0.0, 0.0), (0.045454546809196472, 0.0, 0.0), (0.050505049526691437, 0.0, 0.0), (0.0555555559694767, 0.0, 0.0), (0.060606062412261963, 0.0, 0.0), (0.065656565129756927, 0.0, 0.0), (0.070707067847251892, 0.0, 0.0), (0.075757578015327454, 0.0, 0.0), (0.080808080732822418, 0.0, 0.0), (0.085858583450317383, 0.0, 0.0), (0.090909093618392944, 0.0, 0.0), (0.095959596335887909, 0.0, 0.0), (0.10101009905338287, 0.0, 0.0), (0.10606060922145844, 0.0, 0.0), (0.1111111119389534, 0.0, 0.0), (0.11616161465644836, 0.0, 0.0), (0.12121212482452393, 0.0, 0.0), (0.12626262009143829, 0.0, 0.0), (0.13131313025951385, 0.0, 0.0), (0.13636364042758942, 0.0, 0.0), (0.14141413569450378, 0.0, 0.0), (0.14646464586257935, 0.0, 0.0), (0.15151515603065491, 0.0, 0.0), (0.15656565129756927, 0.0, 0.0), (0.16161616146564484, 0.0, 0.0), (0.1666666716337204, 0.0, 0.0), (0.17171716690063477, 0.0, 0.0), (0.17676767706871033, 0.0, 0.0), (0.18181818723678589, 0.0, 0.0), (0.18686868250370026, 0.0, 0.0), (0.19191919267177582, 0.0, 0.0), (0.19696970283985138, 0.0, 0.0), (0.20202019810676575, 0.0, 0.0), (0.20707070827484131, 0.0, 0.0), (0.21212121844291687, 0.0, 0.0), (0.21717171370983124, 0.0, 0.0), (0.2222222238779068, 0.0, 0.0), (0.22727273404598236, 0.0, 0.0), (0.23232322931289673, 0.0, 0.0), (0.23737373948097229, 0.0, 0.0), (0.24242424964904785, 0.0, 0.0), (0.24747474491596222, 0.0, 0.0), (0.25252524018287659, 0.0, 0.0), (0.25757575035095215, 0.0, 0.0), (0.26262626051902771, 0.0, 0.0), (0.26767677068710327, 0.0, 0.0), (0.27272728085517883, 0.0, 0.0), (0.27777779102325439, 0.0, 0.0), (0.28282827138900757, 0.0, 0.0), (0.28787878155708313, 0.0, 0.0), (0.29292929172515869, 0.0, 0.0), (0.29797980189323425, 0.0, 0.0), (0.30303031206130981, 0.0, 0.0), (0.30808082222938538, 0.0, 0.0), (0.31313130259513855, 0.0, 0.0), (0.31818181276321411, 0.0039215688593685627, 0.0039215688593685627), (0.32323232293128967, 0.043137256056070328, 0.043137256056070328), (0.32828283309936523, 0.08235294371843338, 0.08235294371843338), (0.3333333432674408, 0.11764705926179886, 0.11764705926179886), (0.33838382363319397, 0.15686275064945221, 0.15686275064945221), (0.34343433380126953, 0.19607843458652496, 0.19607843458652496), (0.34848484396934509, 0.23137255012989044, 0.23137255012989044), (0.35353535413742065, 0.27058824896812439, 0.27058824896812439), (0.35858586430549622, 0.30980393290519714, 0.30980393290519714), (0.36363637447357178, 0.3490196168422699, 0.3490196168422699), (0.36868685483932495, 0.38431373238563538, 0.38431373238563538), (0.37373736500740051, 0.40392157435417175, 0.40392157435417175), (0.37878787517547607, 0.41568627953529358, 0.41568627953529358), (0.38383838534355164, 0.42352941632270813, 0.42352941632270813), (0.3888888955116272, 0.43137255311012268, 0.43137255311012268), (0.39393940567970276, 0.44313725829124451, 0.44313725829124451), (0.39898988604545593, 0.45098039507865906, 0.45098039507865906), (0.40404039621353149, 0.45882353186607361, 0.45882353186607361), (0.40909090638160706, 0.47058823704719543, 0.47058823704719543), (0.41414141654968262, 0.47843137383460999, 0.47843137383460999), (0.41919192671775818, 0.49019607901573181, 0.49019607901573181), (0.42424243688583374, 0.50196081399917603, 0.50196081399917603), (0.42929291725158691, 0.52549022436141968, 0.52549022436141968), (0.43434342741966248, 0.54901963472366333, 0.54901963472366333), (0.43939393758773804, 0.57254904508590698, 0.57254904508590698), (0.4444444477558136, 0.60000002384185791, 0.60000002384185791), (0.44949495792388916, 0.62352943420410156, 0.62352943420410156), (0.45454546809196472, 0.64705884456634521, 0.64705884456634521), (0.4595959484577179, 0.67058825492858887, 0.67058825492858887), (0.46464645862579346, 0.69411766529083252, 0.69411766529083252), (0.46969696879386902, 0.72156864404678345, 0.72156864404678345), (0.47474747896194458, 0.7450980544090271, 0.7450980544090271), (0.47979798913002014, 0.76862746477127075, 0.76862746477127075), (0.4848484992980957, 0.7921568751335144, 0.7921568751335144), (0.48989897966384888, 0.81568628549575806, 0.81568628549575806), (0.49494948983192444, 0.83921569585800171, 0.83921569585800171), (0.5, 0.86274510622024536, 0.86274510622024536), (0.50505048036575317, 0.88627451658248901, 0.88627451658248901), (0.51010102033615112, 0.90980392694473267, 0.90980392694473267), (0.5151515007019043, 0.93333333730697632, 0.93333333730697632), (0.52020204067230225, 0.95686274766921997, 0.95686274766921997), (0.52525252103805542, 0.98039215803146362, 0.98039215803146362), (0.53030300140380859, 1.0, 1.0), (0.53535354137420654, 1.0, 1.0), (0.54040402173995972, 1.0, 1.0), (0.54545456171035767, 1.0, 1.0), (0.55050504207611084, 1.0, 1.0), (0.55555558204650879, 1.0, 1.0), (0.56060606241226196, 1.0, 1.0), (0.56565654277801514, 1.0, 1.0), (0.57070708274841309, 1.0, 1.0), (0.57575756311416626, 1.0, 1.0), (0.58080810308456421, 1.0, 1.0), (0.58585858345031738, 1.0, 1.0), (0.59090906381607056, 1.0, 1.0), (0.59595960378646851, 1.0, 1.0), (0.60101008415222168, 1.0, 1.0), (0.60606062412261963, 1.0, 1.0), (0.6111111044883728, 1.0, 1.0), (0.61616164445877075, 1.0, 1.0), (0.62121212482452393, 1.0, 1.0), (0.6262626051902771, 1.0, 1.0), (0.63131314516067505, 1.0, 1.0), (0.63636362552642822, 1.0, 1.0), (0.64141416549682617, 1.0, 1.0), (0.64646464586257935, 1.0, 1.0), (0.65151512622833252, 1.0, 1.0), (0.65656566619873047, 1.0, 1.0), (0.66161614656448364, 1.0, 1.0), (0.66666668653488159, 1.0, 1.0), (0.67171716690063477, 1.0, 1.0), (0.67676764726638794, 1.0, 1.0), (0.68181818723678589, 1.0, 1.0), (0.68686866760253906, 1.0, 1.0), (0.69191920757293701, 1.0, 1.0), (0.69696968793869019, 1.0, 1.0), (0.70202022790908813, 1.0, 1.0), (0.70707070827484131, 1.0, 1.0), (0.71212118864059448, 1.0, 1.0), (0.71717172861099243, 1.0, 1.0), (0.72222220897674561, 1.0, 1.0), (0.72727274894714355, 1.0, 1.0), (0.73232322931289673, 1.0, 1.0), (0.7373737096786499, 1.0, 1.0), (0.74242424964904785, 1.0, 1.0), (0.74747473001480103, 1.0, 1.0), (0.75252526998519897, 1.0, 1.0), (0.75757575035095215, 1.0, 1.0), (0.7626262903213501, 1.0, 1.0), (0.76767677068710327, 1.0, 1.0), (0.77272725105285645, 1.0, 1.0), (0.77777779102325439, 1.0, 1.0), (0.78282827138900757, 1.0, 1.0), (0.78787881135940552, 1.0, 1.0), (0.79292929172515869, 1.0, 1.0), (0.79797977209091187, 0.96470588445663452, 0.96470588445663452), (0.80303031206130981, 0.92549020051956177, 0.92549020051956177), (0.80808079242706299, 0.89019608497619629, 0.89019608497619629), (0.81313133239746094, 0.85098040103912354, 0.85098040103912354), (0.81818181276321411, 0.81568628549575806, 0.81568628549575806), (0.82323235273361206, 0.7764706015586853, 0.7764706015586853), (0.82828283309936523, 0.74117648601531982, 0.74117648601531982), (0.83333331346511841, 0.70196080207824707, 0.70196080207824707), (0.83838385343551636, 0.66666668653488159, 0.66666668653488159), (0.84343433380126953, 0.62745100259780884, 0.62745100259780884), (0.84848487377166748, 0.61960786581039429, 0.61960786581039429), (0.85353535413742065, 0.65098041296005249, 0.65098041296005249), (0.85858583450317383, 0.68235296010971069, 0.68235296010971069), (0.86363637447357178, 0.7137255072593689, 0.7137255072593689), (0.86868685483932495, 0.7450980544090271, 0.7450980544090271), (0.8737373948097229, 0.77254903316497803, 0.77254903316497803), (0.87878787517547607, 0.80392158031463623, 0.80392158031463623), (0.88383835554122925, 0.83529412746429443, 0.83529412746429443), (0.8888888955116272, 0.86666667461395264, 0.86666667461395264), (0.89393937587738037, 0.89803922176361084, 0.89803922176361084), (0.89898991584777832, 0.92941176891326904, 0.92941176891326904), (0.90404039621353149, 0.93333333730697632, 0.93333333730697632), (0.90909093618392944, 0.93725490570068359, 0.93725490570068359), (0.91414141654968262, 0.93725490570068359, 0.93725490570068359), (0.91919189691543579, 0.94117647409439087, 0.94117647409439087), (0.92424243688583374, 0.94509804248809814, 0.94509804248809814), (0.92929291725158691, 0.94509804248809814, 0.94509804248809814), (0.93434345722198486, 0.94901961088180542, 0.94901961088180542), (0.93939393758773804, 0.9529411792755127, 0.9529411792755127), (0.94444441795349121, 0.9529411792755127, 0.9529411792755127), (0.94949495792388916, 0.95686274766921997, 0.95686274766921997), (0.95454543828964233, 0.96078431606292725, 0.96078431606292725), (0.95959597826004028, 0.96470588445663452, 0.96470588445663452), (0.96464645862579346, 0.9686274528503418, 0.9686274528503418), (0.96969699859619141, 0.97254902124404907, 0.97254902124404907), (0.97474747896194458, 0.97647058963775635, 0.97647058963775635), (0.97979795932769775, 0.98039215803146362, 0.98039215803146362), (0.9848484992980957, 0.9843137264251709, 0.9843137264251709), (0.98989897966384888, 0.98823529481887817, 0.98823529481887817), (0.99494951963424683, 0.99215686321258545, 0.99215686321258545), (1.0, 0.99607843160629272, 0.99607843160629272)], green = [(0.0, 0.0, 0.0), (0.0050505050458014011, 0.035294119268655777, 0.035294119268655777), (0.010101010091602802, 0.074509806931018829, 0.074509806931018829), (0.015151515603065491, 0.10980392247438431, 0.10980392247438431), (0.020202020183205605, 0.14901961386203766, 0.14901961386203766), (0.025252524763345718, 0.18431372940540314, 0.18431372940540314), (0.030303031206130981, 0.22352941334247589, 0.22352941334247589), (0.035353533923625946, 0.25882354378700256, 0.25882354378700256), (0.040404040366411209, 0.29803922772407532, 0.29803922772407532), (0.045454546809196472, 0.3333333432674408, 0.3333333432674408), (0.050505049526691437, 0.37254902720451355, 0.37254902720451355), (0.0555555559694767, 0.36862745881080627, 0.36862745881080627), (0.060606062412261963, 0.3333333432674408, 0.3333333432674408), (0.065656565129756927, 0.29411765933036804, 0.29411765933036804), (0.070707067847251892, 0.25882354378700256, 0.25882354378700256), (0.075757578015327454, 0.21960784494876862, 0.21960784494876862), (0.080808080732822418, 0.18431372940540314, 0.18431372940540314), (0.085858583450317383, 0.14509804546833038, 0.14509804546833038), (0.090909093618392944, 0.10980392247438431, 0.10980392247438431), (0.095959596335887909, 0.070588238537311554, 0.070588238537311554), (0.10101009905338287, 0.035294119268655777, 0.035294119268655777), (0.10606060922145844, 0.0, 0.0), (0.1111111119389534, 0.074509806931018829, 0.074509806931018829), (0.11616161465644836, 0.14509804546833038, 0.14509804546833038), (0.12121212482452393, 0.21568627655506134, 0.21568627655506134), (0.12626262009143829, 0.28627452254295349, 0.28627452254295349), (0.13131313025951385, 0.36078432202339172, 0.36078432202339172), (0.13636364042758942, 0.43137255311012268, 0.43137255311012268), (0.14141413569450378, 0.50196081399917603, 0.50196081399917603), (0.14646464586257935, 0.57254904508590698, 0.57254904508590698), (0.15151515603065491, 0.64705884456634521, 0.64705884456634521), (0.15656565129756927, 0.71764707565307617, 0.71764707565307617), (0.16161616146564484, 0.7607843279838562, 0.7607843279838562), (0.1666666716337204, 0.78431373834609985, 0.78431373834609985), (0.17171716690063477, 0.80784314870834351, 0.80784314870834351), (0.17676767706871033, 0.83137255907058716, 0.83137255907058716), (0.18181818723678589, 0.85490196943283081, 0.85490196943283081), (0.18686868250370026, 0.88235294818878174, 0.88235294818878174), (0.19191919267177582, 0.90588235855102539, 0.90588235855102539), (0.19696970283985138, 0.92941176891326904, 0.92941176891326904), (0.20202019810676575, 0.9529411792755127, 0.9529411792755127), (0.20707070827484131, 0.97647058963775635, 0.97647058963775635), (0.21212121844291687, 0.99607843160629272, 0.99607843160629272), (0.21717171370983124, 0.99607843160629272, 0.99607843160629272), (0.2222222238779068, 0.99215686321258545, 0.99215686321258545), (0.22727273404598236, 0.99215686321258545, 0.99215686321258545), (0.23232322931289673, 0.99215686321258545, 0.99215686321258545), (0.23737373948097229, 0.98823529481887817, 0.98823529481887817), (0.24242424964904785, 0.98823529481887817, 0.98823529481887817), (0.24747474491596222, 0.9843137264251709, 0.9843137264251709), (0.25252524018287659, 0.9843137264251709, 0.9843137264251709), (0.25757575035095215, 0.98039215803146362, 0.98039215803146362), (0.26262626051902771, 0.98039215803146362, 0.98039215803146362), (0.26767677068710327, 0.98039215803146362, 0.98039215803146362), (0.27272728085517883, 0.98039215803146362, 0.98039215803146362), (0.27777779102325439, 0.9843137264251709, 0.9843137264251709), (0.28282827138900757, 0.9843137264251709, 0.9843137264251709), (0.28787878155708313, 0.98823529481887817, 0.98823529481887817), (0.29292929172515869, 0.98823529481887817, 0.98823529481887817), (0.29797980189323425, 0.99215686321258545, 0.99215686321258545), (0.30303031206130981, 0.99215686321258545, 0.99215686321258545), (0.30808082222938538, 0.99607843160629272, 0.99607843160629272), (0.31313130259513855, 0.99607843160629272, 0.99607843160629272), (0.31818181276321411, 0.99607843160629272, 0.99607843160629272), (0.32323232293128967, 0.97647058963775635, 0.97647058963775635), (0.32828283309936523, 0.95686274766921997, 0.95686274766921997), (0.3333333432674408, 0.93725490570068359, 0.93725490570068359), (0.33838382363319397, 0.92156863212585449, 0.92156863212585449), (0.34343433380126953, 0.90196079015731812, 0.90196079015731812), (0.34848484396934509, 0.88235294818878174, 0.88235294818878174), (0.35353535413742065, 0.86274510622024536, 0.86274510622024536), (0.35858586430549622, 0.84705883264541626, 0.84705883264541626), (0.36363637447357178, 0.82745099067687988, 0.82745099067687988), (0.36868685483932495, 0.80784314870834351, 0.80784314870834351), (0.37373736500740051, 0.81568628549575806, 0.81568628549575806), (0.37878787517547607, 0.83529412746429443, 0.83529412746429443), (0.38383838534355164, 0.85098040103912354, 0.85098040103912354), (0.3888888955116272, 0.87058824300765991, 0.87058824300765991), (0.39393940567970276, 0.89019608497619629, 0.89019608497619629), (0.39898988604545593, 0.90980392694473267, 0.90980392694473267), (0.40404039621353149, 0.92549020051956177, 0.92549020051956177), (0.40909090638160706, 0.94509804248809814, 0.94509804248809814), (0.41414141654968262, 0.96470588445663452, 0.96470588445663452), (0.41919192671775818, 0.9843137264251709, 0.9843137264251709), (0.42424243688583374, 1.0, 1.0), (0.42929291725158691, 1.0, 1.0), (0.43434342741966248, 1.0, 1.0), (0.43939393758773804, 1.0, 1.0), (0.4444444477558136, 1.0, 1.0), (0.44949495792388916, 1.0, 1.0), (0.45454546809196472, 1.0, 1.0), (0.4595959484577179, 1.0, 1.0), (0.46464645862579346, 1.0, 1.0), (0.46969696879386902, 1.0, 1.0), (0.47474747896194458, 1.0, 1.0), (0.47979798913002014, 1.0, 1.0), (0.4848484992980957, 1.0, 1.0), (0.48989897966384888, 1.0, 1.0), (0.49494948983192444, 1.0, 1.0), (0.5, 1.0, 1.0), (0.50505048036575317, 1.0, 1.0), (0.51010102033615112, 1.0, 1.0), (0.5151515007019043, 1.0, 1.0), (0.52020204067230225, 1.0, 1.0), (0.52525252103805542, 1.0, 1.0), (0.53030300140380859, 0.99215686321258545, 0.99215686321258545), (0.53535354137420654, 0.98039215803146362, 0.98039215803146362), (0.54040402173995972, 0.96470588445663452, 0.96470588445663452), (0.54545456171035767, 0.94901961088180542, 0.94901961088180542), (0.55050504207611084, 0.93333333730697632, 0.93333333730697632), (0.55555558204650879, 0.91764706373214722, 0.91764706373214722), (0.56060606241226196, 0.90588235855102539, 0.90588235855102539), (0.56565654277801514, 0.89019608497619629, 0.89019608497619629), (0.57070708274841309, 0.87450981140136719, 0.87450981140136719), (0.57575756311416626, 0.85882353782653809, 0.85882353782653809), (0.58080810308456421, 0.84313726425170898, 0.84313726425170898), (0.58585858345031738, 0.83137255907058716, 0.83137255907058716), (0.59090906381607056, 0.81960785388946533, 0.81960785388946533), (0.59595960378646851, 0.81176471710205078, 0.81176471710205078), (0.60101008415222168, 0.80000001192092896, 0.80000001192092896), (0.60606062412261963, 0.78823530673980713, 0.78823530673980713), (0.6111111044883728, 0.7764706015586853, 0.7764706015586853), (0.61616164445877075, 0.76470589637756348, 0.76470589637756348), (0.62121212482452393, 0.75294119119644165, 0.75294119119644165), (0.6262626051902771, 0.74117648601531982, 0.74117648601531982), (0.63131314516067505, 0.729411780834198, 0.729411780834198), (0.63636362552642822, 0.70980393886566162, 0.70980393886566162), (0.64141416549682617, 0.66666668653488159, 0.66666668653488159), (0.64646464586257935, 0.62352943420410156, 0.62352943420410156), (0.65151512622833252, 0.58039218187332153, 0.58039218187332153), (0.65656566619873047, 0.5372549295425415, 0.5372549295425415), (0.66161614656448364, 0.49411764740943909, 0.49411764740943909), (0.66666668653488159, 0.45098039507865906, 0.45098039507865906), (0.67171716690063477, 0.40392157435417175, 0.40392157435417175), (0.67676764726638794, 0.36078432202339172, 0.36078432202339172), (0.68181818723678589, 0.31764706969261169, 0.31764706969261169), (0.68686866760253906, 0.27450981736183167, 0.27450981736183167), (0.69191920757293701, 0.24705882370471954, 0.24705882370471954), (0.69696968793869019, 0.21960784494876862, 0.21960784494876862), (0.70202022790908813, 0.19607843458652496, 0.19607843458652496), (0.70707070827484131, 0.16862745583057404, 0.16862745583057404), (0.71212118864059448, 0.14509804546833038, 0.14509804546833038), (0.71717172861099243, 0.11764705926179886, 0.11764705926179886), (0.72222220897674561, 0.090196080505847931, 0.090196080505847931), (0.72727274894714355, 0.066666670143604279, 0.066666670143604279), (0.73232322931289673, 0.039215687662363052, 0.039215687662363052), (0.7373737096786499, 0.015686275437474251, 0.015686275437474251), (0.74242424964904785, 0.0, 0.0), (0.74747473001480103, 0.0, 0.0), (0.75252526998519897, 0.0, 0.0), (0.75757575035095215, 0.0, 0.0), (0.7626262903213501, 0.0, 0.0), (0.76767677068710327, 0.0, 0.0), (0.77272725105285645, 0.0, 0.0), (0.77777779102325439, 0.0, 0.0), (0.78282827138900757, 0.0, 0.0), (0.78787881135940552, 0.0, 0.0), (0.79292929172515869, 0.0, 0.0), (0.79797977209091187, 0.015686275437474251, 0.015686275437474251), (0.80303031206130981, 0.031372550874948502, 0.031372550874948502), (0.80808079242706299, 0.050980392843484879, 0.050980392843484879), (0.81313133239746094, 0.066666670143604279, 0.066666670143604279), (0.81818181276321411, 0.086274512112140656, 0.086274512112140656), (0.82323235273361206, 0.10588235408067703, 0.10588235408067703), (0.82828283309936523, 0.12156862765550613, 0.12156862765550613), (0.83333331346511841, 0.14117647707462311, 0.14117647707462311), (0.83838385343551636, 0.15686275064945221, 0.15686275064945221), (0.84343433380126953, 0.17647059261798859, 0.17647059261798859), (0.84848487377166748, 0.20000000298023224, 0.20000000298023224), (0.85353535413742065, 0.23137255012989044, 0.23137255012989044), (0.85858583450317383, 0.25882354378700256, 0.25882354378700256), (0.86363637447357178, 0.29019609093666077, 0.29019609093666077), (0.86868685483932495, 0.32156863808631897, 0.32156863808631897), (0.8737373948097229, 0.35294118523597717, 0.35294118523597717), (0.87878787517547607, 0.38431373238563538, 0.38431373238563538), (0.88383835554122925, 0.41568627953529358, 0.41568627953529358), (0.8888888955116272, 0.44313725829124451, 0.44313725829124451), (0.89393937587738037, 0.47450980544090271, 0.47450980544090271), (0.89898991584777832, 0.5058823823928833, 0.5058823823928833), (0.90404039621353149, 0.52941179275512695, 0.52941179275512695), (0.90909093618392944, 0.55294120311737061, 0.55294120311737061), (0.91414141654968262, 0.57254904508590698, 0.57254904508590698), (0.91919189691543579, 0.59607845544815063, 0.59607845544815063), (0.92424243688583374, 0.61960786581039429, 0.61960786581039429), (0.92929291725158691, 0.64313727617263794, 0.64313727617263794), (0.93434345722198486, 0.66274511814117432, 0.66274511814117432), (0.93939393758773804, 0.68627452850341797, 0.68627452850341797), (0.94444441795349121, 0.70980393886566162, 0.70980393886566162), (0.94949495792388916, 0.729411780834198, 0.729411780834198), (0.95454543828964233, 0.75294119119644165, 0.75294119119644165), (0.95959597826004028, 0.78039216995239258, 0.78039216995239258), (0.96464645862579346, 0.80392158031463623, 0.80392158031463623), (0.96969699859619141, 0.82745099067687988, 0.82745099067687988), (0.97474747896194458, 0.85098040103912354, 0.85098040103912354), (0.97979795932769775, 0.87450981140136719, 0.87450981140136719), (0.9848484992980957, 0.90196079015731812, 0.90196079015731812), (0.98989897966384888, 0.92549020051956177, 0.92549020051956177), (0.99494951963424683, 0.94901961088180542, 0.94901961088180542), (1.0, 0.97254902124404907, 0.97254902124404907)], blue = [(0.0, 0.50196081399917603, 0.50196081399917603), (0.0050505050458014011, 0.45098039507865906, 0.45098039507865906), (0.010101010091602802, 0.40392157435417175, 0.40392157435417175), (0.015151515603065491, 0.35686275362968445, 0.35686275362968445), (0.020202020183205605, 0.30980393290519714, 0.30980393290519714), (0.025252524763345718, 0.25882354378700256, 0.25882354378700256), (0.030303031206130981, 0.21176470816135406, 0.21176470816135406), (0.035353533923625946, 0.16470588743686676, 0.16470588743686676), (0.040404040366411209, 0.11764705926179886, 0.11764705926179886), (0.045454546809196472, 0.070588238537311554, 0.070588238537311554), (0.050505049526691437, 0.019607843831181526, 0.019607843831181526), (0.0555555559694767, 0.047058824449777603, 0.047058824449777603), (0.060606062412261963, 0.14509804546833038, 0.14509804546833038), (0.065656565129756927, 0.23921568691730499, 0.23921568691730499), (0.070707067847251892, 0.3333333432674408, 0.3333333432674408), (0.075757578015327454, 0.43137255311012268, 0.43137255311012268), (0.080808080732822418, 0.52549022436141968, 0.52549022436141968), (0.085858583450317383, 0.61960786581039429, 0.61960786581039429), (0.090909093618392944, 0.71764707565307617, 0.71764707565307617), (0.095959596335887909, 0.81176471710205078, 0.81176471710205078), (0.10101009905338287, 0.90588235855102539, 0.90588235855102539), (0.10606060922145844, 1.0, 1.0), (0.1111111119389534, 1.0, 1.0), (0.11616161465644836, 1.0, 1.0), (0.12121212482452393, 1.0, 1.0), (0.12626262009143829, 1.0, 1.0), (0.13131313025951385, 1.0, 1.0), (0.13636364042758942, 1.0, 1.0), (0.14141413569450378, 1.0, 1.0), (0.14646464586257935, 1.0, 1.0), (0.15151515603065491, 1.0, 1.0), (0.15656565129756927, 1.0, 1.0), (0.16161616146564484, 1.0, 1.0), (0.1666666716337204, 1.0, 1.0), (0.17171716690063477, 1.0, 1.0), (0.17676767706871033, 1.0, 1.0), (0.18181818723678589, 1.0, 1.0), (0.18686868250370026, 1.0, 1.0), (0.19191919267177582, 1.0, 1.0), (0.19696970283985138, 1.0, 1.0), (0.20202019810676575, 1.0, 1.0), (0.20707070827484131, 1.0, 1.0), (0.21212121844291687, 0.99215686321258545, 0.99215686321258545), (0.21717171370983124, 0.95686274766921997, 0.95686274766921997), (0.2222222238779068, 0.91764706373214722, 0.91764706373214722), (0.22727273404598236, 0.88235294818878174, 0.88235294818878174), (0.23232322931289673, 0.84313726425170898, 0.84313726425170898), (0.23737373948097229, 0.80392158031463623, 0.80392158031463623), (0.24242424964904785, 0.76862746477127075, 0.76862746477127075), (0.24747474491596222, 0.729411780834198, 0.729411780834198), (0.25252524018287659, 0.69019609689712524, 0.69019609689712524), (0.25757575035095215, 0.65490198135375977, 0.65490198135375977), (0.26262626051902771, 0.61568629741668701, 0.61568629741668701), (0.26767677068710327, 0.56470590829849243, 0.56470590829849243), (0.27272728085517883, 0.50980395078659058, 0.50980395078659058), (0.27777779102325439, 0.45098039507865906, 0.45098039507865906), (0.28282827138900757, 0.39215686917304993, 0.39215686917304993), (0.28787878155708313, 0.3333333432674408, 0.3333333432674408), (0.29292929172515869, 0.27843138575553894, 0.27843138575553894), (0.29797980189323425, 0.21960784494876862, 0.21960784494876862), (0.30303031206130981, 0.16078431904315948, 0.16078431904315948), (0.30808082222938538, 0.10588235408067703, 0.10588235408067703), (0.31313130259513855, 0.047058824449777603, 0.047058824449777603), (0.31818181276321411, 0.0, 0.0), (0.32323232293128967, 0.0, 0.0), (0.32828283309936523, 0.0, 0.0), (0.3333333432674408, 0.0, 0.0), (0.33838382363319397, 0.0, 0.0), (0.34343433380126953, 0.0, 0.0), (0.34848484396934509, 0.0, 0.0), (0.35353535413742065, 0.0, 0.0), (0.35858586430549622, 0.0, 0.0), (0.36363637447357178, 0.0, 0.0), (0.36868685483932495, 0.0, 0.0), (0.37373736500740051, 0.0, 0.0), (0.37878787517547607, 0.0, 0.0), (0.38383838534355164, 0.0, 0.0), (0.3888888955116272, 0.0, 0.0), (0.39393940567970276, 0.0, 0.0), (0.39898988604545593, 0.0, 0.0), (0.40404039621353149, 0.0, 0.0), (0.40909090638160706, 0.0, 0.0), (0.41414141654968262, 0.0, 0.0), (0.41919192671775818, 0.0, 0.0), (0.42424243688583374, 0.0039215688593685627, 0.0039215688593685627), (0.42929291725158691, 0.027450980618596077, 0.027450980618596077), (0.43434342741966248, 0.050980392843484879, 0.050980392843484879), (0.43939393758773804, 0.074509806931018829, 0.074509806931018829), (0.4444444477558136, 0.094117648899555206, 0.094117648899555206), (0.44949495792388916, 0.11764705926179886, 0.11764705926179886), (0.45454546809196472, 0.14117647707462311, 0.14117647707462311), (0.4595959484577179, 0.16470588743686676, 0.16470588743686676), (0.46464645862579346, 0.18823529779911041, 0.18823529779911041), (0.46969696879386902, 0.21176470816135406, 0.21176470816135406), (0.47474747896194458, 0.23529411852359772, 0.23529411852359772), (0.47979798913002014, 0.22352941334247589, 0.22352941334247589), (0.4848484992980957, 0.20000000298023224, 0.20000000298023224), (0.48989897966384888, 0.17647059261798859, 0.17647059261798859), (0.49494948983192444, 0.15294118225574493, 0.15294118225574493), (0.5, 0.12941177189350128, 0.12941177189350128), (0.50505048036575317, 0.10980392247438431, 0.10980392247438431), (0.51010102033615112, 0.086274512112140656, 0.086274512112140656), (0.5151515007019043, 0.062745101749897003, 0.062745101749897003), (0.52020204067230225, 0.039215687662363052, 0.039215687662363052), (0.52525252103805542, 0.015686275437474251, 0.015686275437474251), (0.53030300140380859, 0.0, 0.0), (0.53535354137420654, 0.0, 0.0), (0.54040402173995972, 0.0, 0.0), (0.54545456171035767, 0.0, 0.0), (0.55050504207611084, 0.0, 0.0), (0.55555558204650879, 0.0, 0.0), (0.56060606241226196, 0.0, 0.0), (0.56565654277801514, 0.0, 0.0), (0.57070708274841309, 0.0, 0.0), (0.57575756311416626, 0.0, 0.0), (0.58080810308456421, 0.0, 0.0), (0.58585858345031738, 0.0039215688593685627, 0.0039215688593685627), (0.59090906381607056, 0.0078431377187371254, 0.0078431377187371254), (0.59595960378646851, 0.011764706112444401, 0.011764706112444401), (0.60101008415222168, 0.019607843831181526, 0.019607843831181526), (0.60606062412261963, 0.023529412224888802, 0.023529412224888802), (0.6111111044883728, 0.031372550874948502, 0.031372550874948502), (0.61616164445877075, 0.035294119268655777, 0.035294119268655777), (0.62121212482452393, 0.043137256056070328, 0.043137256056070328), (0.6262626051902771, 0.047058824449777603, 0.047058824449777603), (0.63131314516067505, 0.054901961237192154, 0.054901961237192154), (0.63636362552642822, 0.054901961237192154, 0.054901961237192154), (0.64141416549682617, 0.050980392843484879, 0.050980392843484879), (0.64646464586257935, 0.043137256056070328, 0.043137256056070328), (0.65151512622833252, 0.039215687662363052, 0.039215687662363052), (0.65656566619873047, 0.031372550874948502, 0.031372550874948502), (0.66161614656448364, 0.027450980618596077, 0.027450980618596077), (0.66666668653488159, 0.019607843831181526, 0.019607843831181526), (0.67171716690063477, 0.015686275437474251, 0.015686275437474251), (0.67676764726638794, 0.011764706112444401, 0.011764706112444401), (0.68181818723678589, 0.0039215688593685627, 0.0039215688593685627), (0.68686866760253906, 0.0, 0.0), (0.69191920757293701, 0.0, 0.0), (0.69696968793869019, 0.0, 0.0), (0.70202022790908813, 0.0, 0.0), (0.70707070827484131, 0.0, 0.0), (0.71212118864059448, 0.0, 0.0), (0.71717172861099243, 0.0, 0.0), (0.72222220897674561, 0.0, 0.0), (0.72727274894714355, 0.0, 0.0), (0.73232322931289673, 0.0, 0.0), (0.7373737096786499, 0.0, 0.0), (0.74242424964904785, 0.031372550874948502, 0.031372550874948502), (0.74747473001480103, 0.12941177189350128, 0.12941177189350128), (0.75252526998519897, 0.22352941334247589, 0.22352941334247589), (0.75757575035095215, 0.32156863808631897, 0.32156863808631897), (0.7626262903213501, 0.41568627953529358, 0.41568627953529358), (0.76767677068710327, 0.50980395078659058, 0.50980395078659058), (0.77272725105285645, 0.60784316062927246, 0.60784316062927246), (0.77777779102325439, 0.70196080207824707, 0.70196080207824707), (0.78282827138900757, 0.79607844352722168, 0.79607844352722168), (0.78787881135940552, 0.89411765336990356, 0.89411765336990356), (0.79292929172515869, 0.98823529481887817, 0.98823529481887817), (0.79797977209091187, 1.0, 1.0), (0.80303031206130981, 1.0, 1.0), (0.80808079242706299, 1.0, 1.0), (0.81313133239746094, 1.0, 1.0), (0.81818181276321411, 1.0, 1.0), (0.82323235273361206, 1.0, 1.0), (0.82828283309936523, 1.0, 1.0), (0.83333331346511841, 1.0, 1.0), (0.83838385343551636, 1.0, 1.0), (0.84343433380126953, 1.0, 1.0), (0.84848487377166748, 0.99607843160629272, 0.99607843160629272), (0.85353535413742065, 0.98823529481887817, 0.98823529481887817), (0.85858583450317383, 0.9843137264251709, 0.9843137264251709), (0.86363637447357178, 0.97647058963775635, 0.97647058963775635), (0.86868685483932495, 0.9686274528503418, 0.9686274528503418), (0.8737373948097229, 0.96470588445663452, 0.96470588445663452), (0.87878787517547607, 0.95686274766921997, 0.95686274766921997), (0.88383835554122925, 0.94901961088180542, 0.94901961088180542), (0.8888888955116272, 0.94509804248809814, 0.94509804248809814), (0.89393937587738037, 0.93725490570068359, 0.93725490570068359), (0.89898991584777832, 0.93333333730697632, 0.93333333730697632), (0.90404039621353149, 0.93333333730697632, 0.93333333730697632), (0.90909093618392944, 0.93725490570068359, 0.93725490570068359), (0.91414141654968262, 0.93725490570068359, 0.93725490570068359), (0.91919189691543579, 0.94117647409439087, 0.94117647409439087), (0.92424243688583374, 0.94509804248809814, 0.94509804248809814), (0.92929291725158691, 0.94509804248809814, 0.94509804248809814), (0.93434345722198486, 0.94901961088180542, 0.94901961088180542), (0.93939393758773804, 0.9529411792755127, 0.9529411792755127), (0.94444441795349121, 0.9529411792755127, 0.9529411792755127), (0.94949495792388916, 0.95686274766921997, 0.95686274766921997), (0.95454543828964233, 0.96078431606292725, 0.96078431606292725), (0.95959597826004028, 0.96470588445663452, 0.96470588445663452), (0.96464645862579346, 0.9686274528503418, 0.9686274528503418), (0.96969699859619141, 0.97254902124404907, 0.97254902124404907), (0.97474747896194458, 0.97647058963775635, 0.97647058963775635), (0.97979795932769775, 0.98039215803146362, 0.98039215803146362), (0.9848484992980957, 0.9843137264251709, 0.9843137264251709), (0.98989897966384888, 0.98823529481887817, 0.98823529481887817), (0.99494951963424683, 0.99215686321258545, 0.99215686321258545), (1.0, 0.99607843160629272, 0.99607843160629272)], )
    # Delegate construction of the actual mapper to the project's ColorMapper.
    return ColorMapper.from_segment_map(_data, range=range, **traits)
f4ade6627bdaba25ae873c76b5b0970f7a650a70
3,635,654
def main(request, username):
    """User > Main: render a user's main page, serving from cache when possible."""
    cache_key = CacheHelper.ns('user:views:main', username=username)
    context = CacheHelper.io.get(cache_key)
    if context is None:
        # Cache miss: build the page context from scratch.
        context, user = MainUserHelper.build_response(request, username)
        if context['status'] == 'not_found':
            raise Http404
        timeline = UserTimelineHelper.build_response(
            request=request,
            user=user,
        )
        context.update(timeline)
        # Cache the assembled context for 30 seconds.
        CacheHelper.io.set(cache_key, context, 30)
    return render(request, 'user/user_main.jade', context)
b605987f395b227a481012257c0c04add787cee5
3,635,655
import copy


def checksum2(path):
    """Calculate the checksum of a TSV.

    The checksum of a TSV is calculated as the sum of the division between
    the only two numbers in each row that evenly divide each other.

    Arguments
    ---------
    path : str
        Path to a TSV file.

    Returns
    -------
    The checksum of the file.
    """
    lines = read_tsv(path)

    def _even_division(line):
        # Return the first whole-number quotient between two distinct
        # entries of the row, in either direction.
        for i, a in enumerate(line):
            # Copy the line and remove the current element so that `a`
            # is never divided by itself.
            # BUG FIX: the original called copy(line) after `import copy`,
            # which raises TypeError ('module' object is not callable);
            # copy.copy() is the callable.
            line_copy = copy.copy(line)
            line_copy.pop(i)
            for b in line_copy:
                if is_int(a / b):
                    return int(a / b)
                if is_int(b / a):
                    return int(b / a)
        raise ValueError('No even divisions found!')

    divisions = [_even_division(line) for line in lines]
    return sum(divisions)
01ccbfbd1a2c4258105d5bf9dd46395ebd50b080
3,635,656
import configparser


def load_config(config_file_path):
    """
    Load the config ini, parse settings to WORC

    Args:
        config_file_path (String): path of the .ini config file

    Returns:
        settings_dict (dict): dict with the loaded settings
    """
    settings = configparser.ConfigParser()
    settings.read(config_file_path)
    # NOTE: removed the leftover debug `print(settings.keys())` call.

    # Helpers for the recurring "comma-separated list" ini values.
    def _strings(section, key):
        # Comma-separated list of strings.
        return [item.strip() for item in settings[section][key].split(',')]

    def _ints(section, key):
        # Comma-separated list of integers.
        return [int(item.strip()) for item in settings[section][key].split(',')]

    def _floats(section, key):
        # Comma-separated list of floats.
        return [float(item.strip()) for item in settings[section][key].split(',')]

    settings_dict = {section: dict() for section in
                     ['General', 'CrossValidation', 'Labels',
                      'HyperOptimization', 'Classification',
                      'SelectFeatGroup', 'Featsel', 'FeatureScaling',
                      'SampleProcessing', 'Imputation', 'Ensemble']}

    # General settings
    general = settings_dict['General']
    general['cross_validation'] = settings['General'].getboolean('cross_validation')
    general['Joblib_ncores'] = settings['General'].getint('Joblib_ncores')
    general['Joblib_backend'] = str(settings['General']['Joblib_backend'])
    general['tempsave'] = settings['General'].getboolean('tempsave')
    general['FeatureCalculator'] = str(settings['General']['FeatureCalculator'])

    # Feature selection settings
    featsel = settings_dict['Featsel']
    for key in ['Variance', 'SelectFromModel', 'GroupwiseSearch', 'UsePCA',
                'PCAType', 'StatisticalTestUse', 'StatisticalTestMetric',
                'ReliefUse']:
        featsel[key] = _strings('Featsel', key)
    featsel['StatisticalTestThreshold'] = _floats('Featsel', 'StatisticalTestThreshold')
    for key in ['ReliefNN', 'ReliefSampleSize', 'ReliefDistanceP',
                'ReliefNumFeatures']:
        featsel[key] = _ints('Featsel', key)

    # Imputation settings
    settings_dict['Imputation']['use'] = _strings('Imputation', 'use')
    settings_dict['Imputation']['strategy'] = _strings('Imputation', 'strategy')
    settings_dict['Imputation']['n_neighbors'] = _ints('Imputation', 'n_neighbors')

    # Feature group selection: every option is a comma-separated string list.
    for key in settings['SelectFeatGroup'].keys():
        settings_dict['SelectFeatGroup'][key] = _strings('SelectFeatGroup', key)

    # Classification options
    classification = settings_dict['Classification']
    classification['fastr'] = settings['Classification'].getboolean('fastr')
    classification['fastr_plugin'] = str(settings['Classification']['fastr_plugin'])
    for key in ['classifiers', 'SVMKernel', 'LRpenalty', 'LDA_solver',
                'SGD_loss', 'SGD_penalty']:
        classification[key] = _strings('Classification', key)
    for key in ['max_iter', 'SVMC', 'SVMdegree', 'SVMcoef0', 'SVMgamma',
                'RFn_estimators', 'RFmin_samples_split', 'RFmax_depth',
                'LDA_shrinkage', 'QDA_reg_param', 'ElasticNet_alpha',
                'SGD_alpha', 'CNB_alpha']:
        classification[key] = _ints('Classification', key)
    for key in ['LRC', 'ElasticNet_l1_ratio', 'SGD_l1_ratio']:
        classification[key] = _floats('Classification', key)

    # Cross validation settings
    settings_dict['CrossValidation']['N_iterations'] = \
        settings['CrossValidation'].getint('N_iterations')
    settings_dict['CrossValidation']['test_size'] = \
        settings['CrossValidation'].getfloat('test_size')

    # Label (genetic) settings
    settings_dict['Labels']['label_names'] = _strings('Labels', 'label_names')
    settings_dict['Labels']['modus'] = str(settings['Labels']['modus'])

    # Settings for hyperparameter optimization
    hyper = settings_dict['HyperOptimization']
    hyper['scoring_method'] = str(settings['HyperOptimization']['scoring_method'])
    hyper['test_size'] = settings['HyperOptimization'].getfloat('test_size')
    # Stored under 'N_iter' but read from the 'N_iterations' ini key.
    hyper['N_iter'] = settings['HyperOptimization'].getint('N_iterations')
    hyper['n_jobspercore'] = int(settings['HyperOptimization']['n_jobspercore'])

    # Feature scaling settings
    settings_dict['FeatureScaling']['scale_features'] = \
        settings['FeatureScaling'].getboolean('scale_features')
    settings_dict['FeatureScaling']['scaling_method'] = \
        str(settings['FeatureScaling']['scaling_method'])

    # Sample processing settings
    sample = settings_dict['SampleProcessing']
    sample['SMOTE'] = _strings('SampleProcessing', 'SMOTE')
    sample['SMOTE_ratio'] = _ints('SampleProcessing', 'SMOTE_ratio')
    sample['SMOTE_neighbors'] = _ints('SampleProcessing', 'SMOTE_neighbors')
    sample['Oversampling'] = _strings('SampleProcessing', 'Oversampling')

    # Ensemble settings
    settings_dict['Ensemble']['Use'] = settings['Ensemble'].getboolean('Use')

    return settings_dict
3f85f3ccd9e635cb9ce021d424ed97e98cbfb75c
3,635,657
import pathlib


def find_toplevel() -> pathlib.Path:
    """Return the repository's top-level directory as reported by git."""
    toplevel = cmd_output(["rev-parse", "--show-toplevel"]).strip()
    return pathlib.Path(toplevel)
3d2cc723aadcec69b0d86b879e5d720f15d2c5da
3,635,658
def db20(value):
    """Convert a voltage-like quantity to decibels (20 * log10 of magnitude)."""
    magnitude = np.abs(value)
    return 20 * log10(magnitude)
ef261696d5fd4b3a0f841411e03fc9897a9a9a93
3,635,659
from typing import Any, Optional


def construct_class_by_name(*args, class_name: Optional[str] = None, **kwargs) -> Any:
    """Find the Python class with the given name and construct it with the given
    arguments.

    FIX: annotate `class_name` as Optional[str] — the original `class_name: str
    = None` is an implicit-Optional annotation disallowed by PEP 484.
    """
    # Delegates to the generic by-name call helper; a class is just a callable.
    return call_func_by_name(*args, func_name=class_name, **kwargs)
a666bf509513a8098b0c4deca58141a2957741fb
3,635,660
import csv


def simple_file_scan(reader, bucket_name, region_name, file_name):
    """ Does an initial scan of the file, figuring out the file row count and which
        rows are too long/short

        Args:
            reader: the csv reader
            bucket_name: the bucket to pull from
            region_name: the region to pull from
            file_name: name of the file to pull

        Returns:
            file_row_count: the number of lines in the file
            short_rows: a list of row numbers that have too few fields
            long_rows: a list of rows that have too many fields
    """
    file_row_count = 0
    header_length = 0
    short_rows = []
    long_rows = []

    # FIX: use a context manager so the handle is released even when an
    # exception (e.g. UnicodeDecodeError on non-UTF8 content) is raised
    # mid-scan; the original only closed the file on the success path.
    with open(reader.get_filename(region_name, bucket_name, file_name),
              encoding='utf-8') as temp_file:
        # Pick whichever delimiter dominates the header line.
        header_line = temp_file.readline()
        delimiter = '|' if header_line.count('|') > header_line.count(',') else ','
        temp_file.seek(0)

        for line in csv.reader(temp_file, delimiter=delimiter):
            if not line:
                # Skip blank rows; they count neither toward the total nor
                # toward the short/long lists.
                continue
            file_row_count += 1
            line_length = len(line)
            if header_length == 0:
                # First non-empty row sets the expected field count.
                header_length = line_length
            elif line_length < header_length:
                short_rows.append(file_row_count)
            elif line_length > header_length:
                long_rows.append(file_row_count)

    return file_row_count, short_rows, long_rows
ccd1aad870124a9b48f05bbe0d7fe510ae36bc33
3,635,661
import typing
import torch
import copy


def random_plane(model: typing.Union[torch.nn.Module, ModelWrapper],
                 metric: Metric,
                 distance=1,
                 steps=20,
                 normalization='filter',
                 deepcopy_model=False) -> np.ndarray:
    """
    Returns the computed value of the evaluation function applied to the model or
    agent along a planar subspace of the parameter space defined by a start point
    and two randomly sampled directions.

    The models supplied can be either torch.nn.Module models, or ModelWrapper
    objects from the loss_landscapes library for more complex cases.

    That is, given a neural network model, whose parameters define a point in
    parameter space, and a distance, the loss is computed at 'steps' * 'steps'
    points along the plane defined by the two random directions, from the start
    point up to the maximum distance in both directions.

    Note that the dimensionality of the model parameters has an impact on the
    expected length of a uniformly sampled vector in parameter space. That is, the
    more parameters a model has, the longer the distance in the random vector's
    direction should be, in order to see meaningful change in individual
    parameters. Normalizing the direction vector according to the model's current
    parameter values, which is supported through the 'normalization' parameter,
    helps reduce the impact of the distance parameter. In future releases, the
    distance parameter will refer to the maximum change in an individual
    parameter, rather than the length of the random direction vector.

    Note also that a simple planar approximation with randomly sampled directions
    can produce misleading approximations of the loss landscape due to the scale
    invariance of neural networks. The sharpness/flatness of minima or maxima is
    affected by the scale of the neural network weights. For more details, see
    `https://arxiv.org/abs/1712.09913v3`. It is recommended to normalize the
    directions, preferably with the 'filter' option.

    The Metric supplied has to be a subclass of the loss_landscapes.metrics.Metric
    class, and must specify a procedure whereby the model passed to it is evaluated
    on the task of interest, returning the resulting quantity (such as loss,
    loss gradient, etc).

    :param model: the model defining the origin point of the plane in parameter space
    :param metric: function of form evaluation_f(model), used to evaluate model loss
    :param distance: maximum distance in parameter space from the start point
    :param steps: at how many steps from start to end the model is evaluated
    :param normalization: normalization of direction vectors, must be one of 'filter', 'layer', 'model'
    :param deepcopy_model: indicates whether the method will deepcopy the model(s) to avoid aliasing
    :return: 1-d array of loss values along the line connecting start and end models
    """
    # Optionally deepcopy so that the in-place parameter updates below do not
    # mutate the caller's model.
    model_start_wrapper = wrap_model(copy.deepcopy(model) if deepcopy_model else model)

    # Two random direction vectors spanning the plane; dir_two is constructed
    # orthogonal to dir_one.
    start_point = model_start_wrapper.get_module_parameters()
    dir_one = rand_u_like(start_point)
    dir_two = orthogonal_to(dir_one)

    # Optional normalization of both direction vectors relative to the start point.
    if normalization == 'model':
        dir_one.model_normalize_(start_point)
        dir_two.model_normalize_(start_point)
    elif normalization == 'layer':
        dir_one.layer_normalize_(start_point)
        dir_two.layer_normalize_(start_point)
    elif normalization == 'filter':
        dir_one.filter_normalize_(start_point)
        dir_two.filter_normalize_(start_point)
    elif normalization is None:
        pass
    else:
        raise AttributeError('Unsupported normalization argument. Supported values are model, layer, and filter')

    # scale to match steps and total distance: one application of a direction
    # vector moves (model_norm * distance / steps) through parameter space
    dir_one.mul_(((start_point.model_norm() * distance) / steps) / dir_one.model_norm())
    dir_two.mul_(((start_point.model_norm() * distance) / steps) / dir_two.model_norm())

    # Move start point so that original start params will be in the center of the plot
    # (shift back by half the total extent along each direction, then restore
    # the per-step magnitude of the direction vectors).
    dir_one.mul_(steps / 2)
    dir_two.mul_(steps / 2)
    start_point.sub_(dir_one)
    start_point.sub_(dir_two)
    dir_one.truediv_(steps / 2)
    dir_two.truediv_(steps / 2)

    data_matrix = []
    # evaluate loss in grid of (steps * steps) points, where each column signifies one step
    # along dir_one and each row signifies one step along dir_two. The implementation is again
    # a little convoluted to avoid constructive operations. Fundamentally we generate the matrix
    # [[start_point + (dir_one * i) + (dir_two * j) for j in range(steps)] for i in range(steps)].
    for i in range(steps):
        data_column = []

        for j in range(steps):
            # for every other column, reverse the order in which the column is generated
            # so you can easily use in-place operations to move along dir_two
            # (a boustrophedon/zigzag sweep of the grid).
            if i % 2 == 0:
                start_point.add_(dir_two)
                data_column.append(metric(model_start_wrapper))
            else:
                start_point.sub_(dir_two)
                data_column.insert(0, metric(model_start_wrapper))

        data_matrix.append(data_column)
        start_point.add_(dir_one)

    return np.array(data_matrix)
8c431268a56a1ac929e9b5f272b476cbda64ab70
3,635,662
def _solarize_impl(pil_img, level):
    """Applies PIL Solarize to `pil_img`.

    Inverts all pixel values above a threshold derived from `level`: the
    stronger the level, the lower the threshold and the more of the image is
    inverted. (The original docstring described a vertical translation — a
    copy-paste error from the translate op.)

    Args:
        pil_img: Image in PIL object.
        level: Strength of the operation specified as an Integer from
            [0, `PARAMETER_MAX`].

    Returns:
        A PIL Image that has had Solarize applied to it.
    """
    # Map the [0, PARAMETER_MAX] level onto [0, solarize.max]; threshold
    # 256 - level means level 0 is a no-op.
    level = int_parameter(level, min_max_vals.solarize.max)
    return ImageOps.solarize(pil_img, 256 - level)
d07952a043f61e401cc2c6fc858d43947c68019d
3,635,663
def detect_area(hsv_img, lower_color, upper_color, marker_id, min_size, draw=False):
    """Detects the contour of an object containing a marker based on color

    It always returns the smallest contour which still contains the marker.
    The contour is detected using an image in HSV color space to be robust
    under different lighting conditions. If draw=True the system draws all
    found contours as well as the current smallest one containing the marker
    onto hsv_img.

    :param hsv_img: image in HSV color space in which the contours should be detected
    :type hsv_img: numpy array
    :param lower_color: a 3x1 array containing the lower boundary for the color detection
    :type lower_color: numpy array
    :param upper_color: a 3x1 array containing the upper boundary for the color detection
    :type upper_color: numpy array
    :param marker_id: the ID of a 4x4 aruco marker which identifies the object
    :type marker_id: scalar
    :param min_size: minimal contour area to be considered
    :param draw: (Default value = False)
    """
    # Color detection. A non-positive lower hue means the range wraps around
    # the hue axis (OpenCV hue is 0..179), so build two masks and OR them.
    if lower_color[0] <= 0:
        # BUG FIX: the original aliased the arrays (second_lower = lower_color,
        # second_upper = upper_color), so the in-place hue edits corrupted both
        # ranges AND mutated the caller's arrays. Work on copies instead.
        second_lower = lower_color.copy()
        second_lower[0] = 179 + lower_color[0]
        second_upper = upper_color.copy()
        second_upper[0] = 179
        lower_color = lower_color.copy()
        lower_color[0] = 0
        mask1 = cv2.inRange(hsv_img, lower_color, upper_color)
        mask2 = cv2.inRange(hsv_img, second_lower, second_upper)
        mask = mask1 | mask2
    else:
        mask = cv2.inRange(hsv_img, lower_color, upper_color)

    # TODO careful: depending on opencv version the return may be different
    contours, hierachy = cv2.findContours(mask, cv2.RETR_TREE,
                                          cv2.CHAIN_APPROX_SIMPLE)

    # Marker detection: run on the V channel of the HSV image.
    split_hsv = cv2.split(hsv_img)
    gray = split_hsv[2]
    center_dict = track_aruco_marker(gray, [marker_id])
    center = center_dict[marker_id]

    if center is not None:
        if draw:
            cv2.drawContours(hsv_img, contours, -1, (0, 255, 255), 3)
            cv2.circle(hsv_img, (center[0], center[1]), 7, (90, 255, 255), 7)

        # TODO smallest contour should be real contour encompassing whole image
        row, col = hsv_img.shape[:2]
        smallest_contour = np.array([[0, 0], [0, row], [col, row], [col, 0]])
        # TODO not needed with real contour
        contour_found = 0

        # Keep the smallest sufficiently-large contour that contains the marker.
        for contour in contours:
            marker_in_contour = cv2.pointPolygonTest(contour, tuple(center), False) > 0
            marker_in_contour = marker_in_contour and cv2.contourArea(contour) >= min_size
            if marker_in_contour:
                if cv2.contourArea(contour) <= cv2.contourArea(smallest_contour):
                    contour_found = 1
                    smallest_contour = contour

        if contour_found == 1:
            if draw:
                cv2.drawContours(hsv_img, smallest_contour, -1, (90, 255, 255), 6)
            return smallest_contour

    return None
3d6d86a285fd949c35025059c1b7cb4a6a644549
3,635,664
def payment_callback():
    """Generic payment page callback (通用支付页面回调).

    Reads the order serial number and result from the request, delegates to a
    registered sync-pay notify handler when one exists, otherwise renders a
    success/failure info page.
    """
    data = request.params
    sn = data['sn']
    result = data['result']
    is_success = result == 'SUCCESS'
    handle = get_pay_notify_handle(TransactionType.PAYMENT, NotifyType.Pay.SYNC)
    if handle:
        # Handler signature: (is_success, order serial number)
        return handle(is_success, sn)
    # BUG FIX: the original used '{1}'.format(sn), which raises IndexError
    # because only one positional argument is supplied; index 0 is correct.
    if is_success:
        return render_template('info.html', title='支付结果',
                               msg='支付成功-订单号:{0}'.format(sn))
    return render_template('info.html', title='支付结果',
                           msg='支付失败-订单号:{0}'.format(sn))
dc7c2dfaadf47c00fe355f1c82465c38e8bf7c7c
3,635,665
import os
import sqlite3


def get_users_name(path):
    """Return the registered user info as a dict.

    Parameters
    ----------
    path : str
        Path to the home directory (the database lives at data/list.db
        underneath it).

    Returns
    -------
    dict
        Mapping of user number -> user name read from the `miyano` table.
    """
    db_path = os.path.join(path, 'data', 'list.db')
    with sqlite3.connect(db_path) as conn:
        conn.row_factory = sqlite3.Row
        cur = conn.cursor()
        cur.execute('select * from miyano')
        users = {row['number']: row['name'] for row in cur}
        cur.close()
    return users
4a71b52a4dfa1e40eab62134795944b43a774a73
3,635,666
def get_rest_parameter_state(parameter_parsing_states):
    """
    Gets the rest parameter's parsing state from the given states if there is any.

    Parameters
    ----------
    parameter_parsing_states : `list` of ``ParameterParsingStateBase``
        The created parameter parser state instances.

    Returns
    -------
    parameter_parsing_state : ``ParameterParsingState``, `None`
        The first state whose parameter is a rest parameter, or `None`.
    """
    rest_states = (
        state for state in parameter_parsing_states
        if state.content_parser_parameter.is_rest
    )
    return next(rest_states, None)
e90d1ee848af7666a72d9d0d4fb74e3fedf496fa
3,635,667
def random_user_id() -> str:
    """Create and return a fresh random user id as a string."""
    user_id = generate_random_id()
    return user_id
ee6a8299a81458bc20d4c0879a9ca4ab0741b790
3,635,668
import os
import imp


def import_plugin(name):
    """Tries to import given module

    Loads `<BASE_PATH>/backends/plugins/<name>.py` as a module named
    "p_<name>" and returns the module object. Both syntax errors in the
    plugin source and I/O errors opening the file are re-raised as
    ImportError so callers only have one failure mode to handle.

    NOTE(review): the `imp` module is deprecated and removed in Python 3.12;
    migrating to importlib.util would be needed for newer interpreters.
    """
    path = os.path.join(BASE_PATH, "backends", "plugins", name + ".py")
    try:
        with open(path, 'rb') as f:
            try:
                # Register the loaded module under a "p_" prefix to avoid
                # clashing with any installed module of the same name.
                plugin = imp.load_module(
                    "p_" + name,
                    f,
                    name + '.py',
                    ('.py', 'rb', imp.PY_SOURCE)
                )
            except SyntaxError as e:
                raise ImportError(str(e))
    except IOError as e:
        raise ImportError(str(e))
    return plugin
2be34bd5138ac9e74544fcc335659fcfac02e860
3,635,669
from typing import List


def batch_answer_same_question(question: str, contexts: List[str]) -> List[str]:
    """Answers the question with the given contexts (local mode).

    :param question: The question to answer.
    :type question: str
    :param contexts: The contexts to answer the question with.
    :type contexts: List[str]
    :return: The answers.
    :rtype: List[str]
    """
    # Lazily initialize the module-level answerer model on first use.
    if _answerer is None:
        load_answerer()
    assert _answerer is not None
    tokenizer = get_answerer_tokenizer()
    # One prompt per context, all sharing the same question.
    prompts = [
        answerer_prompt.format(question=question, context=context)
        for context in contexts
    ]
    # Longest tokenized prompt in the batch; used to size generation parameters.
    information = {
        "prompt_length": max(len(tokenizer.encode(prompt)) for prompt in prompts)
    }
    parameters = format_parameters_to_local(answerer_parameters, information)
    response = _answerer(prompts, **parameters)
    # Take the first candidate per prompt and truncate at the stop sequence.
    return [
        cut_on_stop(choices[0]["generated_text"], answerer_parameters["stop"])
        for choices in response
    ]
3e013b793cebbb172c90d054e90bb830fbb7009f
3,635,670
def calculate_log_probs(conditioners, joint_dists):
    """
    Calculates the marginal log probabilities of each feature's values and
    also the conditional log probabilities for the predecessors given in the
    predecessor map.

    FIX: replaced the Python-2-only `xrange` with `range` (NameError on
    Python 3; behaviorally equivalent here).
    """
    # Diagonal entries of the joint distribution table give the marginals.
    log_marginals = [
        N.log(joint_dists[f, f]) for f in range(len(conditioners))
    ]
    log_conditionals = [
        conditional_log_dist(feature, conditioner, joint_dists)
        for feature, conditioner in enumerate(conditioners)
    ]
    return log_marginals, log_conditionals
dd84cf8b76177aeeb90ca6205402e95b3686b421
3,635,671
import json


def validate_response_code(response, expected_res):
    """ Function to validate work order response.
        Input Parameters : response, check_result
        Returns : err_cd (0 on success / expected outcome, 1 on mismatch)"""
    # check expected key of test case
    check_result = {"error": {"code": 5}}
    check_result_key = list(check_result.keys())[0]

    # check response code: the "error" branch compares the numeric code
    # against expected_res; otherwise fall through to the "result" key check.
    if check_result_key in response:
        if "code" in check_result[check_result_key].keys():
            if "code" in response[check_result_key].keys():
                if (response[check_result_key]["code"] == expected_res):
                    err_cd = 0
                    if expected_res == 0:
                        logger.info('SUCCESS: Worker API response "%s"!!',
                                    response[check_result_key]["message"])
                    elif expected_res == 2:
                        logger.info(
                            'Invalid parameter format in response "%s".',
                            response[check_result_key]["message"])
                    elif expected_res == 5:
                        logger.info('SUCCESS: WorkOrderSubmit response'
                                    ' key error and status (%s)!!\n',
                                    check_result[check_result_key]["code"])
                else:
                    err_cd = 1
                    logger.info(
                        'ERROR: Response did not contain expected code '
                        '%s.\n', check_result[check_result_key]["code"])
            else:
                # "code" key missing from the response's error object.
                err_cd = 1
                logger.info('ERROR: Response did not contain expected code %s. \n',
                            check_result[check_result_key]["code"])
        # NOTE(review): if the hardcoded check_result ever lost its "code"
        # key, err_cd would be unbound here; unreachable as written.
    else:
        # No "error" key: validate a WorkOrderGetResult-style response by
        # checking that all expected result keys are present.
        check_get_result = '''{"result": {"workOrderId": "", "workloadId": "",
                         "workerId": "", "requesterId": "", "workerNonce": "",
                         "workerSignature": "", "outData": ""}}'''
        check_result = json.loads(check_get_result)
        check_result_key = list(check_result.keys())[0]
        if check_result_key == "result":
            if (set(check_result["result"].keys()).issubset
                    (response["result"].keys())):
                # Expected Keys : check_result["result"].keys()
                # Actual Keys : response["result"].keys()
                err_cd = 0
                logger.info('SUCCESS: WorkOrderGetResult '
                            'response has keys as expected!!')
            else:
                err_cd = 1
                logger.info('ERROR: Response did not contain keys '
                            'as expected in for test case. ')
        else:
            err_cd = 0
            logger.info('No validation performed for the expected result '
                        'in validate response. ')
    return err_cd
caf687ecffbe5deb9d8b458efede71f4c2c0b3be
3,635,672
from scipy.stats import gaussian_kde def _calc_density(x: np.ndarray, y: np.ndarray): """\ Function to calculate the density of cells in an embedding. """ # Calculate the point density xy = np.vstack([x, y]) z = gaussian_kde(xy)(xy) min_z = np.min(z) max_z = np.max(z) # Scale between 0 and 1 scaled_z = (z - min_z) / (max_z - min_z) return scaled_z
64ea42d14c933137ffb0efaf3a74d3ca1b4927b0
3,635,673
from x2paddle.op_mapper.pytorch2paddle import prim2code


def gen_layer_code(graph, sub_layers, sub_layers_name, different_attrs=dict()):
    """ Generate the corresponding Module code for the given sub_layers.

    Args:
        graph (x2paddle.core.program.PaddleGraph): The whole Paddle graph.
        sub_layers (dict): Dict mapping sub-graph layer ids to their layers.
        sub_layers_name (str): Name of the sub-graph (used as the class name).
        different_attrs (dict/list): Attribute dict/list; these attributes are
            given different values each time the generated module is called.
    """

    def gen_codes(code_list, indent=0):
        """ Build a code segment from code_list.

        Args:
            code_list (list): List of code lines.
            indent (int): Indentation level (4 spaces per level).

        Returns:
            list: Indented, newline-terminated code lines.
        """
        indent_blank = "    " * indent
        codes = []
        for code_line in code_list:
            if code_line.strip() == "":
                codes.append('\n')
            else:
                codes.append(indent_blank + code_line + '\n')
        return codes

    def gen_head(inputs, different_attrs):
        # Generate the header code of the Layer class.
        head = gen_codes(
            ["class {}(paddle.nn.Layer):".format(sub_layers_name)], indent=0)
        # Generate the header code of the __init__ function.
        diff_str_list = list()
        if isinstance(different_attrs, dict):
            for k, v in different_attrs.items():
                diff_str_list.append("{}={}".format(k, v))
            attrs_str = ", ".join(diff_str_list)
        else:
            attrs_str = ", ".join(different_attrs)
        init_func_head = \
            gen_codes(["def __init__(self, {}):".format(attrs_str)], indent=1) + \
            gen_codes(["super({}, self).__init__()".format(sub_layers_name)], indent=2)
        # Generate the header code of the forward function.
        input_data_name = ", ".join(inputs)
        forward_func_head = \
            gen_codes(["def forward(self, {}):".format(input_data_name)], indent=1)
        return head, init_func_head, forward_func_head

    init_func = []
    forward_func = []
    cur_outputs = list()
    inputs = list()
    outputs = list()
    param_prefix_list = list()
    input_id = 0
    # First pass: collect the names that the generated forward() must return
    # (outputs that leave the sub-graph or have no consumers inside it).
    for layer_id, layer in sub_layers.items():
        if layer_id not in graph.edges_out:
            for index, output_name in enumerate(layer.outputs):
                if layer.kernel.startswith(
                        "paddle.nn"
                ) and index == 0 and "functional" not in layer.kernel:
                    continue
                if not output_name.startswith("x") or output_name in outputs \
                        or layer.kernel == "prim.assert":
                    continue
                elif layer.kernel == "prim.if" or layer.kernel == "prim.loop":
                    if index != 0:
                        outputs.append(output_name)
                elif output_name not in outputs:
                    outputs.append(output_name)
            continue
        for out_layer_id in graph.edges_out[layer_id]:
            if out_layer_id not in sub_layers:
                for index, output_name in enumerate(layer.outputs):
                    if layer.kernel.startswith(
                            "paddle.nn"
                    ) and index == 0 and "functional" not in layer.kernel:
                        continue
                    if not output_name.startswith("x") or output_name in outputs \
                            or layer.kernel == "prim.assert":
                        continue
                    elif layer.kernel == "prim.if" or layer.kernel == "prim.loop":
                        if index != 0:
                            outputs.append(output_name)
                    else:
                        outputs.append(output_name)
        if layer.kernel == "prim.dict":
            # A dict that is only consumed by set_item ops is itself an output.
            is_set_item = True
            for out_layer_id in graph.edges_out[layer_id]:
                out_layer = sub_layers[out_layer_id]
                if out_layer.kernel != "prim.set_item":
                    is_set_item = False
                    break
            if is_set_item:
                outputs.append(layer.outputs[0])
    no_output_count = 0
    # Second pass: emit __init__ and forward code for every layer.
    for i, (layer_id, layer) in enumerate(sub_layers.items()):
        _update_attrs(layer, different_attrs)
        if ("paddle.nn" in layer.kernel and "functional" not in layer.kernel) or \
                layer.kernel.startswith("custom_layer"):
            # Stateful layer: instantiate in __init__, call in forward.
            line = "self.{}".format(layer.outputs[0])
            if layer.kernel.startswith("custom_layer"):
                line += " = x2paddle_nn.{}(".format(layer.kernel.split(":")[-1])
            else:
                line += " = {}(".format(layer.kernel)
            for k, v in layer.attrs.items():
                key_name = "{}_{}".format(layer.outputs[0], k)
                if key_name in different_attrs:
                    line += "{}={}, ".format(k, key_name)
                else:
                    line += "{}={}, ".format(k, v)
            line = line.strip(", ")
            line += ")"
            init_func.extend(gen_codes([line], indent=2))

            if len(layer.outputs) == 1:
                line = layer.outputs[0]
            elif len(layer.outputs) == 2:
                line = layer.outputs[1]
            else:
                if layer.kernel == "paddle.nn.LSTM":
                    line = "{}, ({})".format(layer.outputs[1],
                                             ', '.join(layer.outputs[-2:]))
                else:
                    line = ','.join(layer.outputs[1:])
            line += " = self.{}(".format(layer.outputs[0])
            for k, v in layer.inputs.items():
                if v not in cur_outputs and v not in inputs:
                    inputs.append(v)
                line += "{}, ".format(v)
            line = line.strip(", ")
            line += ")"
            forward_func.extend(gen_codes([line], indent=2))
            if len(layer.outputs) == 1:
                cur_outputs.append(layer.outputs[0])
            else:
                cur_outputs.extend(layer.outputs[1:])
        elif "prim" in layer.kernel:
            # Primitive op: delegate code generation to prim2code helpers.
            func_name = layer.kernel.replace(".", "_")
            if hasattr(prim2code, func_name):
                for k, v in layer.inputs.items():
                    if v not in cur_outputs and v not in inputs:
                        inputs.append(v)
                func = getattr(prim2code, func_name)
                func(
                    layer,
                    indent=2,
                    init_func=init_func,
                    forward_func=forward_func,
                    layer_id=layer_id,
                    different_attrs=list(different_attrs.keys())
                    if isinstance(different_attrs, dict) else different_attrs)
                cur_outputs.extend(layer.outputs)
            else:
                raise Exception(
                    "The kind {} in paddle model is not supported yet.".format(
                        layer.kernel))
        elif layer.kernel == "module":
            # Nested sub-module: instantiate it in __init__, call in forward.
            line = "self.{} = {}(".format(layer.outputs[0],
                                          layer.attrs["module"])
            layer.attrs.pop("module")
            for k, v in layer.attrs.items():
                key_name = "{}_{}".format(layer.outputs[0], k)
                if key_name in different_attrs:
                    line += "{}={}, ".format(k, key_name)
                else:
                    line += "{}={}, ".format(k, v)
            line = line.strip(", ")
            line += ")"
            init_func.extend(gen_codes([line], indent=2))
            if len(layer.outputs) == 2:
                line = layer.outputs[1]
            else:
                line = ','.join(layer.outputs[1:])
            line += " = self.{}(".format(layer.outputs[0])
            for k, v in layer.inputs.items():
                if v not in cur_outputs and v not in inputs:
                    inputs.append(v)
                line += "{}, ".format(v)
            line = line.strip(", ")
            line += ")"
            forward_func.extend(gen_codes([line], indent=2))
            cur_outputs.extend(layer.outputs[1:])
        else:
            # Stateless functional call emitted directly into forward().
            if layer.kernel == "paddle.to_tensor":
                v = layer.attrs["data"]
                if v not in cur_outputs and v not in inputs:
                    inputs.append(v)
            if len(layer.outputs) == 1:
                line = layer.outputs[0]
            else:
                line = ','.join(layer.outputs)
            line += " = {}(".format(layer.kernel)
            for k, v in layer.inputs.items():
                if isinstance(v, list):
                    line += "{}=[{}], ".format(k, ", ".join(v))
                    for lv in v:
                        if lv not in cur_outputs and lv not in inputs:
                            inputs.append(lv)
                else:
                    if v not in cur_outputs and v not in inputs:
                        inputs.append(v)
                    if k == "args":
                        line += v
                    else:
                        line += "{}={}, ".format(k, v)
            for k, v in layer.attrs.items():
                key_name = "{}_{}".format(layer.outputs[0], k)
                if key_name in different_attrs:
                    line += "{}=self.{}, ".format(k, key_name)
                    init_func.extend(
                        gen_codes(
                            ["self.{} = {}".format(key_name, key_name)],
                            indent=2))
                else:
                    line += "{}={}, ".format(k, v)
            line = line.strip(", ")
            line += ")"
            if layer.kernel == "self.create_parameter":
                init_func.extend(gen_codes(["self." + line], indent=2))
                forward_func.extend(
                    gen_codes(
                        [
                            "{} = self.{}".format(layer.outputs[0],
                                                  layer.outputs[0])
                        ],
                        indent=2))
            else:
                forward_func.extend(gen_codes([line], indent=2))
            cur_outputs.extend(layer.outputs)
    head, init_func_head, forward_func_head = gen_head(inputs, different_attrs)
    output_data_name = ", ".join(outputs)
    # remove to_tensor op
    forward_func_new = list()
    for line in forward_func:
        if "paddle.to_tensor" in line:
            continue
        forward_func_new.append(line)
    code_list = head + init_func_head + init_func + \
        forward_func_head + forward_func_new + \
        gen_codes(["return {}".format(output_data_name)], indent=2)
    code_str = "".join(code_list)
    return code_str
b4aac353a525405a8eecb82c3b719c419f1e938b
3,635,674
import torch


def test_CreativeProject_integration_ask_tell_one_loop_kwarg_response_works(
        covars, model_type, train_X, train_Y, covars_proposed_iter,
        covars_sampled_iter, response_sampled_iter, kwarg_response,
        random_start, monkeypatch):
    """
    test that a single loop of ask/tell works when providing response as
    kwarg to tell: creates a candidate, creates a model, stores covariates
    and response. Monkeypatch "_read_covars_manual_input" to circumvent
    manual input via builtins.input; the response is provided via the
    `response_obs` kwarg instead of being read interactively.
    """

    # initialize the class
    cc = TuneSession(covars=covars, model=model_type, random_start=random_start)

    # set attributes on class (to simulate previous iterations of ask/tell functionality)
    cc.train_X = train_X
    cc.proposed_X = train_X
    cc.train_Y = train_Y
    cc.model["covars_proposed_iter"] = covars_proposed_iter
    cc.model["covars_sampled_iter"] = covars_sampled_iter
    cc.model["response_sampled_iter"] = response_sampled_iter

    # skip the initial random-sampling phase when prior iterations exist
    if covars_proposed_iter > 0:
        cc.num_initial_random_points = 0
        cc.random_sampling_method = None

    # monkeypatch "_read_covars_manual_input"
    candidate_tensor = torch.tensor([[tmp[0] for tmp in covars]], dtype=torch.double)

    def mock_read_covars_manual_input(additional_text):
        return candidate_tensor
    monkeypatch.setattr(cc, "_read_covars_manual_input", mock_read_covars_manual_input)

    # # monkeypatch "_read_response_manual_input"
    # resp_tensor = torch.tensor([[12]], dtype=torch.double)
    #
    # def mock_read_response_manual_input(additional_text):
    #     return resp_tensor
    # monkeypatch.setattr(cc, "_read_response_manual_input", mock_read_response_manual_input)

    # run the ask method
    cc.ask()

    # run the tell method
    cc.tell(response_obs=kwarg_response)

    ### check for tell (no reason to assert for ask)###

    # assert that a new observation has been added for covariates
    if train_X is not None:
        assert cc.train_X.size()[0] == train_X.size()[0] + 1
    else:
        assert cc.train_X.size()[0] == 1

    # assert that the right elements have been added to the covariate observation
    for i in range(cc.train_X.size()[1]):
        assert cc.train_X[-1, i].item() == candidate_tensor[0, i].item()

    # assert that a new observation has been added for the response
    if train_Y is not None:
        assert cc.train_Y.size()[0] == train_Y.size()[0] + 1
    else:
        assert cc.train_Y.size()[1] == 1

    # assert that the right elements have been added to the response observation
    assert cc.train_Y[-1, 0].item() == kwarg_response[0, 0].item()  # resp_tensor[0, 0].item()

    ### check that acquisition function and model have been added

    # check that a model function has been assigned (should happen in all cases as part of tell)
    assert cc.model["model"] is not None

    # check that an acquisition function has been added (only if some data present in train_X, train_Y at first step)
    if train_X is not None:
        assert cc.acq_func["object"] is not None
aba248b5102013ea91f81c380faa922c18449cd9
3,635,675
import io


def read_all_files(filenames):
    """Read all files into a StringIO buffer.

    File contents are concatenated in the given order, joined by a single
    newline between files.

    Args:
        filenames: Iterable of file paths to read.

    Returns:
        io.StringIO positioned at the start of the concatenated text.

    Raises:
        OSError: If any file cannot be opened or read.
    """
    contents = []
    for name in filenames:
        # Context manager closes each handle promptly; the original left
        # file objects open until garbage collection.
        with open(name) as f:
            contents.append(f.read())
    return io.StringIO('\n'.join(contents))
efb2e3e8f35b2def5f1861ecf06d6e4135797ccf
3,635,676
from typing import Optional


def calculate_distance(geojson, unit: Unit = Unit.meters) -> Optional[float]:
    """
    Compute the length of a LineString or MultiLineString GeoJSON feature.

    Raises geojson_length.exc.GeojsonLengthException if the input GeoJSON
    is invalid (not dict-like, or missing geometry/coordinates/type).

    :param geojson: GeoJSON feature of type LineString or MultiLineString
    :param unit: Unit of the result
    :return: distance in the preferred unit, or None for unsupported
        geometry types
    """
    try:
        geometry = geojson.get("geometry", None)
    except AttributeError:
        # Input does not behave like a mapping at all.
        raise GeojsonLengthException(
            "Invalid GeoJSON provided. Should be geojson.geometry.LineString,"
            " geojson.geometry.MultiLineString or dict"
        )

    if not geometry:
        raise GeojsonLengthException("Provided GeoJSON object has no geometry field")

    coords = geometry.get("coordinates", None)
    if not coords:
        raise GeojsonLengthException(
            "Provided GeoJSON object has no coordinates specified in geometry field"
        )

    geom_type = geometry.get("type", None)
    if not geom_type:
        raise GeojsonLengthException(
            "Provided GeoJSON object has no type specified in geometry field"
        )

    if geom_type == "LineString":
        return calculate_line_string(coords, unit)
    if geom_type == "MultiLineString":
        # Sum the lengths of all component line strings.
        return sum((calculate_line_string(part, unit) for part in coords), 0)
    return None
5f019f6acf7ff49189ceab7531ffddeef5a15d03
3,635,677
def weighted_mse_loss(y_true, y_pred):
    """
    apply weights on heatmap mse loss to only pick valid keypoint heatmap

    since y_true would be gt_heatmap with shape
    (batch_size, heatmap_size[0], heatmap_size[1], num_keypoints)
    we sum up the heatmap for each keypoints and check. Sum for invalid
    keypoint would be 0, so we can get a keypoint weights tensor with shape
    (batch_size, 1, 1, num_keypoints) and multiply to loss

    NOTE(review): despite the name, the trailing K.sqrt makes this the root
    of the mean squared error (RMSE-like), not plain MSE — confirm intended.
    """
    # Sum heatmap values over both spatial axes, keeping dims so the result
    # broadcasts back against (batch, H, W, num_keypoints).
    heatmap_sum = K.sum(K.sum(y_true, axis=1, keepdims=True), axis=2, keepdims=True)

    # keypoint_weights shape: (batch_size, 1, 1, num_keypoints), with
    # valid_keypoint = 1.0, invalid_keypoint = 0.0
    keypoint_weights = 1.0 - K.cast(K.equal(heatmap_sum, 0.0), 'float32')

    # Zero out the error of invalid keypoints before averaging.
    return K.sqrt(K.mean(K.square((y_true - y_pred) * keypoint_weights)))
2ad89db78ec78d571a727002d6e62fc6de624965
3,635,678
def p_marketprices(
    i: pd.DatetimeIndex,
    avg: float = 100,
    year_amp: float = 0.30,
    week_amp: float = 0.05,
    peak_amp: float = 0.30,
    has_unit: bool = True,
) -> pd.Series:
    """Create a more or less realistic-looking forward price curve timeseries.

    Parameters
    ----------
    i : pd.DatetimeIndex
        Timestamps for which to create prices.
    avg : float, optional (default: 100)
        Average price in Eur/MWh.
    year_amp : float, optional (default: 0.3)
        Yearly amplitude as fraction of average. If positive: winter prices > summer prices.
    week_amp : float, optional (default: 0.05)
        Weekly amplitude as fraction of average. If positive: midweek prices > weekend prices.
    peak_amp : float, optional (default: 0.3)
        Peak-offpeak amplitude as fraction of average. If positive: peak prices > offpeak prices.
    has_unit : bool, optional (default: True)
        If True, return Series with pint unit in Eur/MWh.

    Returns
    -------
    pd.Series
        Price timeseries.

    Raises
    ------
    ValueError
        If the fractional amplitudes sum to more than 1 (prices could go
        negative otherwise).
    """
    if year_amp + week_amp + peak_amp > 1:
        raise ValueError(
            f"Sum of fractional amplitudes ({year_amp:.1%} and {week_amp:.1%} and {peak_amp:.1%}) should not exceed 100%."
        )

    # year angle: 1jan0:00..1jan0:00 -> 0..2pi. But: uniform within month
    ya = i.map(lambda ts: ts.month) / 12 * np.pi * 2
    # week angle: Sun0:00..Sun0:00 -> 0..2pi. But: uniform within day.
    wa = i.map(lambda ts: ts.weekday() + 1) / 7 * np.pi * 2
    # peak fraction: -1 (middle of offpeak hours) .. 1 (middle of peak hours)
    # Only computed for hourly / quarter-hourly indices; otherwise zero.
    if i.freq in ["H", "15T"]:
        # Smoothing kernel so the peak/offpeak transition is gradual.
        b = np.array([0.5, 0.8, 1, 0.8, 0.5])
        if i.freq == "15T":  # repeat every value 4 times
            b = np.array([[bb, bb, bb, bb] for bb in b]).flatten()
        b = b[: len(i)]  # slice in case i is very short
        # NOTE(review): is_peak_hour is an external helper — presumably maps
        # each timestamp to a peak/offpeak boolean; confirm its contract.
        pa = np.convolve(-1 + 2 * i.map(is_peak_hour), b / sum(b), mode="same")
    else:
        pa = np.zeros(len(i))
    # Values
    yv = year_amp * np.cos(ya - 0.35)  # max in feb
    wv = week_amp * np.cos(wa - 1.07)  # max on tuesday
    pv = peak_amp * pa
    s = pd.Series(avg * (1 + yv + wv + pv), i, name="p")
    return s if not has_unit else s.astype("pint[Eur/MWh]")
db51ba10f6dda4f1df77833d29310a97411f0979
3,635,679
def read_flow(fn):
    """Load an optical-flow field stored in Middlebury ``.flo`` format.

    The file layout is: a float32 magic number (202021.25), two int32
    values (width, height), then 2*width*height float32 flow components.

    WARNING: this will work on little-endian architectures (eg Intel x86)
    only!

    :param fn: path to the ``.flo`` file
    :return: float32 array of shape (height, width, 2), or None when the
        magic number does not match
    """
    with open(fn, 'rb') as flo:
        magic = np.fromfile(flo, np.float32, count=1)
        if 202021.25 != magic:
            print("Magic number incorrect. Invalid .flo file")
            return None
        width = np.fromfile(flo, np.int32, count=1)[0]
        height = np.fromfile(flo, np.int32, count=1)[0]
        raw = np.fromfile(flo, np.float32, count=2 * width * height)
    # Reshape into (rows, columns, bands) for visualization; the on-disk
    # layout is row-major (w, h, 2).
    return np.resize(raw, (int(height), int(width), 2))
e8b1d39a40b6650bdeb1ae8cf8b8ecd00b45c787
3,635,680
from typing import List


def sma(grp_df: pd.DataFrame, cols: List[str], windows: List[int]) -> pd.DataFrame:
    """
    Append simple-moving-average columns to the grouped frame in place.

    For every (window, column) pair a new column named
    ``sma_{col}_{window}`` is added, holding the rolling mean of that
    column over the given window.

    Parameters:
    -------
    grp_df: pd.DataFrame
        The grouped dataframe (mutated and also returned).
    cols: list
        Column names to average.
    windows: list
        Window sizes for the rolling means.
    """
    # Windows vary slowest so the new columns appear grouped by window,
    # matching the original insertion order.
    targets = (
        (f"sma_{name}_{size}", name, size)
        for size in windows
        for name in cols
    )
    for out_name, src_name, size in targets:
        grp_df[out_name] = grp_df[src_name].rolling(window=size).mean()
    return grp_df
12c80365255893330d1ced017019c318f3683587
3,635,681
def add_anchor_tag(anchor_id, header):
    """
    Add anchor tag to header. Input and output will look like below.

    Input:
        ## Task 02 - Do something
    Output:
        ## <a id="task02"></a> Task 02 - Do something [^](#toc)
    """
    # The anchor replaces the first space, i.e. it lands right after the
    # leading '#' characters of the markdown header.
    tagged = header.replace(' ', ANCHOR.format(anchor_id), 1)
    # Append the back-to-TOC link, separated by a single space.
    return f"{tagged.strip()} {TOC}"
1f58f985cc90d7cb8243a1d593eb89e329e7ccef
3,635,682
from typing import Callable


def create_async_executor(query: Query) -> Callable:
    """Create async executor for query.

    Looks up the executor registered for the query's operation type and
    binds *query* as its first argument via ``partial``.

    Arguments:
        query: query for which executor should be created.

    Returns:
        Created async executor (a zero-setup callable with the query
        already bound).

    Raises:
        KeyError: if no executor is registered for
            ``query.operation_type`` in ``_OPERATION_TO_EXECUTOR``.
    """
    executor = _OPERATION_TO_EXECUTOR[query.operation_type]
    return partial(executor, query)
0e13ae11e8096b807615c3cc8812dcd3e5acaed9
3,635,683
def batch_write_coverage(bed_fname, bam_fname, out_fname, by_count, processes):
    """Run coverage on one sample, write to file.

    Args:
        bed_fname: Path to the BED file of target regions.
        bam_fname: Path to the sample's BAM file.
        out_fname: Destination path for the coverage table.
        by_count: Forwarded to ``coverage.do_coverage`` — presumably
            toggles read-count vs. depth mode; confirm against that
            function's signature.
        processes: Forwarded to ``coverage.do_coverage`` — presumably the
            number of worker processes.

    Returns:
        out_fname, so callers (e.g. a parallel map) can collect the
        written paths.
    """
    # The fourth positional argument (0) is passed through unchanged from
    # the original call; its meaning is defined by coverage.do_coverage.
    cnarr = coverage.do_coverage(bed_fname, bam_fname, by_count, 0, processes)
    tabio.write(cnarr, out_fname)
    return out_fname
7b29ed2422181f8a42574368a22da8814693f7f9
3,635,684
def splitTargets(targetStr):
    """
    Split a target argument string into individual target tokens.

    A target is a name optionally followed by a count word (as recognized
    by isCountStr). The input is cmdargs already stripped of their first
    arg. Target examples:
      * staff
      * staff 2
      * staff #2
      * player
      * player #3
    """
    targetList = []
    pending = ""
    for word in targetStr.split(" "):
        if pending == "":
            # Start a new target with this word.
            pending = word
        elif isCountStr(word):
            # A count word completes the pending target.
            targetList.append(pending + " " + word)
            pending = ""
        else:
            # A new target begins; flush the previous one first.
            targetList.append(pending)
            pending = word
    if pending != "":
        # Flush the final target if one is still pending.
        targetList.append(pending)
    return targetList
4b53a7db8d8b871b21b2d5b9044f1889be462ace
3,635,685
def get_model():
    """
    Build a CNN binary classifier for 75x75 RGB inputs.

    Architecture: three convolution blocks (32, 64 and 128 filters; each
    conv followed by BatchNormalization and ReLU, each block ended with
    2x2 max pooling), flattened into two dropout-regularized Dense layers
    (2048 and 1024 units) and a single sigmoid output unit. Compiled with
    Adam (lr=0.0001) and binary cross-entropy, tracking accuracy.

    Observed training results (kept from the original author's notes):
    Epoch 50/50
    3530/3530 [==============================] - 10s - loss: 8.5420e-04 - acc: 1.0000 - val_loss: 0.3877 - val_acc: 0.9083
    1471/1471 [==============================] - 1s
    Train score: 0.00226768349974
    Train accuracy: 1.0

    Returns:
        A compiled Keras `Sequential` model.
    """
    model = Sequential()

    # Block 1
    model.add(Conv2D(32, kernel_size=(3, 3), activation=None, input_shape=(75, 75, 3)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Conv2D(32, kernel_size=(3, 3), activation=None))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    # Block 2
    model.add(Conv2D(64, kernel_size=(3, 3), activation=None))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Conv2D(64, kernel_size=(3, 3), activation=None))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    # Block 3
    model.add(Conv2D(128, kernel_size=(3, 3), activation=None))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Conv2D(128, kernel_size=(3, 3), activation=None))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Conv2D(128, kernel_size=(3, 3), activation=None))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    # You must flatten the data for the dense layers
    model.add(Flatten())

    # Dense 1
    model.add(Dense(2048, activation='relu'))
    model.add(Dropout(0.2))

    # Dense 2
    model.add(Dense(1024, activation='relu'))
    model.add(Dropout(0.2))

    # Output
    model.add(Dense(1, activation="sigmoid"))

    optimizer = Adam(lr=0.0001, decay=0.0)
    model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])

    return model
d2a6baf0071c6d6e37cb9cf43e64e4ec2703b725
3,635,686
def path_inside_dir(path, directory):
    """
    Component-wise check that @path lies strictly inside @directory.

    Returns True when @path is inside @directory (never when they are
    equal); an empty @directory contains every non-empty path. Trailing
    slashes on either argument are ignored.
    """
    if directory == "":
        # The empty directory is treated as the root of everything.
        return path != ""
    # Compare whole components: "/ab" must not match directory "/a".
    prefix = directory.rstrip("/") + "/"
    return path.rstrip("/").startswith(prefix)
30ad431f9115addd2041e4b6c9c1c8c563b93fe9
3,635,687
import sympy
import os


def tfi_chain(qubits, boundary_condition="closed", data_dir=None):
    r"""1D Transverse field Ising-model quantum data set.

    $$
    H = - \sum_{i} \sigma_i^z \sigma_{i+1}^z - g\sigma_i^x
    $$

    Contains 81 circuit parameterizations corresponding to the ground
    states of the 1D TFI chain for g in [0.2, 1.8], ordered by increasing
    g. Each datapoint consists of a resolved circuit (`cirq.Circuit`)
    that prepares (an approximation to) the ground state, a phase label,
    the Hamiltonian (`cirq.PauliSum`) on `qubits` with boundary condition
    `boundary_condition`, and additional metadata
    (see `tfq.datasets.spin_system.SpinSystem`).

    Args:
        qubits: Python `lst` of `cirq.GridQubit`s. Supported number of
            spins are [4, 8, 12, 16].
        boundary_condition: Python `str` indicating the boundary condition
            of the chain. Supported boundary conditions are ["closed"].
        data_dir: Optional Python `str` location where to store the data
            on disk. Defaults to `/tmp/.keras`.

    Returns:
        A Python `lst` of `cirq.Circuit`s of depth len(qubits) / 2 with
            resolved parameters.
        A Python `lst` of labels: 0 for the ferromagnetic phase (`g<1`),
            1 for the critical point (`g==1`) and 2 for the paramagnetic
            phase (`g>1`).
        A Python `lst` of `cirq.PauliSum`s.
        A Python `lst` of `namedtuple` instances with fields `g`
            (order parameter), `gs` (exact ground state), `gs_energy`,
            `res_energy`, `fidelity`, `params` (dict of the $M \times P$
            circuit parameters) and `var_circuit` (the unresolved
            variational `cirq.Circuit`).

    Raises:
        TypeError: If `qubits` is not a flat list of `cirq.GridQubit`s.
        ValueError: If the number of spins or the boundary condition is
            unsupported.
    """
    supported_n = [4, 8, 12, 16]
    supported_bc = ["closed"]
    if any(isinstance(q, list) for q in qubits):
        raise TypeError("qubits must be a one-dimensional list")
    if not all(isinstance(q, cirq.GridQubit) for q in qubits):
        raise TypeError("qubits must be a list of cirq.GridQubit objects.")
    nspins = len(qubits)
    depth = nspins // 2
    if nspins not in supported_n:
        raise ValueError("Supported number of spins are {}, received {}".format(
            supported_n, nspins))
    if boundary_condition not in supported_bc:
        raise ValueError(
            "Supported boundary conditions are {}, received {}".format(
                supported_bc, boundary_condition))
    data_path = _download_spin_data('TFI_chain', boundary_condition, nspins,
                                    data_dir)

    name_generator = unique_name()
    # 2 * N/2 parameters: one ZZ angle and one X angle per layer.
    symbol_names = [next(name_generator) for _ in range(nspins)]
    symbols = [sympy.Symbol(name) for name in symbol_names]

    # Define the variational circuit.
    circuit = cirq.Circuit(cirq.H.on_each(qubits))
    for d in range(depth):
        circuit.append(
            cirq.ZZ(q1, q2)**(symbols[d]) for q1, q2 in zip(qubits, qubits[1:]))
        if boundary_condition == "closed":
            circuit.append(cirq.ZZ(qubits[nspins - 1], qubits[0])**(symbols[d]))
        circuit.append(cirq.X(q1)**(symbols[d + depth]) for q1 in qubits)

    # Initiate lists.
    resolved_circuits = []
    hamiltonians = []
    order_parameters = []
    additional_info = []
    labels = []

    # Load the data and append to the lists.
    for i, directory in enumerate(os.listdir(data_path)):
        # The folders are named according to the order value data they contain.
        g = float(directory)
        with open(os.path.join(data_path, directory, "stats.txt"), "r") as file:
            lines = file.readlines()
            res_e = float(lines[0].split("=")[1].strip("\n"))
            fidelity = float(lines[2].split("=")[1].strip("\n"))
        order_parameters.append(g)
        params = np.load(os.path.join(data_path, directory, "params.npy")) \
            / np.pi
        # Parameters are stored as np.float32, but cirq expects np.float64
        # See https://github.com/quantumlib/Cirq/issues/3359
        # (np.float was a deprecated alias removed in NumPy 1.24; use the
        # concrete np.float64 instead.)
        params = params.astype(np.float64)
        additional_info.append(
            SpinSystemInfo(g=g,
                           gs=np.load(
                               os.path.join(data_path, directory,
                                            "groundstate.npy"))[:, 0],
                           gs_energy=np.load(
                               os.path.join(data_path, directory,
                                            "energy.npy"))[0],
                           res_energy=res_e,
                           fidelity=fidelity,
                           params=dict(zip(symbol_names, params.flatten())),
                           var_circuit=circuit))

        # Resolve the circuit parameters.
        resolved_circuit = cirq.resolve_parameters(circuit,
                                                   additional_info[i].params)
        resolved_circuits.append(resolved_circuit)

        # Make the PauliSum.
        paulisum = sum(
            -cirq.Z(q1) * cirq.Z(q2) for q1, q2 in zip(qubits, qubits[1:]))
        if boundary_condition == "closed":
            paulisum += -cirq.Z(qubits[0]) * cirq.Z(qubits[-1])
        paulisum += -order_parameters[i] * sum(cirq.X(q) for q in qubits)
        hamiltonians.append(paulisum)

        # Set labels for the different phases.
        if order_parameters[i] < 1.0:
            labels.append(0)
        elif order_parameters[i] == 1.0:
            labels.append(1)
        else:
            labels.append(2)

    # Make sure that the data is ordered from g=0.2 to g=1.8.
    _, resolved_circuits, labels, hamiltonians, additional_info = zip(*sorted(
        zip(order_parameters, resolved_circuits, labels, hamiltonians,
            additional_info)))

    return resolved_circuits, labels, hamiltonians, additional_info
f3e352fb7451720575bca3c8eb574de474707fb5
3,635,688
def generate_sd_grid_mapping_traj(ipath_sd, n_top_grid, ipath_top_grid,
                                  ipath_grid_block_gps_range, odir_sd,
                                  mapping_rate=1, mapping_bais=None):
    """Generate the grid-mapping trajectories for the sanitized data (SD).

    Each input trajectory is a sequence of grid indices; every index is
    replaced by a point sampled uniformly inside that grid cell, rescaled
    by ``mapping_rate`` and shifted by ``mapping_bais``, then written as
    one ``sd_traj<N>.txt`` file per trajectory under ``odir_sd``.

    Args:
        ipath_sd: Path to the sanitized trajectory file (one trajectory
            per line, as a Python-literal list of grid indices).
        n_top_grid: Unused here — presumably the top-grid resolution kept
            for interface compatibility (see commented-out code below).
        ipath_top_grid: Unused here (only read by commented-out code).
        ipath_grid_block_gps_range: Path to a file whose first line is a
            Python literal mapping grid index -> ((lat0, lon0), (lat1, lon1)).
        odir_sd: Output directory for the generated trajectory files.
        mapping_rate: Divisor applied to sampled coordinates.
        mapping_bais: Optional {'lat': ..., 'lon': ...} offset dict;
            defaults to zero offsets.
    """
    def random_sampling(grid_range):
        """Sample a uniform random point within a grid cell's GPS range."""
        x = np.random.uniform(grid_range[0][0], grid_range[1][0])
        y = np.random.uniform(grid_range[0][1], grid_range[1][1])
        return x, y  # for pep8
    if mapping_bais is None:
        mapping_bais = {'lat': 0, 'lon': 0}  # privacy budget
    # SECURITY NOTE(review): eval() is used on file contents here and
    # below — safe only if these files are trusted; consider
    # ast.literal_eval instead.
    with open(ipath_sd) as fr_sd:
        sd = [eval(point.replace('\n', '')) for point in fr_sd.readlines()]
    # C = n_top_grid ** 2
    # with open(ipath_top_grid) as fr_top_grid:
    #     M = eval(fr_top_grid.readline())
    with open(ipath_grid_block_gps_range) as fr_top_grid_block_gps_range:
        fstr = fr_top_grid_block_gps_range.readlines()
        grid_block_gps_range = eval(fstr[0])
        # top_grid_block_gps_range = eval(fstr[1])
    # Replace every grid index with a random point inside that cell.
    reverse_mapped_trajs = []
    for traj in sd:
        reverse_mapped_trajs.append([random_sampling(grid_block_gps_range[i])
                                     for i in traj])
    # write to files
    fcount = 0
    p = utils.ProgressBar(len(reverse_mapped_trajs), '็”Ÿๆˆ่„ฑๆ•ๆ•ฐๆฎ้›†')
    for i in range(len(reverse_mapped_trajs)):
        p.update(i)
        with open(odir_sd + '/sd_traj' + str(fcount) + '.txt', 'w') as fw_traj:
            for point in reverse_mapped_trajs[i]:
                # mapping: rescale and shift each sampled coordinate
                point = [point[0]/mapping_rate+mapping_bais['lat'],
                         point[1]/mapping_rate+mapping_bais['lon']]
                fw_traj.write(str(point[0])+','+str(point[1])+'\n')
        fcount += 1
dbc70465e6a66cb967b697559f598d0e8c2ece90
3,635,689
import os


def get_2bit_path(db_opt):
    """Check if alias and return a path to 2bit file.

    If *db_opt* already names an existing file it is returned unchanged;
    otherwise it is treated as a genome alias and expanded via
    two_bit_templ. die() is invoked when the expanded path does not exist
    either.
    """
    if os.path.isfile(db_opt):
        # A real path was given directly — nothing to do.
        return db_opt
    aliased = two_bit_templ.format(db_opt)
    # Verify the expanded alias actually points at a file.
    if not os.path.isfile(aliased):
        die(f"Error! Cannot find {aliased} file", 1)
    return aliased
4576b90df21e8996774e93ab8cf28023d025b85d
3,635,690
def butterworth_type_filter(frequency, highcut_frequency, order=2,
                            second_cutoff_frequency=300e3):
    """
    Butterworth-type low pass filter response.

    The response is the product of two stages: an ``order``-th order
    low-pass at ``highcut_frequency`` and a fixed first-order low-pass at
    ``second_cutoff_frequency`` (300 kHz by default, matching the
    previously hard-coded value).

    Parameters
    ----------
    frequency: ndarray
        Frequencies at which to evaluate the filter.
    highcut_frequency: float
        high-cut frequency for the low pass filter
    order: int
        The order of the butterworth filter
    second_cutoff_frequency: float
        Cutoff frequency of the fixed first-order second stage.

    Returns
    -------
    h: ndarray
        Complex filter values evaluated at `frequency`.
    """
    # First stage: order-th order low-pass at the requested cutoff.
    h = 1.0 / (1 + 1j * (frequency / highcut_frequency)) ** order
    # Second stage: fixed first-order low-pass (originally the parameter
    # `highcut_frequency` was silently rebound to 300 kHz here).
    h *= 1.0 / (1 + 1j * (frequency / second_cutoff_frequency)) ** 1
    return h
f8ff570d209560d65b4ccc9fdfd2d26ec8a12d35
3,635,691
import sys


def draw_mesh(
    # Main input
    edof, coord, dof, element_type,
    # Other parameters
    scale = 0.02, alpha = 1, render_nodes = True, color = 'yellow', offset = [0, 0, 0],
    # BC- & F-marking
    bcPrescr = None, bc = None, bc_color = 'red', fPrescr = None, f = None, f_color = 'blue6', eq_els = None, eq = None,
    # Element-specific input
    spring = True, nseg = 2
    ):
    """
    Routine for undisplaced mesh for spring, bar, flow, solid, beam or plate elements.

    :param array edof: Element topology by degrees of freedom [nel x (n_dofs_per_node)|(n_dofs_per_node+1)*n_nodes]
    :param array coord: Nodal coordinates [number of nodes x 3]
    :param array dof: Global degrees of freedom [number of nodes x degrees of freedom per node]
    :param int element_type: Element type [1-6]
    :param float scale: Element scale, nodes are scaled 50% larger than this value
    :param float alpha: Element and node transparency [0-1]
    :param bool render_nodes: If True, nodes are rendered
    :param str color: Element color
    :param list offset: Offset actors in 3D space [x, y, z]
        NOTE(review): mutable default argument; safe here because it is only
        read (coord[:] += offset), never mutated.
    :param array bcPrescr: Degrees of freedom with boundary conditions [number of degrees of freedom with BCs x 1]
    :param array bc: Boundary conditions [number of degrees of freedom with BCs x 1]
    :param str bc_color: Color for nodes with boundary conditions applied
    :param array fPrescr: Degrees of freedom with forces [number of degrees of freedom with forces x 1]
    :param array f: Forces at degrees of freedom [number of degrees of freedom with forces x 1]
    :param str f_color: Color for nodes/elements with forces applied
    :param array eq_els: Element numbers where forces are applied [number of elements with forces x 1]
    :param array eq: Element force vector [number of elements with forces x 1 | number of elements with forces x 3]
    :param bool spring: If True, renders spring elements as coil springs
    :param int nseg: Number of points along beam elements for segmenting [number of segments + 1]
    :return array mesh: List of mesh actors
    """
    app = init_app()  # ensure the application/plot backend exists; return value unused here
    plot_window = VedoPlotWindow.instance().plot_window

    # Pad 1-D / 2-D coordinate arrays with zero columns so coord is always [nnode x 3].
    if np.size(coord, axis = 1) == 1:
        coord = np.append(coord, np.zeros((np.size(coord, axis = 0),1)), axis=1)
        coord = np.append(coord, np.zeros((np.size(coord, axis = 0),1)), axis=1)
    elif np.size(coord, axis = 1) == 2:
        coord = np.append(coord, np.zeros((np.size(coord, axis = 0),1)), axis=1)

    if 1 <= element_type <= 6:
        nel, ndof_per_el, nnode, ndim, ndof, ndof_per_n = vdu.check_input(edof,coord,dof,element_type,nseg=nseg)
    else:
        print("draw_mesh: Invalid element type, please declare 'element_type'. The element types are:\n 1 - Spring\n 2 - Bar\n 3 - Flow\n 4 - Solid\n 5 - Beam\n 6 - Plate")
        sys.exit()

    # OUTPUT FROM check_input
    # Number of elements: nel
    # Number of degrees of freedom per element: ndof_per_el
    # Number of nodes: nnode
    # Number of dimensions: ndim
    # Number of degrees of freedom: ndof
    # Number of degrees of freedom per node: ndof_per_n
    # Number of displacements: ndisp
    # Element/nodal values: val

    # Elements w/ a length (spring, bar & beam)
    if element_type == 1 or element_type == 2 or element_type == 5:
        ncoord = np.size(coord, axis = 0)  # NOTE(review): computed but unused below
        nel = np.size(edof, axis = 0)
        elements = []
        coord[:] += offset

        if element_type == 5:
            # Map element index -> element force row, used for beam labels.
            # NOTE(review): the identical eq_dict is rebuilt nel times; only
            # the dict surviving the last iteration is used below.
            for i in range(nel):
                eq_dict = {}
                indx = 0
                if isinstance(eq_els, np.ndarray):
                    for j in eq_els:
                        eq_dict[j[0]] = eq[indx][0]
                        indx += 1

        for i in range(nel):
            coord1,coord2 = vdu.get_coord_from_edof(edof[i,:],dof,element_type)

            if element_type == 1 and spring == True:
                # Spring rendered as a coil.
                element = v.Spring([coord[coord1,0],coord[coord1,1],coord[coord1,2]],[coord[coord2,0],coord[coord2,1],coord[coord2,2]],r=1.5*scale,c=color).alpha(alpha)
                element.name = f"Spring element {i+1}"
                elements.append(element)
            elif element_type == 1 and spring == False:
                # Spring rendered as a plain cylinder.
                element = v.Cylinder([[coord[coord1,0],coord[coord1,1],coord[coord1,2]],[coord[coord2,0],coord[coord2,1],coord[coord2,2]]],r=scale,res=4,c=color).alpha(alpha)
                element.name = f"Spring element {i+1}"
                elements.append(element)
            elif element_type == 2:
                bar = v.Cylinder([[coord[coord1,0],coord[coord1,1],coord[coord1,2]],[coord[coord2,0],coord[coord2,1],coord[coord2,2]]],r=scale,res=4,c=color).alpha(alpha)
                bar.name = f"Bar element {i+1}"
                elements.append(bar)
            # Segmented beam
            elif element_type == 5 and nseg > 2:
                # Interpolate nseg-1 cylinder segments between the two end nodes.
                steps = np.float32(1/(nseg-1))
                dx = (coord[coord2,0]-coord[coord1,0])*steps
                dy = (coord[coord2,1]-coord[coord1,1])*steps
                dz = (coord[coord2,2]-coord[coord1,2])*steps
                for j in range(nseg-1):
                    x1 = coord[coord1,0]+dx*j
                    y1 = coord[coord1,1]+dy*j
                    z1 = coord[coord1,2]+dz*j
                    x2 = coord[coord1,0]+dx*(j+1)
                    y2 = coord[coord1,1]+dy*(j+1)
                    z2 = coord[coord1,2]+dz*(j+1)
                    # Highlight the whole beam with f_color when it carries an element load.
                    if np.any(np.isin(eq_els, i, assume_unique=True)) == True:
                        beam = v.Cylinder([[x1,y1,z1],[x2,y2,z2]],r=scale,res=4,c=f_color).alpha(alpha)
                    else:
                        beam = v.Cylinder([[x1,y1,z1],[x2,y2,z2]],r=scale,res=4,c=color).alpha(alpha)
                    if i in eq_dict:
                        beam.name = f"Beam element {i+1}, seg. {j+1}, Forces: [{eq_dict[i][0]}, {eq_dict[i][1]}, {eq_dict[i][2]}, {eq_dict[i][3]}]"
                    else:
                        beam.name = f"Beam element {i+1}, seg. {j+1}"
                    elements.append(beam)
            elif element_type == 5:
                # Unsegmented beam: a single cylinder between the end nodes.
                if np.any(np.isin(eq_els, i, assume_unique=True)) == True:
                    beam = v.Cylinder([[coord[coord1,0],coord[coord1,1],coord[coord1,2]],[coord[coord2,0],coord[coord2,1],coord[coord2,2]]],r=scale,res=4,c=f_color).alpha(alpha)
                else:
                    beam = v.Cylinder([[coord[coord1,0],coord[coord1,1],coord[coord1,2]],[coord[coord2,0],coord[coord2,1],coord[coord2,2]]],r=scale,res=4,c=color).alpha(alpha)
                if i in eq_dict:
                    beam.name = f"Beam element {i+1}, Forces: [{eq_dict[i][0]}, {eq_dict[i][1]}, {eq_dict[i][2]}, {eq_dict[i][3]}]"
                else:
                    beam.name = f"Beam element {i+1}"
                elements.append(beam)

        if render_nodes == True:
            # dofs_per_node depends on the element type (1 for springs,
            # 3 for bars, 6 for beams).
            if element_type == 1 and spring == False:
                nodes = vdu.get_node_elements(coord,scale,alpha,dof,bcPrescr,bc,bc_color,fPrescr,f,f_color,dofs_per_node=1)
            elif element_type == 1:
                nodes = vdu.get_node_elements(coord,scale*0.5,alpha,dof,bcPrescr,bc,bc_color,fPrescr,f,f_color,dofs_per_node=1)
            elif element_type == 2:
                nodes = vdu.get_node_elements(coord,scale,alpha,dof,bcPrescr,bc,bc_color,fPrescr,f,f_color,dofs_per_node=3)
            elif element_type == 5:
                nodes = vdu.get_node_elements(coord,scale,alpha,dof,bcPrescr,bc,bc_color,fPrescr,f,f_color,dofs_per_node=6)
            plot_window.meshes[plot_window.fig].extend(elements)
            plot_window.nodes[plot_window.fig].extend(nodes)
        else:
            plot_window.meshes[plot_window.fig].extend(elements)

        return elements

    # Elements w/ a volume/surface (flow, solid & plate)
    elif element_type == 3 or element_type == 4 or element_type == 6:
        meshes = []
        nel = np.size(edof, axis = 0)
        coord[:] += offset

        for i in range(nel):
            # Map element index -> element force row, used for labels.
            # NOTE(review): rebuilt identically for every element i.
            eq_dict = {}
            indx = 0
            if isinstance(eq_els, np.ndarray):
                for j in eq_els:
                    eq_dict[j[0]] = eq[indx][0]
                    indx += 1
            if element_type == 3:
                coords = vdu.get_coord_from_edof(edof[i,:],dof,3)
            elif element_type == 4:
                coords = vdu.get_coord_from_edof(edof[i,:],dof,4)
            elif element_type == 6:
                coords = vdu.get_coord_from_edof(edof[i,:],dof,6)
                # Build an 8-point coordinate list from the plate's 4 corner
                # nodes (x/y duplicated; only the first quad face is used).
                # NOTE(review): the z column of new_coord stays 0, so a
                # z-offset is not reflected for plate elements -- confirm
                # whether that is intended.
                new_coord = np.zeros([8,3])
                new_coord[0,0] = coord[coords[0],0]
                new_coord[1,0] = coord[coords[1],0]
                new_coord[2,0] = coord[coords[2],0]
                new_coord[3,0] = coord[coords[3],0]
                new_coord[4,0] = coord[coords[0],0]
                new_coord[5,0] = coord[coords[1],0]
                new_coord[6,0] = coord[coords[2],0]
                new_coord[7,0] = coord[coords[3],0]
                new_coord[0,1] = coord[coords[0],1]
                new_coord[1,1] = coord[coords[1],1]
                new_coord[2,1] = coord[coords[2],1]
                new_coord[3,1] = coord[coords[3],1]
                new_coord[4,1] = coord[coords[0],1]
                new_coord[5,1] = coord[coords[1],1]
                new_coord[6,1] = coord[coords[2],1]
                new_coord[7,1] = coord[coords[3],1]

            if element_type == 3 or element_type == 4:
                # Hexahedron rendered as its 6 quad faces; f_color marks loaded elements.
                if np.any(np.isin(eq_els, i, assume_unique=True)) == True:
                    mesh = v.Mesh([coord[coords,:],[[0,1,2,3],[4,5,6,7],[0,3,7,4],[1,2,6,5],[0,1,5,4],[2,3,7,6]]],alpha=alpha,c=f_color).lw(1)
                else:
                    mesh = v.Mesh([coord[coords,:],[[0,1,2,3],[4,5,6,7],[0,3,7,4],[1,2,6,5],[0,1,5,4],[2,3,7,6]]],alpha=alpha).lw(1)
            elif element_type == 6:
                if np.any(np.isin(eq_els, i, assume_unique=True)) == True:
                    mesh = v.Mesh([new_coord,[[0,1,2,3]]],alpha=alpha,c=f_color).lw(1)
                else:
                    mesh = v.Mesh([new_coord,[[0,1,2,3]]],alpha=alpha).lw(1)

            if element_type == 3:
                if i in eq_dict:
                    mesh.name = f"Flow element {i+1}, Force: {eq_dict[i][0]}"
                else:
                    mesh.name = f"Flow element {i+1}"
            elif element_type == 4:
                if i in eq_dict:
                    mesh.name = f"Solid element {i+1}, Force: {eq_dict[i][0]}"
                else:
                    mesh.name = f"Solid element {i+1}"
            elif element_type == 6:
                if i in eq_dict:
                    mesh.name = f"Plate element {i+1}, Force: {eq_dict[i][0]}"
                else:
                    mesh.name = f"Plate element {i+1}"

            meshes.append(mesh)

        if render_nodes == True:
            if element_type == 3:
                nodes = vdu.get_node_elements(coord,scale,alpha,dof,bcPrescr,bc,bc_color,fPrescr,f,f_color,dofs_per_node=1)
            elif element_type == 4 or element_type == 6:
                nodes = vdu.get_node_elements(coord,scale,alpha,dof,bcPrescr,bc,bc_color,fPrescr,f,f_color,dofs_per_node=3)
            plot_window.meshes[plot_window.fig].extend(meshes)
            plot_window.nodes[plot_window.fig].extend(nodes)
            #print("Adding mesh to figure ",plot_window.fig+1)
        else:
            plot_window.meshes[plot_window.fig].extend(meshes)
            #print("Adding mesh to figure ",plot_window.fig+1)

        return meshes
b4728316496221b8c9341dd8ae747f74eb08fbaf
3,635,692
def mypad(x, pad, mode='constant', value=0):
    """ Function to do numpy like padding on tensors. Only works for 2-D
    padding.

    Inputs:
        x (tensor): tensor to pad
        pad (tuple): tuple of (left, right, top, bottom) pad sizes
        mode (str): 'symmetric', 'periodic', 'constant', 'reflect',
            'replicate', or 'zero'. The padding technique.
            (Doc fix: the code dispatches on 'periodic', not 'wrap'.)
        value (float): fill value, used only by the 'constant' mode
    """
    if mode == 'symmetric':
        # Vertical only
        if pad[0] == 0 and pad[1] == 0:
            m1, m2 = pad[2], pad[3]
            l = x.shape[-2]  # noqa
            xe = reflect(np.arange(-m1, l+m2, dtype='int32'), -0.5, l-0.5)
            return x[:, :, xe]
        # horizontal only
        elif pad[2] == 0 and pad[3] == 0:
            m1, m2 = pad[0], pad[1]
            l = x.shape[-1]  # noqa
            xe = reflect(np.arange(-m1, l+m2, dtype='int32'), -0.5, l-0.5)
            return x[:, :, :, xe]
        # Both
        else:
            m1, m2 = pad[0], pad[1]
            l1 = x.shape[-1]
            xe_row = reflect(np.arange(-m1, l1+m2, dtype='int32'), -0.5, l1-0.5)
            m1, m2 = pad[2], pad[3]
            l2 = x.shape[-2]
            xe_col = reflect(np.arange(-m1, l2+m2, dtype='int32'), -0.5, l2-0.5)
            # Bug fix: np.outer with float ones yields float64 index arrays,
            # which torch rejects as tensor indices; cast to int64 (values
            # are exact integers, so this is lossless).
            i = np.outer(xe_col, np.ones(xe_row.shape[0])).astype(np.int64)
            j = np.outer(np.ones(xe_col.shape[0]), xe_row).astype(np.int64)
            return x[:, :, i, j]
    elif mode == 'periodic':
        # Vertical only
        if pad[0] == 0 and pad[1] == 0:
            xe = np.arange(x.shape[-2])
            xe = np.pad(xe, (pad[2], pad[3]), mode='wrap')
            return x[:, :, xe]
        # Horizontal only
        elif pad[2] == 0 and pad[3] == 0:
            xe = np.arange(x.shape[-1])
            xe = np.pad(xe, (pad[0], pad[1]), mode='wrap')
            return x[:, :, :, xe]
        # Both
        else:
            xe_col = np.arange(x.shape[-2])
            xe_col = np.pad(xe_col, (pad[2], pad[3]), mode='wrap')
            xe_row = np.arange(x.shape[-1])
            xe_row = np.pad(xe_row, (pad[0], pad[1]), mode='wrap')
            # Same index-dtype fix as in the 'symmetric' branch above.
            i = np.outer(xe_col, np.ones(xe_row.shape[0])).astype(np.int64)
            j = np.outer(np.ones(xe_col.shape[0]), xe_row).astype(np.int64)
            return x[:, :, i, j]
    elif mode == 'constant' or mode == 'reflect' or mode == 'replicate':
        return F.pad(x, pad, mode, value)
    elif mode == 'zero':
        return F.pad(x, pad)
    else:
        # Typo fix: 'Unkown' -> 'Unknown'
        raise ValueError('Unknown pad type: {}'.format(mode))
48e435e1622a1d74bff0b44e159dc0562e12bb5e
3,635,693
import operator


def molarity(compound, setting=None, moles=None, volume=None):
    """
    Calculations involving the molarity of a compound.

    Returns a value based on the setting. The compound must be an instance
    of the Compound class. The moles/volume values are gathered from the
    compound itself when not passed explicitly.

    **Volume is assumed to be in milliliters.

    Setting --> Molarity: Returns the molarity of the compound from moles and volume.
    Setting --> Moles: Returns the moles of the compound from molarity and volume.
    Setting --> Volume: Returns the volume of the compound from moles and molarity.

    :raises ValueError: if setting is not 'molarity', 'moles' or 'volume'
    :raises AttributeError: if compound is not a Compound or a required
        quantity is missing
    """
    # Initialize settings:
    if setting not in ["molarity", "moles", "volume"]:
        # Fixed missing comma in the option list of the error message.
        raise ValueError("You must choose a setting: molarity, moles, volume.")
    if not isinstance(compound, Compound):
        raise AttributeError("You must include a Compound class as the main argument")
    if compound.volume and not volume:
        volume = compound.volume
    if not compound.volume and not volume and setting in ["molarity", "moles"]:
        raise AttributeError("You must define volume either through the Compound class or through the method.")
    if compound.mole_amount and not moles:
        moles = compound.mole_amount
    if not compound.mole_amount and not moles and setting in ["molarity", "volume"]:
        raise AttributeError("You must define the mole amount either through the Compound class or through the method.")
    if not compound.molarity and setting in ["moles", "volume"]:
        # Fixed message: this guard protects the 'moles'/'volume'
        # calculations, not 'molarity'.
        raise AttributeError("You must define the molarity of the solution if you want to calculate moles or volume.")

    # Calculations (plain operators instead of operator.__truediv__ /
    # operator.__mul__ dunder calls; the unreachable trailing `return None`
    # was removed -- `setting` is validated above).
    if setting == "molarity":
        return moles / volume
    if setting == "moles":
        return volume * compound.molarity
    if setting == "volume":
        return moles / compound.molarity
4fb477115f2c41c5729702b4037aa63abcfaf6f1
3,635,694
def set_initial_det(noa, nob):
    """
    Function
    Set the initial wave function to the RHF/ROHF determinant.

    The determinant is encoded as an integer bit string: the low `2*min(noa,
    nob)` bits hold the doubly occupied spatial orbitals (pattern r'(11){min}'),
    and the bits above them hold the unpaired alpha (r'(01){noa-nob}') or
    beta (r'(10){nob-noa}') electrons, built as a geometric series in base 4.

    Author(s): Takashi Tsuchimochi
    """
    if noa >= nob:
        # Excess alpha electrons: r'(01){noa-nob}' above the paired block.
        # 1 + 4 + ... + 4**(noa-nob-1) = (4**(noa-nob) - 1)/3
        occupied_bits = nob * 2
        unpaired = (4 ** (noa - nob) - 1) // 3
    else:
        # Excess beta electrons: r'(10){nob-noa}' above the paired block.
        # 2 + 8 + ... + 2*4**(nob-noa-1) = 2*(4**(nob-noa) - 1)/3
        occupied_bits = noa * 2
        unpaired = 2 * (4 ** (nob - noa) - 1) // 3
    # Paired block r'(11){min}' is (2**occupied_bits - 1); shift the
    # unpaired pattern above it.
    return 2 ** occupied_bits - 1 + (unpaired << occupied_bits)
53b34999014d0926f02308122ad32f88ea08a802
3,635,695
def face_detection(frame):
    """
    Detect a face in a BGR image using the OpenCV Haar cascade.

    :param frame: BGR image array, or None
    :return: x, y, w, h of the detected face (the last one reported by the
        cascade when several are found), or 0, 0, 0, 0 when frame is None
        or no face is detected
    """
    # Guard clause: nothing to detect without a frame.
    if frame is None:
        return 0, 0, 0, 0
    grayscale = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    detections = faceCascade.detectMultiScale(
        grayscale,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30),
        flags=cv2.CASCADE_SCALE_IMAGE
    )
    # Keep the last detection (matches the original loop's behavior);
    # default to zeros when the cascade finds nothing.
    face_x = face_y = face_w = face_h = 0
    for det_x, det_y, det_w, det_h in detections:
        face_x, face_y, face_w, face_h = det_x, det_y, det_w, det_h
    return face_x, face_y, face_w, face_h
8a75ca46fd78d481fa2ec6d93d7d8123d7bf4463
3,635,696
def coro1():
    """Define a simple generator-based coroutine used as a subgenerator."""
    received = yield 'hello'
    yield received
    # A coroutine may return a value: it is carried on StopIteration.value
    # and becomes the value of an enclosing `yield from` expression.
    return received
1bfcfb150748c002638d2c6536299025864ac1f6
3,635,697
import itertools
import scipy


def plot_combinations_9array3x3_v2(coli_to_test, sorted_combinations, sorted_vals, comb_ind, renaming_fun):
    """Plot the best decompositions of a given set with variables outside the
    matrix for a decomposition of 3 variables (one 3x3 correlation panel per
    decomposition).

    Parameters
    ----------
    coli_to_test : Pandas dataframe
        cell cycle dataframe
    sorted_combinations : array of string lists
        each element of the array is a triplet of variable names
        corresponding to a decomposition (the (3,3) reshape below requires
        exactly 3 variables). The list is sorted from best to worst
        decomposition
    sorted_vals : array of floats
        independence I value for sorted decompositions
    comb_ind : numpy array
        list of indices to plot
    renaming_fun : callable
        function used for renaming variable-name pairs for display

    Returns
    -------
    fig : matplotlib handles
        matplotlib reference to plot
    """
    fig, axes = plt.subplots(figsize=(20,20))
    axes.set_axis_off()
    for ind, comb in enumerate(comb_ind):
        # All ordered pairs of the 3 variables -> 9 pairwise correlations.
        c = list(itertools.product(sorted_combinations[comb], sorted_combinations[comb]))
        c = [[str(x) for x in y] for y in c]
        pairwise = np.reshape([scipy.stats.pearsonr(coli_to_test[c[x][0]],coli_to_test[c[x][1]])[0] for x in np.arange(len(c))],(3,3))
        names = np.array(np.split(np.array([renaming_fun(x) for x in c]),3))
        ax = fig.add_subplot(3, 3, ind+1)
        ax.imshow(pairwise,cmap = 'seismic',vmin=-1,vmax = 1)
        # NOTE(review): `col` is only consumed by the commented-out in-cell
        # labels below, so this loop currently has no visible effect.
        for i in range(names.shape[0]):
            for j in range(names.shape[0]):
                if np.abs(pairwise[i,j])>0.5:
                    col = 'white'
                else:
                    col = 'black'
                #plt.text(x=i-0.1, y=j-0.2, s = names[i,j][0], color = col,size = 30)
                #plt.text(x=i-0.1, y=j+0.2, s = names[i,j][1], color = col, size = 30)
        # Row/column labels placed just outside the matrix.
        for i in range(names.shape[0]):
            plt.text(x=-0.6, y=i+0.2, s = names[i,0][0], color = 'black',size = 35, horizontalalignment = 'right')
            plt.text(x=i-0.0, y=-0.6, s = names[0,i][1], color = 'black',size = 35, horizontalalignment = 'center')
        ax.set_axis_off()
        ax.set_title('I: '+str(np.around(sorted_vals[comb],3)),fontsize = 35, pad = 55)
        # NOTE(review): breaks after panel index 7, i.e. at most 8 panels are
        # drawn even though the grid holds 9 -- confirm whether intended.
        if ind==7:
            break
    fig.subplots_adjust(hspace = 0.4)
    #plt.show()
    return fig
d34684e12b2ffb8157dff33a461ae9ee4f45c818
3,635,698
from typing import Union
from typing import Tuple


def random_split(df: Union[DataFrame, Series],
                 split_size: float,
                 shuffle: bool = True,
                 random_state: int = None) -> Tuple[DataFrame, DataFrame]:
    """Optionally shuffles a DataFrame and splits it into 2 partitions according
    to split_size. Returns a tuple with the split first (partition corresponding
    to split_size, and remaining second).

    Args:
        df (DataFrame): A DataFrame to be split
        split_size (float): Fraction of the sample to be taken
        shuffle (bool): If True shuffles sample rows before splitting;
            if False the original row order is preserved
        random_state (int): If an int is passed, the random process is
            reproducible using the provided seed"""
    assert random_state is None or (isinstance(random_state, int) and random_state >= 0), 'The random seed must be a non-negative integer or None.'
    assert 0 <= split_size <= 1, 'split_size must be a fraction, i.e. a float in the [0,1] interval.'

    # Bug fix: the original only assigned `sample` inside `if shuffle:`,
    # so calling with shuffle=False raised NameError. Without shuffling,
    # split on the DataFrame as-is.
    sample = df.sample(frac=1, random_state=random_state) if shuffle else df

    split_len = int(sample.shape[0] * split_size)
    split = sample.iloc[:split_len]
    remainder = sample.iloc[split_len:]
    return split, remainder
2b69d97d69bebd3257201bf5629bb4e033134f82
3,635,699