content (string, 35–762k chars) · sha1 (string, 40 chars) · id (int64, 0–3.66M)
import torch

def get_default_device():
    """Use the GPU if available, otherwise the CPU."""
    if torch.cuda.is_available():
        return torch.device('cuda')
    return torch.device('cpu')
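# A minimal usage sketch: pick the device once and allocate tensors on it.
device = get_default_device()
x = torch.zeros(2, 3, device=device)  # lives on the GPU when one is available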
ff65f896938b9e53b78d3a6578883129bc886204
31,700
def build_stateless_broadcaster():
  """Just tff.federated_broadcast with empty state, to use as a default."""
  return tff.utils.StatefulBroadcastFn(
      initialize_fn=lambda: (),
      next_fn=lambda state, value: (  # pylint: disable=g-long-lambda
          state, tff.federated_broadcast(value)))
6d78f3f452551cb2eb7640bb09ee7541c5a752bd
31,701
def getTestSuite(select="unit"):
    """
    Get test suite

    select is one of the following:
        "unit"       return suite of unit tests only
        "component"  return suite of unit and component tests
        "all"        return suite of unit, component and integration tests
        "pending"    return suite of pending tests
        name         a single named test to be run
    """
    testdict = {
        "unit": [
            "testLoad",
            "testIntCounterSet",
            "testFloatCounterSet",
            "testIntCounter",
            "testIntCounterPublish",
            "testIntCounter2",
            "testIntCounter3",
            "testIntCounter2Reset",
            "testIntCounter3Reset",
            "testIntCounterDec",
            "testIntCounterDec2",
            "testIntCounterDec3",
            "testIntCounterDec2Reset",
            "testIntCounterDec3Reset",
            "testFloatCounter",
            "testFloatCounterDec",
            "testIntCounterReset",
        ],
        "zzcomponent": [
            "testComponents",
        ],
        "zzintegration": [
            "testIntegration",
        ],
        "zzpending": [
            "testPending",
        ],
    }
    return TestUtils.getTestSuite(TestCounters, testdict, select=select)
1816286c04b8b7a2e994522622a5f567869cad48
31,702
from pathlib import Path
from typing import Union

def find_mo(search_paths=None) -> Union[Path, None]:
    """
    Args:
        search_paths: paths where ModelOptimizer may be found.
            If None, only the default paths are used.
    Returns:
        path to the ModelOptimizer or None if it wasn't found.
    """
    default_mo_path = ('intel', 'openvino', 'deployment_tools', 'model_optimizer')
    default_paths = [Path.home().joinpath(*default_mo_path),
                     Path('/opt').joinpath(*default_mo_path)]
    executable = 'mo.py'
    for path in search_paths or default_paths:
        path = Path(path)
        if not path.is_dir():
            continue
        mo = path / executable
        if not mo.is_file():
            continue
        return mo
    return None
4657e15649692415dd10f2daa6527cade351d8fc
31,703
def autoaugment(dataset_path, repeat_num=1, batch_size=32, target="Ascend"):
    """define dataset with autoaugment"""
    if target == "Ascend":
        device_num, rank_id = _get_rank_info()
    else:
        init("nccl")
        rank_id = get_rank()
        device_num = get_group_size()
    if device_num == 1:
        ds = de.ImageFolderDataset(dataset_path, num_parallel_workers=8, shuffle=True)
    else:
        ds = de.ImageFolderDataset(dataset_path, num_parallel_workers=8, shuffle=True,
                                   num_shards=device_num, shard_id=rank_id)

    image_size = 224
    mean = [0.485 * 255, 0.456 * 255, 0.406 * 255]
    std = [0.229 * 255, 0.224 * 255, 0.225 * 255]

    trans = [
        c_vision.RandomCropDecodeResize(image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
    ]
    post_trans = [
        c_vision.RandomHorizontalFlip(prob=0.5),
        c_vision.Normalize(mean=mean, std=std),
        c_vision.HWC2CHW()
    ]
    dataset = ds.map(operations=trans, input_columns="image")
    dataset = dataset.map(operations=c_vision.RandomSelectSubpolicy(imagenet_policy),
                          input_columns=["image"])
    dataset = dataset.map(operations=post_trans, input_columns="image")
    type_cast_op = c_transforms.TypeCast(mstype.int32)
    dataset = dataset.map(operations=type_cast_op, input_columns="label")

    # apply the batch operation
    dataset = dataset.batch(batch_size, drop_remainder=True)
    # apply the repeat operation
    dataset = dataset.repeat(repeat_num)

    return dataset
596eb26fe376298327900a240d07c89f6914f76d
31,704
def dropout_mask(x, sz, dropout):
    """Applies a dropout mask whose size is determined by the passed argument 'sz'.

    Args:
        x (torch.Tensor): A torch Variable object
        sz (tuple(int, int, int)): The expected size of the new tensor
        dropout (float): The dropout fraction to apply

    This method uses the Bernoulli distribution to decide which activations to keep.
    Additionally, the sampled activations are rescaled by the factor 1/(1 - dropout).

    In the example given below, one can see that approximately a .8 fraction of the
    returned tensor's entries are zero. Rescaling with the factor 1/(1 - 0.8) returns
    a tensor with 5's in the unit places.

    The official link to the pytorch bernoulli function is here:
        http://pytorch.org/docs/master/torch.html#torch.bernoulli

    Examples:
        >>> a_Var = torch.autograd.Variable(torch.Tensor(2, 3, 4).uniform_(0, 1),
        ...                                 requires_grad=False)
        >>> a_Var
        Variable containing:
        (0 ,.,.) =
          0.6890  0.5412  0.4303  0.8918
          0.3871  0.7944  0.0791  0.5979
          0.4575  0.7036  0.6186  0.7217
        (1 ,.,.) =
          0.8354  0.1690  0.1734  0.8099
          0.6002  0.2602  0.7907  0.4446
          0.5877  0.7464  0.4257  0.3386
        [torch.FloatTensor of size 2x3x4]
        >>> a_mask = dropout_mask(a_Var.data, (1, a_Var.size(1), a_Var.size(2)), dropout=0.8)
        >>> a_mask
        (0 ,.,.) =
          0  5  0  0
          0  0  0  5
          5  0  5  0
        [torch.FloatTensor of size 1x3x4]
    """
    return x.new_empty(*sz).bernoulli_(1 - dropout) / (1 - dropout)
ae6aebad62fa97014227f4ac68bca68f2eafe95f
31,705
def two_body_mc_force_en_jit(bond_array_1, c1, etypes1, bond_array_2, c2, etypes2,
                             d1, sig, ls, r_cut, cutoff_func,
                             nspec, spec_mask, bond_mask):
    """Multicomponent two-body force/energy kernel accelerated with
    Numba's njit decorator."""
    kern = 0

    ls1 = 1 / (2 * ls * ls)
    ls2 = 1 / (ls * ls)
    sig2 = sig * sig

    bc1 = spec_mask[c1]
    bc1n = bc1 * nspec

    for m in range(bond_array_1.shape[0]):
        ri = bond_array_1[m, 0]
        ci = bond_array_1[m, d1]
        fi, fdi = cutoff_func(r_cut, ri, ci)

        e1 = etypes1[m]
        be1 = spec_mask[e1]
        btype = bond_mask[bc1n + be1]
        tls1 = ls1[btype]
        tls2 = ls2[btype]
        tsig2 = sig2[btype]

        for n in range(bond_array_2.shape[0]):
            e2 = etypes2[n]

            # check if bonds agree
            if (c1 == c2 and e1 == e2) or (c1 == e2 and c2 == e1):
                rj = bond_array_2[n, 0]
                fj, _ = cutoff_func(r_cut, rj, 0)

                r11 = ri - rj
                B = r11 * ci
                D = r11 * r11
                kern += force_energy_helper(B, D, fi, fj, fdi, tls1, tls2, tsig2)

    return kern
c027a874b1662d0b9c954302cc3d0b26f77f9a21
31,706
def customize_hrm_programme(**attr):
    """Customize hrm_programme controller"""
    # Organisation needs to be an NS/Branch
    ns_only(current.s3db.hrm_programme.organisation_id,
            required=False,
            branches=False,
            )
    return attr
3ef74f74e09b9c4498700b9f0d20245829d48c42
31,707
def freq_count(line, wrddict, win, ctxcounter, wrdcounter):
    """Counts words and context words of a string.

    line: The sentence as a string.
    wrddict: Word index mapping.
    win: Word context window size.
    ctxcounter: Context Counter.
    wrdcounter: Word Counter.
    """
    if not isinstance(line, str):
        raise TypeError("NOT A STRING")
    words = line.split()
    cnt = 0
    for word in words:
        if word in wrddict:
            wrdcounter[word] += 1
            ctx = 1
            while ctx <= win:
                if (cnt + ctx) < len(words):
                    ctxword = words[cnt + ctx]
                    if ctxword in wrddict:
                        ctxcounter[ctxword] += 1
                        ctxcounter[word] += 1
                    ctx += 1
                else:
                    break
        cnt += 1
    return wrdcounter, ctxcounter
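# A minimal usage sketch (toy vocabulary, context window of 2), using
# collections.Counter for both tallies:
from collections import Counter
wrdcounter, ctxcounter = freq_count("a b c b a", {"a": 0, "b": 1, "c": 2}, 2,
                                    Counter(), Counter())
print(wrdcounter["a"], ctxcounter["b"])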
87ebe01058f8958f5ffe06e0944068c10b26ae44
31,708
def _onehot_encoding_unk(x, allowable_set):
    """Maps inputs not in the allowable set to the last element."""
    if x not in allowable_set:
        x = allowable_set[-1]
    return list(map(lambda s: x == s, allowable_set))
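# Usage sketch: values outside the allowable set collapse onto the final
# "unknown" slot (the element list here is illustrative).
print(_onehot_encoding_unk('C', ['C', 'N', 'O', 'unk']))   # [True, False, False, False]
print(_onehot_encoding_unk('Se', ['C', 'N', 'O', 'unk']))  # [False, False, False, True]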
3b386c6640bd5e37a6ab2276e090c8ea56eed5ce
31,709
import re

import numpy as np               # assumed import: np is used throughout the body
from astropy.io import fits      # assumed import: fits.open() matches astropy.io.fits

def getChironSpec(obnm, normalized=True, slit='slit', normmech='flat', returnFlat=False):
    """PURPOSE: To retrieve a CHIRON spectrum given the observation name (obnm)."""
    # extract the date (yymmdd) from the obnm:
    date = re.search(r'chi(\d{6})', obnm).group(1)

    # extract the core of the obnm. This will make the code more robust,
    # allowing people to enter the obnm with or without the 'a' or 'chi'
    # or 'fits', etc. The output obnm is in the format 'chiyymmdd.####'
    obnm = re.search(r'(chi\d{6}\.\d{4})', obnm).group(1)

    scihdu = fits.open('/tous/mir7/fitspec/' + date + '/a' + obnm + '.fits')
    scidata = scihdu[0].data

    if normalized:
        # generate the flat filename:
        flatfn = '/tous/mir7/flats/chi' + date + '.' + slit + 'flat.fits'
        flathdu = fits.open(flatfn)
        flatdata = flathdu[0].data

        # create 2D arrays to store the output (orders, px/order, wav/spec):
        normspecout = np.zeros([flatdata.shape[1], flatdata.shape[2], 2])
        normflatout = np.zeros([flatdata.shape[1], flatdata.shape[2], 2])

        # cycle through orders
        for order in range(flatdata.shape[1]):
            # now retrieve the normalized polynomial fit to the master flat:
            normfit = flatdata[2, 61 - order, :] / np.max(flatdata[2, 61 - order, :])
            normflatout[order, :, 1] = flatdata[1, 61 - order, :] / np.max(flatdata[1, 61 - order, :])
            normflatout[order, :, 0] = scidata[order, :, 0]

            # superimpose stellar spec
            normspec_init = scidata[order, :, 1] / np.max(scidata[order, :, 1])
            normspec = normspec_init / normfit[::-1]

            # determine the number of maximum values to use in the
            # normalization. In this case we will use the top 0.5%, which
            # corresponds to 16 elements for CHIRON:
            nummax = int(np.ceil(0.005 * len(normspec)))

            # now sort normspec and find the median of the `nummax` highest
            # values in the old normspec
            mnhghval = np.median(np.sort(normspec)[-nummax:-1])

            # now renormalize by that value:
            normspecout[order, :, 1] = normspec / mnhghval
            normspecout[order, :, 0] = scidata[order, :, 0]

        if returnFlat:
            return normflatout
        return normspecout
    return scidata
a29090e0838f93feeb8ad758beb1bd563b78178e
31,710
import posixpath

import webapp2  # assumed import: webapp2.Route is used below

def api_routes(api_classes, base_path='/_ah/api', regex='[^/]+'):
    """Creates webapp2 routes for the given Endpoints v1 services.

    Args:
        api_classes: A list of protorpc.remote.Service classes to create routes for.
        base_path: The base path under which all service paths should exist. If
            unspecified, defaults to /_ah/api.
        regex: Regular expression to allow in path parameters.

    Returns:
        A list of webapp2.Routes.
    """
    routes = []

    # Add routes for each class.
    for api_class in api_classes:
        api_base_path = '%s/%s/%s' % (
            base_path, api_class.api_info.name, api_class.api_info.version)
        templates = set()

        # Add routes for each method of each class.
        for _, method in sorted(api_class.all_remote_methods().items()):
            info = method.method_info
            method_path = info.get_path(api_class.api_info)
            method_path = method_path.replace('{', '<').replace('}', ':%s>' % regex)
            t = posixpath.join(api_base_path, method_path)
            http_method = info.http_method.upper() or 'POST'
            handler = path_handler(api_class, method, api_base_path)
            routes.append(webapp2.Route(t, handler, methods=[http_method]))
            templates.add(t)

        # Add routes for HTTP OPTIONS (to add CORS headers) for each method.
        for t in sorted(templates):
            routes.append(webapp2.Route(t, CorsHandler, methods=['OPTIONS']))

    # Add generic routes.
    routes.extend([
        directory_service_route(api_classes, base_path),
        discovery_service_route(api_classes, base_path),
        explorer_proxy_route(base_path),
        explorer_redirect_route(base_path),
    ])
    return routes
47cd1da8300f010e1c3ef7ee8bca21d7139a40ad
31,711
def WrapReportText(text):
    """Helper to allow report string wrapping (e.g. wrap and indent).

    Actually invokes textwrap.fill() which returns a string instead of a list.
    We always double-indent our wrapped blocks.

    Args:
        text: String text to be wrapped.

    Returns:
        String of wrapped and indented text.
    """
    return wrapper.fill(text)
4818f0d777c8165fd7a762033379741781bf48af
31,712
import math

def point_in_wave(point_x, frequency, amplitude, offset_x, offset_y):
    """Returns the specified point x in the wave of specified parameters."""
    return (math.sin((math.pi * point_x) / frequency + offset_x) * amplitude) + offset_y
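# A minimal usage sketch: sample one point of the sine wave with a horizontal
# scale of 10, doubled amplitude, and a vertical offset of 1.
y = point_in_wave(3.0, frequency=10.0, amplitude=2.0, offset_x=0.0, offset_y=1.0)
print(y)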
5a91c9204819492bb3bd42f0d4c9231d39e404d8
31,713
def tile(A, reps):
    """
    Construct an array by repeating A the number of times given by reps.

    If `reps` has length ``d``, the result will have dimension of
    ``max(d, A.ndim)``.

    If ``A.ndim < d``, `A` is promoted to be d-dimensional by prepending new
    axes. So a shape (3,) array is promoted to (1, 3) for 2-D replication,
    or shape (1, 1, 3) for 3-D replication. If this is not the desired
    behavior, promote `A` to d-dimensions manually before calling this
    function.

    If ``A.ndim > d``, `reps` is promoted to `A`.ndim by pre-pending 1's to it.
    Thus for an `A` of shape (2, 3, 4, 5), a `reps` of (2, 2) is treated as
    (1, 1, 2, 2).

    Note : Although tile may be used for broadcasting, it is strongly
    recommended to use numpy's broadcasting operations and functions.

    Parameters
    ----------
    A : array_like
        The input array.
    reps : array_like
        The number of repetitions of `A` along each axis.

    Returns
    -------
    c : ndarray
        The tiled output array.

    See Also
    --------
    repeat : Repeat elements of an array.
    broadcast_to : Broadcast an array to a new shape

    Examples
    --------
    >>> a = np.array([0, 1, 2])
    >>> np.tile(a, 2)
    array([0, 1, 2, 0, 1, 2])
    >>> np.tile(a, (2, 2))
    array([[0, 1, 2, 0, 1, 2],
           [0, 1, 2, 0, 1, 2]])
    >>> np.tile(a, (2, 1, 2))
    array([[[0, 1, 2, 0, 1, 2]],
           [[0, 1, 2, 0, 1, 2]]])

    >>> b = np.array([[1, 2], [3, 4]])
    >>> np.tile(b, 2)
    array([[1, 2, 1, 2],
           [3, 4, 3, 4]])
    >>> np.tile(b, (2, 1))
    array([[1, 2],
           [3, 4],
           [1, 2],
           [3, 4]])

    >>> c = np.array([1, 2, 3, 4])
    >>> np.tile(c, (4, 1))
    array([[1, 2, 3, 4],
           [1, 2, 3, 4],
           [1, 2, 3, 4],
           [1, 2, 3, 4]])
    """
    try:
        tup = tuple(reps)
    except TypeError:
        tup = (reps,)
    d = len(tup)
    if all(x == 1 for x in tup) and isinstance(A, _nx.ndarray):
        # Fixes the problem that the function does not make a copy if A is a
        # numpy array and the repetitions are 1 in all dimensions
        return _nx.array(A, copy=True, subok=True, ndmin=d)
    else:
        # Note that no copy of zero-sized arrays is made. However since they
        # have no data there is no risk of an inadvertent overwrite.
        c = _nx.array(A, copy=False, subok=True, ndmin=d)
    if (d < c.ndim):
        tup = (1,)*(c.ndim-d) + tup
    shape_out = tuple(s*t for s, t in zip(c.shape, tup))
    n = c.size
    if n > 0:
        for dim_in, nrep in zip(c.shape, tup):
            if nrep != 1:
                c = c.reshape(-1, n).repeat(nrep, 0)
            n //= dim_in
    return c.reshape(shape_out)
446247517aaaaecff377571a14384a2bbd0c949f
31,714
import requests

def get_super_user_token(endpoint):
    """Gets the initialized super user token. This is one-time: the token
    can't be fetched again once initialized.

    Args:
        endpoint (str): Quay Endpoint url

    Returns:
        str: Super user token
    """
    data = (
        f'{{"username": "{constants.QUAY_SUPERUSER}", "password": "{constants.QUAY_PW}", '
        f'"email": "quayadmin@example.com", "access_token": true}}'
    )
    r = requests.post(
        f"{endpoint}/{constants.QUAY_USER_INIT}",
        headers={"content-type": "application/json"},
        data=data,
        verify=False,
    )
    return r.json()["access_token"]
bf5782fe3cc563b70d7fbd925b4f06e9d29fba1a
31,715
import torch
from torch.autograd import Variable  # assumed import: Variable is used below

def to_input_variable(sequences, vocab, cuda=False, training=True):
    """
    given a list of sequences,
    return a tensor of shape (max_sent_len, batch_size)
    """
    word_ids = word2id(sequences, vocab)
    sents_t, masks = input_transpose(word_ids, vocab['<pad>'])
    if type(sents_t[0][0]) != list:
        with torch.no_grad():
            sents_var = Variable(torch.LongTensor(sents_t), requires_grad=False)
        if cuda:
            sents_var = sents_var.cuda()  # move to the GPU when requested
    else:
        sents_var = sents_t
    return sents_var
3dea99cdf94a06ce3f1b1be02a49f0d8396cb140
31,716
def create_extreme_conditions_test_matrix(model, filename=None):
    """
    Creates an empty test matrix for evaluating extreme conditions tests.
    After running this function, the user should edit the file and save with
    a separate filename to avoid overwriting.

    Todo: it would be good to make this automatically blank out elements that
    are not influenced by a variable, or *are* the variable, and to omit rows
    that have no consequences because nothing depends on them. Also, to omit
    columns that nothing influences. Also, omit table functions.
    """
    docs = model.doc()
    docs['bounds'] = docs['Unit'].apply(_get_bounds)
    docs['Min'] = docs['bounds'].apply(lambda x: float(x[0].replace('?', '-inf')))
    docs['Max'] = docs['bounds'].apply(lambda x: float(x[1].replace('?', '+inf')))

    collector = []
    for i, row in docs.iterrows():
        collector.append({'Real Name': row['Real Name'],
                          'Comment': row['Comment'],
                          'Value': row['Min'],
                          })
        collector.append({'Real Name': row['Real Name'],
                          'Comment': row['Comment'],
                          'Value': row['Max'],
                          })

    conditions = _pd.DataFrame(collector)
    results = _pd.DataFrame(columns=list(docs['Real Name']))
    cols = ['Real Name', 'Comment', 'Value'] + sorted(list(docs['Real Name']))
    output = _pd.concat([conditions, results])[cols]

    if filename is None:
        return output
    elif filename.split('.')[-1] in ['xls', 'xlsx']:
        output.to_excel(filename, sheet_name='Extreme Conditions', index=False)
    elif filename.split('.')[-1] == 'csv':
        output.to_csv(filename, index=False)
    elif filename.split('.')[-1] == 'tab':
        output.to_csv(filename, sep='\t')
    else:
        raise ValueError('Unknown file extension %s' % filename.split('.')[-1])
d58e5ca0455c20b9b1dddb7b5bd448683294961f
31,717
def map_to_docs(solr_response):
    """Response mapper that only returns the list of result documents."""
    return solr_response['response']['docs']
2661b9075c05a91c241342151d713702973b9c12
31,718
def get_config_type(service_name):
    """get the config type based on service_name"""
    if service_name == "HDFS":
        type = "hdfs-site"
    elif service_name == "HDFS":  # unreachable: shadowed by the identical branch above
        type = "core-site"
    elif service_name == "MAPREDUCE":
        type = "mapred-site"
    elif service_name == "HBASE":
        type = "hbase-site"
    elif service_name == "OOZIE":
        type = "oozie-site"
    elif service_name == "HIVE":
        type = "hive-site"
    elif service_name == "WEBHCAT":
        type = "webhcat-site"
    else:
        type = "global"
    return type
96793f932334eb8e4a5460767a80ee6a989cee22
31,719
from sklearn import linear_model  # assumed import: linear_model.Ridge is used below

def regression_model(X, y, alpha=.5):
    """Trains a simple ridge regression model.

    Args:
        X: feature matrix
        y: target values
        alpha: regularization strength

    Returns:
        model: the fitted regressor
    """
    reg = linear_model.Ridge(alpha=alpha, fit_intercept=True)
    # reg = linear_model.Lasso(alpha=alpha, fit_intercept=True)
    reg.fit(X, y)
    return reg
f15741ac95a8738e031d6eb77f9d5bed76f4958d
31,720
def array2d_export(f, u2d, fmt=None, **kwargs):
    """
    export helper for Util2d instances

    Parameters
    ----------
    f : str
        filename or existing export instance type (NetCdf only for now)
    u2d : Util2d instance
    fmt : str
        output format flag. 'vtk' will export to vtk
    **kwargs : keyword arguments
        min_valid : minimum valid value
        max_valid : maximum valid value
        modelgrid : flopy.discretization.Grid
            model grid instance which will supercede the flopy.model.modelgrid
        if fmt is set to 'vtk', parameters of vtk.export_array
    """
    assert isinstance(u2d, DataInterface), (
        "util2d_helper only helps instances that support DataInterface"
    )
    assert len(u2d.array.shape) == 2, "util2d_helper only supports 2D arrays"

    min_valid = kwargs.get("min_valid", -1.0e9)
    max_valid = kwargs.get("max_valid", 1.0e9)

    modelgrid = u2d.model.modelgrid
    if "modelgrid" in kwargs:
        modelgrid = kwargs.pop("modelgrid")

    if isinstance(f, str) and f.lower().endswith(".nc"):
        f = NetCdf(f, u2d.model, **kwargs)

    if isinstance(f, str) and f.lower().endswith(".shp"):
        name = shapefile_utils.shape_attr_name(u2d.name, keep_layer=True)
        shapefile_utils.write_grid_shapefile(f, modelgrid, {name: u2d.array})
        return
    elif isinstance(f, str) and f.lower().endswith(".asc"):
        export_array(modelgrid, f, u2d.array, **kwargs)
        return
    elif isinstance(f, NetCdf) or isinstance(f, dict):
        # try to mask the array - assume layer 1 ibound is a good mask
        # f.log("getting 2D array for {0}".format(u2d.name))
        array = u2d.array
        # f.log("getting 2D array for {0}".format(u2d.name))

        with np.errstate(invalid="ignore"):
            if array.dtype not in [int, np.int, np.int32, np.int64]:
                if (
                    modelgrid.idomain is not None
                    and "ibound" not in u2d.name.lower()
                    and "idomain" not in u2d.name.lower()
                ):
                    array[modelgrid.idomain[0, :, :] == 0] = np.NaN
                array[array <= min_valid] = np.NaN
                array[array >= max_valid] = np.NaN
                mx, mn = np.nanmax(array), np.nanmin(array)
            else:
                mx, mn = np.nanmax(array), np.nanmin(array)
                array[array <= min_valid] = netcdf.FILLVALUE
                array[array >= max_valid] = netcdf.FILLVALUE
                if (
                    modelgrid.idomain is not None
                    and "ibound" not in u2d.name.lower()
                    and "idomain" not in u2d.name.lower()
                    and "icbund" not in u2d.name.lower()
                ):
                    array[modelgrid.idomain[0, :, :] == 0] = netcdf.FILLVALUE

        var_name = u2d.name
        if isinstance(f, dict):
            f[var_name] = array
            return f

        array[np.isnan(array)] = f.fillvalue
        units = "unitless"
        if var_name in NC_UNITS_FORMAT:
            units = NC_UNITS_FORMAT[var_name].format(f.grid_units, f.time_units)
        precision_str = NC_PRECISION_TYPE[u2d.dtype]
        if var_name in NC_LONG_NAMES:
            attribs = {"long_name": NC_LONG_NAMES[var_name]}
        else:
            attribs = {"long_name": var_name}
        attribs["coordinates"] = "latitude longitude"
        attribs["units"] = units
        attribs["min"] = mn
        attribs["max"] = mx
        if np.isnan(attribs["min"]) or np.isnan(attribs["max"]):
            raise Exception("error processing {0}: all NaNs".format(var_name))
        try:
            var = f.create_variable(
                var_name,
                attribs,
                precision_str=precision_str,
                dimensions=f.dimension_names[1:],
            )
        except Exception as e:
            estr = "error creating variable {0}:\n{1}".format(var_name, str(e))
            f.logger.warn(estr)
            raise Exception(estr)
        try:
            var[:] = array
        except Exception as e:
            estr = "error setting array to variable {0}:\n{1}".format(
                var_name, str(e)
            )
            f.logger.warn(estr)
            raise Exception(estr)
        return f
    elif fmt == "vtk":
        # call vtk array export to folder
        name = kwargs.get("name", u2d.name)
        nanval = kwargs.get("nanval", -1e20)
        smooth = kwargs.get("smooth", False)
        point_scalars = kwargs.get("point_scalars", False)
        vtk_grid_type = kwargs.get("vtk_grid_type", "auto")
        true2d = kwargs.get("true2d", False)
        binary = kwargs.get("binary", False)
        vtk.export_array(
            u2d.model,
            u2d.array,
            f,
            name,
            nanval=nanval,
            smooth=smooth,
            point_scalars=point_scalars,
            array2d=True,
            vtk_grid_type=vtk_grid_type,
            true2d=true2d,
            binary=binary,
        )
    else:
        raise NotImplementedError("unrecognized export argument:{0}".format(f))
59c7962d24a688eabebe069500f49d01aef80c28
31,721
def not_daily(request):
    """
    Several timedelta-like and DateOffset instances that are _not_
    compatible with Daily frequencies.
    """
    return request.param
e30563d0b6ee62cd995908045ddc356ca58b5796
31,722
import itertools
from typing import List

from pandas.core.reshape.concat import concat

def get_dummies(
    data,
    prefix=None,
    prefix_sep="_",
    dummy_na=False,
    columns=None,
    sparse=False,
    drop_first=False,
    dtype=None,
) -> "DataFrame":
    """
    Convert categorical variable into dummy/indicator variables.

    Parameters
    ----------
    data : array-like, Series, or DataFrame
        Data of which to get dummy indicators.
    prefix : str, list of str, or dict of str, default None
        String to append DataFrame column names.
        Pass a list with length equal to the number of columns
        when calling get_dummies on a DataFrame. Alternatively, `prefix`
        can be a dictionary mapping column names to prefixes.
    prefix_sep : str, default '_'
        If appending prefix, separator/delimiter to use. Or pass a
        list or dictionary as with `prefix`.
    dummy_na : bool, default False
        Add a column to indicate NaNs, if False NaNs are ignored.
    columns : list-like, default None
        Column names in the DataFrame to be encoded.
        If `columns` is None then all the columns with
        `object` or `category` dtype will be converted.
    sparse : bool, default False
        Whether the dummy-encoded columns should be backed by
        a :class:`SparseArray` (True) or a regular NumPy array (False).
    drop_first : bool, default False
        Whether to get k-1 dummies out of k categorical levels by removing the
        first level.
    dtype : dtype, default np.uint8
        Data type for new columns. Only a single dtype is allowed.

        .. versionadded:: 0.23.0

    Returns
    -------
    DataFrame
        Dummy-coded data.

    See Also
    --------
    Series.str.get_dummies : Convert Series to dummy codes.

    Examples
    --------
    >>> s = pd.Series(list('abca'))

    >>> pd.get_dummies(s)
       a  b  c
    0  1  0  0
    1  0  1  0
    2  0  0  1
    3  1  0  0

    >>> s1 = ['a', 'b', np.nan]

    >>> pd.get_dummies(s1)
       a  b
    0  1  0
    1  0  1
    2  0  0

    >>> pd.get_dummies(s1, dummy_na=True)
       a  b  NaN
    0  1  0    0
    1  0  1    0
    2  0  0    1

    >>> df = pd.DataFrame({'A': ['a', 'b', 'a'], 'B': ['b', 'a', 'c'],
    ...                    'C': [1, 2, 3]})

    >>> pd.get_dummies(df, prefix=['col1', 'col2'])
       C  col1_a  col1_b  col2_a  col2_b  col2_c
    0  1       1       0       0       1       0
    1  2       0       1       1       0       0
    2  3       1       0       0       0       1

    >>> pd.get_dummies(pd.Series(list('abcaa')))
       a  b  c
    0  1  0  0
    1  0  1  0
    2  0  0  1
    3  1  0  0
    4  1  0  0

    >>> pd.get_dummies(pd.Series(list('abcaa')), drop_first=True)
       b  c
    0  0  0
    1  1  0
    2  0  1
    3  0  0
    4  0  0

    >>> pd.get_dummies(pd.Series(list('abc')), dtype=float)
         a    b    c
    0  1.0  0.0  0.0
    1  0.0  1.0  0.0
    2  0.0  0.0  1.0
    """
    dtypes_to_encode = ["object", "category"]

    if isinstance(data, DataFrame):
        # determine columns being encoded
        if columns is None:
            data_to_encode = data.select_dtypes(include=dtypes_to_encode)
        elif not is_list_like(columns):
            raise TypeError("Input must be a list-like for parameter `columns`")
        else:
            data_to_encode = data[columns]

        # validate prefixes and separator to avoid silently dropping cols
        def check_len(item, name):
            if is_list_like(item):
                if not len(item) == data_to_encode.shape[1]:
                    len_msg = (
                        f"Length of '{name}' ({len(item)}) did not match the "
                        "length of the columns being encoded "
                        f"({data_to_encode.shape[1]})."
                    )
                    raise ValueError(len_msg)

        check_len(prefix, "prefix")
        check_len(prefix_sep, "prefix_sep")

        if isinstance(prefix, str):
            prefix = itertools.cycle([prefix])
        if isinstance(prefix, dict):
            prefix = [prefix[col] for col in data_to_encode.columns]

        if prefix is None:
            prefix = data_to_encode.columns

        # validate separators
        if isinstance(prefix_sep, str):
            prefix_sep = itertools.cycle([prefix_sep])
        elif isinstance(prefix_sep, dict):
            prefix_sep = [prefix_sep[col] for col in data_to_encode.columns]

        with_dummies: List[DataFrame]
        if data_to_encode.shape == data.shape:
            # Encoding the entire df, do not prepend any dropped columns
            with_dummies = []
        elif columns is not None:
            # Encoding only cols specified in columns. Get all cols not in
            # columns to prepend to result.
            with_dummies = [data.drop(columns, axis=1)]
        else:
            # Encoding only object and category dtype columns. Get remaining
            # columns to prepend to result.
            with_dummies = [data.select_dtypes(exclude=dtypes_to_encode)]

        for (col, pre, sep) in zip(data_to_encode.items(), prefix, prefix_sep):
            # col is (column_name, column), use just column data here
            dummy = _get_dummies_1d(
                col[1],
                prefix=pre,
                prefix_sep=sep,
                dummy_na=dummy_na,
                sparse=sparse,
                drop_first=drop_first,
                dtype=dtype,
            )
            with_dummies.append(dummy)

        result = concat(with_dummies, axis=1)
    else:
        result = _get_dummies_1d(
            data,
            prefix,
            prefix_sep,
            dummy_na,
            sparse=sparse,
            drop_first=drop_first,
            dtype=dtype,
        )
    return result
a3be8d5a3f56d438d1182254749b2967d7bf48fd
31,723
import io

import flask        # assumed import: flask.url_for is used below
import xlsxwriter   # assumed import: xlsxwriter.Workbook is used below

def get_call_xlsx(call, submitted=False, proposals=None):
    """Return the content of an XLSX file for all proposals in a call.
    Optionally only the submitted ones.
    Optionally for the given list proposals.
    """
    if proposals is None:
        title = f"Proposals in {call['identifier']}"
        proposals = get_call_proposals(call, submitted=submitted)
    else:
        title = f"Selected proposals in {call['identifier']}"
    score_fields = get_review_score_fields(call, proposals)
    rank_fields, rank_errors = get_review_rank_fields_errors(call, proposals)
    output = io.BytesIO()
    wb = xlsxwriter.Workbook(output, {'in_memory': True})
    head_text_format = wb.add_format({'bold': True,
                                      'text_wrap': True,
                                      'bg_color': '#9ECA7F',
                                      'font_size': 15,
                                      'align': 'center',
                                      'border': 1})
    normal_text_format = wb.add_format({'font_size': 14,
                                        'align': 'left',
                                        'valign': 'vcenter'})
    ws = wb.add_worksheet(title[:31])
    ws.freeze_panes(1, 1)
    ws.set_row(0, 60, head_text_format)
    ws.set_column(1, 1, 40, normal_text_format)
    ws.set_column(2, 2, 10, normal_text_format)
    ws.set_column(3, 4, 20, normal_text_format)

    nrow = 0
    row = ['Proposal', 'Proposal title']
    row.extend(['Submitted', 'Submitter', 'Email', 'Affiliation'])
    ncol = len(row)
    for field in call['proposal']:
        row.append(field['title'] or field['identifier'].capitalize())
        if field['type'] in (constants.LINE, constants.EMAIL):
            ws.set_column(ncol, ncol, 40, normal_text_format)
        elif field['type'] == constants.TEXT:
            ws.set_column(ncol, ncol, 60, normal_text_format)
        ncol += 1
    allow_view_reviews = anubis.call.allow_view_reviews(call)
    if allow_view_reviews:
        for rf in rank_fields.values():
            row.append(f"Reviews {rf['title']}: ranking factor")
            row.append(f"Reviews {rf['title']}: stdev")
        if len(score_fields) >= 2:
            row.append("Reviews all scores: mean of means")
            row.append("Reviews all scores: stdev of means")
        for rf in score_fields.values():
            row.append(f"Reviews {rf['title']}: N")
            row.append(f"Reviews {rf['title']}: mean")
            row.append(f"Reviews {rf['title']}: stdev")
    allow_view_decisions = anubis.call.allow_view_decisions(call)
    if allow_view_decisions:
        row.append('Decision')
        row.append('Decision status')
        for field in call['decision']:
            if not field.get('banner'):
                continue
            title = field['title'] or field['identifier'].capitalize()
            row.append(title)
    ws.write_row(nrow, 0, row)
    nrow += 1

    for proposal in proposals:
        ncol = 0
        ws.write_url(nrow, ncol,
                     flask.url_for('proposal.display',
                                   pid=proposal['identifier'],
                                   _external=True),
                     string=proposal['identifier'])
        ncol += 1
        ws.write_string(nrow, ncol, proposal.get('title') or '')
        ncol += 1
        ws.write_string(nrow, ncol, proposal.get('submitted') and 'yes' or 'no')
        ncol += 1
        user = anubis.user.get_user(username=proposal['user'])
        ws.write_string(nrow, ncol, utils.get_fullname(user))
        ncol += 1
        ws.write_string(nrow, ncol, user.get('email') or '')
        ncol += 1
        ws.write_string(nrow, ncol, user.get('affiliation') or '')
        ncol += 1
        for field in call['proposal']:
            value = proposal['values'].get(field['identifier'])
            if value is None:
                ws.write_string(nrow, ncol, '')
            elif field['type'] == constants.TEXT:
                ws.write_string(nrow, ncol, value)
            elif field['type'] == constants.DOCUMENT:
                ws.write_url(nrow, ncol,
                             flask.url_for('proposal.document',
                                           pid=proposal['identifier'],
                                           fid=field['identifier'],
                                           _external=True),
                             string='Download')
            elif field['type'] == constants.SELECT:
                if isinstance(value, list):  # Multiselect
                    ws.write(nrow, ncol, '\n'.join(value))
                else:
                    ws.write(nrow, ncol, value)
            else:
                ws.write(nrow, ncol, value)
            ncol += 1
        if allow_view_reviews:
            for id in rank_fields.keys():
                value = proposal['ranking'][id]['factor']
                if value is None:
                    ws.write_string(nrow, ncol, '')
                else:
                    ws.write_number(nrow, ncol, value)
                ncol += 1
                value = proposal['ranking'][id]['stdev']
                if value is None:
                    ws.write_string(nrow, ncol, '')
                else:
                    ws.write_number(nrow, ncol, value)
                ncol += 1
            if len(score_fields) >= 2:
                value = proposal['scores']['__mean__']
                if value is None:
                    ws.write_string(nrow, ncol, '')
                else:
                    ws.write_number(nrow, ncol, value)
                ncol += 1
                value = proposal['scores']['__stdev__']
                if value is None:
                    ws.write_string(nrow, ncol, '')
                else:
                    ws.write_number(nrow, ncol, value)
                ncol += 1
            for id in score_fields:
                ws.write_number(nrow, ncol, proposal['scores'][id]['n'])
                ncol += 1
                value = proposal['scores'][id]['mean']
                if value is None:
                    ws.write_string(nrow, ncol, '')
                else:
                    ws.write_number(nrow, ncol, value)
                ncol += 1
                value = proposal['scores'][id]['stdev']
                if value is None:
                    ws.write_string(nrow, ncol, '')
                else:
                    ws.write_number(nrow, ncol, value)
                ncol += 1
        if allow_view_decisions:
            decision = anubis.decision.get_decision(proposal.get('decision')) or {}
            if decision:
                verdict = decision.get('verdict')
                if verdict:
                    ws.write(nrow, ncol, 'Accepted')
                elif verdict is None:
                    ws.write(nrow, ncol, 'Undecided')
                else:
                    ws.write(nrow, ncol, 'Declined')
            else:
                ws.write(nrow, ncol, '-')
            ncol += 1
            if decision.get('finalized'):
                ws.write(nrow, ncol, 'Finalized')
            else:
                ws.write(nrow, ncol, '-')
            ncol += 1
            for field in call['decision']:
                if not field.get('banner'):
                    continue
                if decision.get('finalized'):
                    value = decision['values'].get(field['identifier'])
                    ws.write(nrow, ncol, value)
                else:
                    ws.write_string(nrow, ncol, '')
                ncol += 1
        nrow += 1

    wb.close()
    return output.getvalue()
2077b0b38262d4dff83ebaccc808da3a4e728992
31,724
import os

def example_filename(fn):
    """
    Return the full path of a data file that ships with gffutils.
    """
    return os.path.join(HERE, 'test', 'data', fn)
2058bc3fd3c6e603f249855b9f55111f015b70c3
31,725
import numpy as np  # assumed import: np.array is used below

def to_array(t):
    """
    Converts a taco tensor to a NumPy array.

    This always copies the tensor. To avoid the copy for dense tensors,
    see the notes section.

    Parameters
    ----------
    t: tensor
        A taco tensor to convert to a NumPy array.

    Notes
    -----
    Dense tensors export python's buffer interface. As a result, they can be
    converted to NumPy arrays using ``np.array(tensor, copy=False)``.
    Attempting to do this for sparse tensors throws an error. Note that as a
    result of exporting the buffer interface, dense tensors can also be
    converted to eigen or any other library supporting this interface.

    Also it is very important to note that if requesting a NumPy view of data
    owned by taco, taco will mark the array as read only, meaning the user
    cannot write to that data without using the taco reference. This is
    needed to avoid raising issues with taco's delayed execution mechanism.

    Examples
    --------
    We first look at a simple use of to_array

    >>> import pytaco as pt
    >>> t = pt.tensor([2, 2], [pt.dense, pt.compressed])
    >>> t.insert([0, 0], 10)
    >>> t.to_array()[0, 0]
    10.0

    One could choose to use np.array if a copy is not needed

    >>> import pytaco as pt
    >>> import numpy as np
    >>> t = pt.tensor([2, 2], pt.dense)
    >>> t.insert([0, 0], 10)
    >>> a = np.array(t, copy=False)
    >>> a
    array([[10.,  0.],
           [ 0.,  0.]], dtype=float32)
    >>> t.insert([0, 0], 100)  # Note that insert increments instead of setting!
    >>> t.to_array()[0, 0]
    110.0

    Returns
    -------
    arr: numpy.array
        A NumPy array containing a copy of the data in the tensor object t.
    """
    return np.array(t.to_dense(), copy=True)
29df47e3535c610954e8f1bae828af80ad6ae9f7
31,726
def perform_operation(operator_sign: str, num1: float, num2: float) -> float:
    """Perform the operation on the two numbers.

    Parameters
    ----------
    operator_sign : str
        Plus, minus, multiplication or division.
    num1 : float
        Number 1.
    num2 : float
        Number 2.

    Returns
    -------
    float
        Result of the operation.

    Raises
    ------
    ValueError
        Raise when the operator is not supported.
    """
    operation_object = OPERATIONS.get(operator_sign, None)
    if operation_object is not None:
        return operation_object(num1, num2)
    raise ValueError(f"Not supported operator: {operator_sign}")
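# The OPERATIONS lookup is defined elsewhere in the module; a plausible
# (assumed, illustrative) version built on the operator module:
import operator
OPERATIONS = {"+": operator.add, "-": operator.sub,
              "*": operator.mul, "/": operator.truediv}
print(perform_operation("*", 6.0, 7.0))  # 42.0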
e515a103a47b32e2a7197e10a1ad7a395433e7d9
31,727
def whitespace_tokenize(subtokens):
    """An implementation of BERT's whitespace tokenizer that preserves space."""
    return split_subtokens_on(
        subtokens, lambda char: char.isspace(), are_good=True)
09e451a80b8df66ce0a4401bf3ff681dc9c1b1da
31,728
import skyfield.jpllib
import skyfield.timelib
from skyfield.framelib import ecliptic_frame  # assumed import for the frame used below

def moon_phase(
    ephemerides: skyfield.jpllib.SpiceKernel,
    time: skyfield.timelib.Timescale,
) -> float:
    """Calculate the phase angle of the Moon.

    This will be 0 degrees at new moon, 90 degrees at first quarter,
    180 degrees at full moon, etc.
    """
    sun = ephemerides[Planets.SUN.value]
    earth = ephemerides[Planets.EARTH.value]
    moon = ephemerides[Planets.MOON.value]

    apparent_sun = earth.at(time).observe(sun).apparent()
    _, solar_longitude, _ = apparent_sun.frame_latlon(ecliptic_frame)

    apparent_moon = earth.at(time).observe(moon).apparent()
    _, lunar_longitude, _ = apparent_moon.frame_latlon(ecliptic_frame)

    return (lunar_longitude.degrees - solar_longitude.degrees) % 360
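# Usage sketch (commented out: it assumes a downloaded ephemeris file and the
# module-level Planets enum mapping names to SPICE targets):
# from skyfield.api import load
# eph = load('de421.bsp')
# ts = load.timescale()
# print(moon_phase(eph, ts.now()))  # e.g. ~90.0 near first quarter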
8a24d3166816ba42f150866f89fbbfa98f418ed1
31,729
from functools import lru_cache

@lru_cache(maxsize=None)
def fib_functools(n):
    """Return nth fibonacci number starting at fib(1) == 0 using a functools decorator."""
    # incorrect fib, but the tests expect it
    if n == 0:
        return 1
    if n in [1, 2]:
        return n - 1
    return fib_functools(n - 1) + fib_functools(n - 2)
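# The memoized recursion makes large n cheap; values follow the snippet's
# (deliberately offset) convention:
print([fib_functools(n) for n in range(1, 8)])  # [0, 1, 1, 2, 3, 5, 8]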
335908076cff922e9a27dbb2a50e88901fd7e637
31,730
def sync_filter(func, *iterables):
    """
    Filter multiple iterables at once, selecting values at index i such that
    func(iterables[0][i], iterables[1][i], ...) is True
    """
    return tuple(
        zip(*tuple(i for i in zip(*iterables) if func(*i)))
    ) or ((),) * len(iterables)
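# A minimal usage sketch: keep pairs where the first element is even.
xs, ys = sync_filter(lambda a, b: a % 2 == 0, [1, 2, 3, 4], ['a', 'b', 'c', 'd'])
print(xs, ys)  # (2, 4) ('b', 'd')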
7a2ab5e6356dadff0fe78d3f2bb0da584e0ff41b
31,731
def visit_hostname(hostname):
    """
    Have a chance to visit a hostname before actually using it.

    :param hostname: The original hostname.
    :returns: The hostname with the necessary changes.
    """
    for processor in [hostname_ssl_migration,
                      hostname_tld_migration,
                      ]:
        hostname = processor(hostname)
    return hostname
dd8d57a88bd5951d9748c362112954e4549cdd6c
31,732
import asyncio as aio  # assumed import: `aio` is used for all_tasks/current_task/gather below

async def wait_for_other(client):
    """Await other tasks except the current one."""
    base_tasks = aio.all_tasks()

    async def wait_for_other():
        ignore = list(base_tasks) + [aio.current_task()]
        while len(tasks := [t for t in aio.all_tasks() if t not in ignore]):
            await aio.gather(*tasks, return_exceptions=True)

    return wait_for_other
ab276973862fd89ba935d70fb2fb72dbd6fe7cfa
31,733
import os
from json import loads  # assumed import: the body parses each file with loads()

def find_stored_stat(directory, this_func, oresult):
    """Compute stats from the data saved in a directory.

    Input:
        directory -- location of json files to be scanned.
        this_func -- function to be run against the entries found
        oresult   -- dictionary saving the results of this_func calls
    """
    jfiles = os.listdir(directory)
    for fname in jfiles:
        ofname = "%s%s%s" % (directory, os.sep, fname)
        with open(ofname, 'r') as infile:
            odata = infile.read()
            odict = loads(odata)
            for ikey in odict:
                oresult[ikey] = this_func(odict[ikey])
    return oresult
1726997fe091e62c02395536bc1746fba4420f02
31,734
import nibabel as nib                       # assumed import: nib.Nifti1Image is used below
import numpy as np                          # assumed import: np.iterable/np.mean are used below
from dipy.align import affine_registration  # assumed import for the registration call below

def register_series(series, ref, pipeline):
    """Register a series to a reference image.

    Parameters
    ----------
    series : Nifti1Image object
        The data is 4D with the last dimension separating different 3D volumes
    ref : Nifti1Image or integer or iterable

    Returns
    -------
    transformed_list, affine_list
    """
    if isinstance(ref, nib.Nifti1Image):
        static = ref
        static_data = static.get_fdata()
        s_aff = static.affine
        moving = series
        moving_data = moving.get_fdata()
        m_aff = moving.affine
    elif isinstance(ref, int) or np.iterable(ref):
        data = series.get_fdata()
        idxer = np.zeros(data.shape[-1]).astype(bool)
        idxer[ref] = True
        static_data = data[..., idxer]
        if len(static_data.shape) > 3:
            static_data = np.mean(static_data, -1)
        moving_data = data[..., ~idxer]
        m_aff = s_aff = series.affine

    affine_list = []
    transformed_list = []
    for ii in range(moving_data.shape[-1]):
        this_moving = moving_data[..., ii]
        transformed, affine = affine_registration(this_moving, static_data,
                                                  moving_affine=m_aff,
                                                  static_affine=s_aff,
                                                  pipeline=pipeline)
        transformed_list.append(transformed)
        affine_list.append(affine)

    return transformed_list, affine_list
3afefd4cf1f33cba1d04bd49437e33cfcfbdb578
31,735
import random

def normal27(startt, endt, money2, first, second, third, forth, fifth, sixth, seventh,
             zz1, zz2, bb1, bb2, bb3, aa1, aa2):
    """Generate one normal (non-fraud) transfer record between two dates."""
    # source and destination id generation
    idvariz = random.choice(zz2)
    idgirande = random.choice(aa2)

    # type of banking work, label of fraud and type of fraud
    first.append("transfer")
    second.append(idvariz)
    third.append(idgirande)
    sixth.append("0")
    seventh.append("none")

    # amount of money generation
    numberofmoney = random.randrange(50000, money2)
    forth.append(numberofmoney)

    # date and time generated randomly between two dates
    final = randomDate(startt, endt, random.random())
    fifth.append(final)

    return (first, second, third, forth, fifth, sixth, seventh)
469c0a88a77b083d666e938d4d13199987918e1d
31,736
def linear_diophantine(a, b, c):
    """Solve ax + by = c, where x, y are integers

    1. solution exists iff c % gcd(a,b) = 0
    2. all solutions have form (x0 + b'k, y0 - a'k)

    Returns
    -------
    None if no solution exists
    (x0, y0, a', b') otherwise
    """
    # d = pa + qb
    p, q, d = extended_euclidian(a, b)
    if d == 0 or c % d != 0:
        return None
    # ax + by = c  <=>  a'x + b'y = c'
    a, b, c = a // d, b // d, c // d
    return p * c, q * c, a, b
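# A minimal sketch of the extended Euclidean helper the function relies on
# (assumed implementation; the original defines it elsewhere), plus a check:
def extended_euclidian(a, b):
    """Return (p, q, d) with p*a + q*b == d == gcd(a, b)."""
    if b == 0:
        return 1, 0, a
    p, q, d = extended_euclidian(b, a % b)
    return q, p - (a // b) * q, d

x0, y0, ap, bp = linear_diophantine(6, 10, 8)  # solve 6x + 10y = 8
assert 6 * x0 + 10 * y0 == 8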
6b5fdebe7508249978ea97f0a40330d2ed2243b8
31,737
from sqlalchemy import and_  # and_ is called with three clauses below, so this is SQLAlchemy's and_, not operator.and_

def count_per_packet_loss(organization_id, asset_type=None, asset_status=None,
                          data_collector_ids=None, gateway_ids=None, device_ids=None,
                          min_signal_strength=None, max_signal_strength=None,
                          min_packet_loss=None, max_packet_loss=None):
    """
    Count assets (devices+gateways) grouped by specific ranges of packet loss values

    Parameters:
    - asset_type: for filtering, count only this type of asset ("device", "gateway" or "none" for no assets).
    - asset_status: for filtering, count only assets with this status ("connected" or "disconnected").
    - data_collector_ids[]: for filtering, count only the assets connected to ANY one of these data collectors.
    - gateway_ids[]: for filtering, count only the assets connected to ANY one of these gateways.
    - device_ids[]: for filtering, list only the assets related to ANY of these devices
    - min_signal_strength: for filtering, count only the assets with signal strength not lower than this value (dBm)
    - max_signal_strength: for filtering, count only the assets with signal strength not higher than this value (dBm)
    - min_packet_loss: for filtering, count only the assets with packet loss not lower than this value (percentage)
    - max_packet_loss: for filtering, count only the assets with packet loss not higher than this value (percentage)

    Returns:
    - List of dicts, where each dict has the packet loss range id and name, and the count of assets
    """
    # The packet loss ranges are defined as [L,R) = [range_limits[i], range_limits[i+1]) for every 0 <= i <= 9
    range_limits = [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 101]
    range_names = ['[0,10)', '[10,20)', '[20,30)', '[30,40)', '[40,50)',
                   '[50,60)', '[60,70)', '[70,80)', '[80,90)', '[90,100]']

    packets_up = build_count_subquery(CounterType.PACKETS_UP)
    packets_down = build_count_subquery(CounterType.PACKETS_DOWN)
    packets_lost = build_count_subquery(CounterType.PACKETS_LOST)

    dev_query = db.session.query()
    gtw_query = db.session.query()
    for i in range(0, len(range_names)):
        name = range_names[i]
        L = range_limits[i]
        R = range_limits[i+1]
        dev_query = dev_query.add_column(func.count(distinct(Device.id)).filter(and_(
            packets_up.c.count + packets_down.c.count + packets_lost.c.count > 0,
            L <= 100*packets_lost.c.count/(packets_up.c.count + packets_down.c.count + packets_lost.c.count),
            R > 100*packets_lost.c.count/(packets_up.c.count + packets_down.c.count + packets_lost.c.count),
        )).label(name))
        # Gateways are not considered because they don't have the loss value
        gtw_query = gtw_query.add_column(expression.literal_column("0").label(name))

    dev_query = dev_query.\
        select_from(Device).\
        filter(Device.organization_id == organization_id).\
        filter(Device.pending_first_connection == False).\
        join(packets_up, Device.id == packets_up.c.device_id).\
        join(packets_down, Device.id == packets_down.c.device_id).\
        join(packets_lost, Device.id == packets_lost.c.device_id)

    queries = add_filters(
        dev_query=dev_query,
        gtw_query=gtw_query,
        asset_type=asset_type,
        asset_status=asset_status,
        data_collector_ids=data_collector_ids,
        gateway_ids=gateway_ids,
        device_ids=device_ids,
        min_signal_strength=min_signal_strength,
        max_signal_strength=max_signal_strength,
        min_packet_loss=min_packet_loss,
        max_packet_loss=max_packet_loss)
    dev_query = queries[0]
    gtw_query = queries[1]

    result = query_for_count(dev_query=dev_query, gtw_query=gtw_query, asset_type=asset_type)

    counts = defaultdict(lambda: {'name': None, 'count': 0})
    if asset_type != 'none':  # `is not` on a string literal only works by accident; compare by value
        for row in result:
            if len(row) != len(range_names):
                log.error(f"Length of range_names and length of row in packet loss query result don't match ({len(range_names)}, {len(row)})")
                raise Exception()
            for i in range(0, len(row)):
                name = range_names[i]
                L = range_limits[i]
                R = range_limits[i+1]
                counts[(L, R)]['name'] = name
                counts[(L, R)]['count'] += row[i]
    else:
        for i in range(0, len(range_names)):
            name = range_names[i]
            L = range_limits[i]
            R = range_limits[i+1]
            counts[(L, R)]['name'] = name
            counts[(L, R)]['count'] = 0

    return [{'id': k, 'name': v['name'], 'count': v['count']} for k, v in counts.items()]
bc202c8e0e77921f74281ff58857659279dba8f7
31,738
import networkx as nx  # assumed import: nx.empty_graph() is used below

def sample_nodes(g, p):
    """
    Obtains a sampled network via Bernoulli node sampling.

    For each node in g, sample it with probability p, and add edge (i, j)
    only if both nodes i and j have been sampled. A reference sketch of the
    intended solution follows this scaffold.

    Parameters
    ----------
    g: a networkx graph object
    p: sampling probability for each node
    """
    # Initialize empty network
    g_new = nx.empty_graph()
    ###TIP: #TODO: Write code for sampling. Iterate over nodes, and add to g_new with probability p
    ###TIP: #TODO add edges if both nodes in an edge have been observed.
    # YOUR CODE HERE
    return g_new
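# A reference sketch of the sampling the TODOs describe (an assumed solution,
# kept separate so the exercise scaffold above stays intact):
import random

def sample_nodes_ref(g, p):
    g_new = nx.empty_graph()
    # keep each node independently with probability p
    kept = {v for v in g.nodes() if random.random() < p}
    g_new.add_nodes_from(kept)
    # keep an edge only when both endpoints were sampled
    g_new.add_edges_from((u, v) for u, v in g.edges() if u in kept and v in kept)
    return g_new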
d623d232425b9d6099b49506c2ccec09ef512b1d
31,739
import os
from datetime import datetime
from pathlib import Path

def JAlien(commands: str = '') -> int:
    """Main entry-point for interaction with AliEn"""
    global AlienSessionInfo, _JSON_OUT
    import_aliases()
    wb = None

    # Command mode interaction
    if commands:
        AlienSessionInfo['exitcode'] = ProcessCommandChain(wb, commands)
        return AlienSessionInfo['exitcode']

    # Start interactive mode; connection recovery and exception treatment
    # happen in AlienConnect()
    wb = InitConnection()

    # Begin Shell-like interaction
    if _HAS_READLINE:
        rl.parse_and_bind("tab: complete")
        rl.set_completer_delims(" ")

        def complete(text, state):
            prompt_line = rl.get_line_buffer()
            tokens = prompt_line.split()
            results = []
            if len(tokens) == 0:
                results = [f'{x} ' for x in AlienSessionInfo['commandlist']]
            elif len(tokens) == 1 and not prompt_line.endswith(' '):
                results = [f'{x} ' for x in AlienSessionInfo['commandlist'] if x.startswith(text)] + [None]
            else:
                results = lfn_list(wb, text) + [None]
            return results[state]

        rl.set_completer(complete)
        setupHistory()  # enable history saving

    print_out('Welcome to the ALICE GRID\nsupport mail: adrian.sevcenco@cern.ch\n')

    if os.getenv('ALIENPY_PROMPT_DATE'):
        AlienSessionInfo['show_date'] = True
    if os.getenv('ALIENPY_PROMPT_CWD'):
        AlienSessionInfo['show_lpwd'] = True
    if not os.getenv('ALIENPY_NO_CWD_RESTORE'):
        SessionRestore(wb)

    while True:
        INPUT = None
        prompt = f"AliEn[{AlienSessionInfo['user']}]:{AlienSessionInfo['currentdir']}"
        if AlienSessionInfo['show_date']:
            # `from datetime import datetime` is in scope, so call datetime.now() directly
            prompt = f'{datetime.now().replace(microsecond=0).isoformat()} {prompt}'
        if AlienSessionInfo['show_lpwd']:
            prompt = f'{prompt} local:{Path.cwd().as_posix()}'
        prompt = f'{prompt} >'
        try:
            INPUT = input(prompt)
        except EOFError:
            exit_message()
        if not INPUT:
            continue
        AlienSessionInfo['exitcode'] = ProcessCommandChain(wb, INPUT)
    return AlienSessionInfo['exitcode']
b2d20fab45d1e598dd713343d69f45a36eecbcd0
31,740
import re

def _slug_strip(value, separator=None):
    """
    Cleans up a slug by removing slug separator characters that occur at
    the beginning or end of a slug.

    If an alternate separator is used, it will also replace any instances
    of the default '-' separator with the new separator.
    """
    if separator == '-' or not separator:
        re_sep = '-'
    else:
        re_sep = '(?:-|%s)' % re.escape(separator)
        # swap any default separators for the alternate one
        value = re.sub('%s+' % re_sep, separator, value)
    return re.sub(r'^%s+|%s+$' % (re_sep, re_sep), '', value)
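# A minimal usage sketch: trim stray separators, then with an alternate one.
print(_slug_strip('--hello-world--'))                 # 'hello-world'
print(_slug_strip('__hello-world__', separator='_'))  # 'hello_world'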
ade4274643191ee702fe39ccefccc5d68ed3a8cb
31,741
def get_segments_loudness_max(h5, songidx=0):
    """
    Get segments loudness max array.
    Takes care of the proper indexing if we are in aggregate file.
    By default, return the array for the first song in the h5 file.
    To get a regular numpy ndarray, cast the result to: numpy.array( )
    """
    if h5.root.analysis.songs.nrows == songidx + 1:
        return h5.root.analysis.segments_loudness_max[
            h5.root.analysis.songs.cols.idx_segments_loudness_max[songidx]:]
    return h5.root.analysis.segments_loudness_max[
        h5.root.analysis.songs.cols.idx_segments_loudness_max[songidx]:
        h5.root.analysis.songs.cols.idx_segments_loudness_max[songidx + 1]]
a65111b565686a57add325cc4c29d16b37aa89e8
31,742
def take_along_axis(arr, indices, axis):
    """
    Takes values from the input array by matching 1d index and data slices.

    This iterates over matching 1d slices oriented along the specified axis in
    the index and data arrays, and uses the former to look up values in the
    latter. These slices can be different lengths.

    Args:
        arr (Tensor): Source array with shape `(Ni…, M, Nk…)`.
        indices (Tensor): Indices with shape `(Ni…, J, Nk…)` to take along
            each 1d slice of `arr`. This must match the dimension of `arr`,
            but dimensions `Ni` and `Nj` only need to broadcast against `arr`.
        axis (int): The axis to take 1d slices along. If `axis` is None, the
            input array is treated as if it had first been flattened to 1d.

    Returns:
        Tensor, the indexed result, with shape `(Ni…, J, Nk…)`.

    Raises:
        ValueError: If input array and indices have different number of
            dimensions.
        TypeError: If the input is not a Tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Example:
        >>> import mindspore.numpy as np
        >>> x = np.arange(12).reshape(3, 4)
        >>> indices = np.arange(3).reshape(1, 3)
        >>> output = np.take_along_axis(x, indices, 1)
        >>> print(output)
        [[ 0  1  2]
         [ 4  5  6]
         [ 8  9 10]]
    """
    _check_input_tensor(arr, indices)
    if axis is None:
        arr = ravel(arr)
        axis = 0
    ndim = F.rank(arr)
    if ndim != F.rank(indices):
        _raise_value_error('`indices` and `arr` must have the same number of dimensions')
    axis = _check_axis_in_range(axis, ndim)

    shape_arr = F.shape(arr)
    shape_indices = F.shape(indices)
    # broadcasts indices against the shape of arr except at axis
    indices = _broadcast_to(indices, _tuple_slice(shape_indices, None, axis),
                            _tuple_slice(shape_arr, None, axis), ndim)
    indices = _broadcast_to(indices,
                            _tuple_slice(shape_arr, None, axis + 1) +
                            _tuple_slice(shape_indices, axis + 1, None),
                            shape_arr, ndim)
    return F.gather_d(arr, axis, indices)
84492b7ac09b26510dfe3851122587a702753b04
31,743
def _is_possible_grab(grid_world, agent_id, object_id, grab_range, max_objects):
    """Private MATRX method.

    Checks if an :class:`matrx.objects.env_object.EnvObject` can be
    grabbed by an agent.

    Parameters
    ----------
    grid_world : GridWorld
        The :class:`matrx.grid_world.GridWorld` instance in which the
        object is sought according to the `object_id` parameter.
    agent_id : str
        The string representing the unique identified that represents
        the agent performing this action.
    object_id : str
        Optional. Default: ``None``

        The string representing the unique identifier of the
        :class:`matrx.objects.env_object.EnvObject` that should be
        grabbed. When not given, a random object within range is
        selected.
    grab_range : int
        Optional. Default: ``np.inf``

        The range in which the to be grabbed
        :class:`matrx.objects.env_object.EnvObject` should be in.
    max_objects : int
        Optional. Default: ``np.inf``

        The maximum of objects the agent can carry.

    Returns
    -------
    GrabObjectResult
        Depicts the action's expected success or failure and reason for
        that result.

        Can contain the following results:

        * RESULT_SUCCESS: When the object can be successfully grabbed.
        * RESULT_NO_OBJECT: When `object_id` is not given.
        * RESULT_CARRIES_OBJECT: When the agent already carries the maximum
          nr. objects.
        * NOT_IN_RANGE: When `object_id` not within range.
        * RESULT_AGENT: If the `object_id` is that of an agent.
        * RESULT_OBJECT_CARRIED: When the object is already carried by
          another agent.
        * RESULT_OBJECT_UNMOVABLE: When the object is not movable.
        * RESULT_UNKNOWN_OBJECT_TYPE: When the `object_id` does not exists
          in the :class:`matrx.grid_world.GridWorld`.
    """
    reg_ag = grid_world.registered_agents[agent_id]  # Registered Agent
    loc_agent = reg_ag.location  # Agent location

    if object_id is None:
        return GrabObjectResult(GrabObjectResult.RESULT_NO_OBJECT, False)

    # Already carries an object
    if len(reg_ag.is_carrying) >= max_objects:
        return GrabObjectResult(GrabObjectResult.RESULT_CARRIES_OBJECT, False)

    # Go through all objects at the desired locations
    objects_in_range = grid_world.get_objects_in_range(loc_agent, object_type="*",
                                                       sense_range=grab_range)
    objects_in_range.pop(agent_id)

    # Set random object in range
    if not object_id:
        # Remove all non objects from the list
        for obj in list(objects_in_range.keys()):
            if obj not in grid_world.environment_objects.keys():
                objects_in_range.pop(obj)

        # Select a random object
        if objects_in_range:
            object_id = grid_world.rnd_gen.choice(list(objects_in_range.keys()))
        else:
            return GrabObjectResult(GrabObjectResult.NOT_IN_RANGE, False)

    # Check if object is in range
    if object_id not in objects_in_range:
        return GrabObjectResult(GrabObjectResult.NOT_IN_RANGE, False)

    # Check if object_id is the id of an agent
    if object_id in grid_world.registered_agents.keys():
        # If it is an agent at that location, grabbing is not possible
        return GrabObjectResult(GrabObjectResult.RESULT_AGENT, False)

    # Check if it is an object
    if object_id in grid_world.environment_objects.keys():
        env_obj = grid_world.environment_objects[object_id]  # Environment object
        # Check if the object is not carried by another agent
        if len(env_obj.carried_by) != 0:
            return GrabObjectResult(
                GrabObjectResult.RESULT_OBJECT_CARRIED.replace("{AGENT_ID}",
                                                               str(env_obj.carried_by)),
                False)
        elif not env_obj.properties["is_movable"]:
            return GrabObjectResult(GrabObjectResult.RESULT_OBJECT_UNMOVABLE, False)
        else:
            # Success
            return GrabObjectResult(GrabObjectResult.RESULT_SUCCESS, True)
    else:
        return GrabObjectResult(GrabObjectResult.RESULT_UNKNOWN_OBJECT_TYPE, False)
a57d120747199b84b3047d822547b5367d2b9905
31,744
import numpy as np                   # assumed import: np.zeros is used below
from scipy.spatial import distance  # assumed import: distance.euclidean is used below

def euclidean_distance_matrix(embeddings):
    """Get euclidean distance matrix based on embeddings

    Args:
        embeddings (:obj:`numpy.ndarray`): A `ndarray` of shape
            `[num_sensors, dim]` that translates each sensor into a vector
            embedding.

    Returns:
        A `ndarray` of shape `[num_sensors, num_sensors]` where each element
        is the euclidean distance between two sensors.
    """
    num_sensors = embeddings.shape[0]
    distance_matrix = np.zeros((num_sensors, num_sensors), dtype=np.float32)
    for i in range(num_sensors):
        for j in range(num_sensors):
            distance_matrix[i, j] = distance.euclidean(
                embeddings[i, :], embeddings[j, :]
            )
    return distance_matrix
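# Quick check on three 2-D embeddings; the result is symmetric with a zero
# diagonal: [[0, 5, 1], [5, 0, ~4.24], [1, ~4.24, 0]]
emb = np.array([[0.0, 0.0], [3.0, 4.0], [0.0, 1.0]])
print(euclidean_distance_matrix(emb))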
5b50248a94eb926078a20fd5efbac83f115c26b0
31,745
import tmdbsimple as tmdb  # assumed import: tmdb.Search() matches the tmdbsimple API

def get_actor_id(name):
    """
    Get TMDB id for an actor based on their name.
    If more than one result (likely), fetches the first match.
    TMDB results are sorted by popularity, so the first match is likely
    to be the one wanted.
    """
    search = tmdb.Search()
    search.person(query=name)
    # get id of first result
    tmdb_id = search.results[0]['id']
    return tmdb_id
ac75cbaac7dec85fd965d8cc421c6bbba8fc5f67
31,746
import json

def generate_prompt(
    test_case_path, prompt_path, solutions_path, tokenizer, starter_path=None
):
    """
    Generate a prompt for a given test case.
    Original version from
    https://github.com/hendrycks/apps/blob/main/eval/generate_gpt_codes.py#L51.
    """
    _input = "\nQUESTION:\n"
    with open(prompt_path, "r") as f:
        data = f.readlines()
        data = "".join(data)
    _input += data

    if starter_path is not None:
        with open(starter_path, "r") as f:
            data = f.readlines()
            data = "".join(data)
            data = "\n" + data  # + "\n"
        _input += data
    else:
        # _input += "\n\n"
        pass

    with open(test_case_path, "r") as f:
        data = json.load(f)
    if not data.get("fn_name"):
        _input += "\nUse Standard Input format"  # \n"
    else:
        _input += "\nUse Call-Based format"  # \n"

    _input += "\nANSWER:\n"
    return _input
ecd3218839b346741e5beea8ec7113ea2892571e
31,747
from django.utils import timezone  # assumed import: the upload_to signature and timezone.now() match Django

def projects_upload_to(instance, filename):
    """construct path to uploaded project archives"""
    today = timezone.now().strftime("%Y/%m")
    return "projects/{date}/{slug}/{filename}".format(
        date=today, slug=instance.project.slug, filename=filename)
01f97cf5994cca7265ede0a4b5c73672f61e2f90
31,748
def assign_employee(id):
    """
    Assign a department and a role to an employee
    """
    check_admin()

    employee = Employee.query.get_or_404(id)
    form = EmployeeAssignForm(obj=employee)
    if form.validate_on_submit():  # persist only on a valid submission, so the render below stays reachable
        employee.department = form.department.data
        employee.role = form.role.data
        db.session.add(employee)
        db.session.commit()
        flash('You have successfully assigned a department and role.')

        # redirect to the roles page
        return redirect(url_for('admin.list_employees'))

    return render_template('admin/employees/employee_assign.html',
                           employee=employee, form=form,
                           title='Assign Employee')
f88a1b49cadf73d8a62c0be23742fd03cace36cd
31,749
import copy

import numpy as np  # assumed import: np.array(ival) is used below

def autofocus(field, nm, res, ival, roi=None,
              metric="average gradient", minimizer="lmfit",
              minimizer_kwargs=None, padding=True, num_cpus=1):
    """Numerical autofocusing of a field using the Helmholtz equation.

    Parameters
    ----------
    field: 1d or 2d ndarray
        Electric field is BG-Corrected, i.e. field = EX/BEx
    nm: float
        Refractive index of medium.
    res: float
        Size of wavelength in pixels.
    ival: tuple of floats
        Approximate interval to search for optimal focus in px.
    roi: rectangular region of interest (x1, y1, x2, y2)
        Region of interest of `field` for which the metric will be
        minimized. If not given, the entire `field` will be used.
    metric: str
        - "average gradient" : average gradient metric of amplitude
        - "rms contrast" : RMS contrast of phase data
        - "spectrum" : sum of filtered Fourier coefficients
    minimizer: str
        - "lmfit" : lmfit-based minimizer
        - "legacy" : only use for reproducing old results
    minimizer_kwargs: dict
        Optional keyword arguments to the `minimizer` function
    padding: bool
        Perform padding with linear ramp from edge to average
        to reduce ringing artifacts.

        .. versionchanged:: 0.1.4
           improved padding value and padding location
    num_cpus: int
        Not implemented.

    Returns
    -------
    d, field [, other]:
        The focusing distance, the field, and optionally any other data
        returned by the minimizer (specify via `minimizer_kwargs`).

    Notes
    -----
    This method uses :class:`nrefocus.RefocusNumpy` for refocusing
    of 2D fields. This is because the :func:`nrefocus.refocus_stack`
    function uses `async` which appears to not work with e.g.
    :mod:`pyfftw`.
    """
    fshape = len(field.shape)
    if fshape == 1:
        # 1D field
        rfcls = iface.RefocusNumpy1D
    elif fshape == 2:
        # 2D field
        rfcls = iface.RefocusNumpy
    else:
        raise AssertionError("Dimension of `field` must be 1 or 2.")

    if minimizer_kwargs is None:
        minimizer_kwargs = {}
    else:
        minimizer_kwargs = copy.deepcopy(minimizer_kwargs)

    # use a made-up pixel size so we can use the new `Refocus` interface
    pixel_size = 1
    rf = rfcls(field=field,
               wavelength=res*pixel_size,
               pixel_size=pixel_size,
               medium_index=nm,
               distance=0,
               kernel="helmholtz",
               padding=padding
               )

    data = rf.autofocus(metric=metric,
                        minimizer=minimizer,
                        interval=np.array(ival)*rf.pixel_size,
                        roi=roi,
                        minimizer_kwargs=minimizer_kwargs,
                        ret_grid=False,
                        ret_field=True,
                        )
    return data
a954f96cf8c3c16dbdfb9ea31c22e61cac2a9245
31,750
def ZeroPaddedRoundsError(handler=None): """error raised if hash was recognized but contained zero-padded rounds field""" return MalformedHashError(handler, "zero-padded rounds")
b0ff8bb894505041382aaf2d79f027708c7a2134
31,751
import networkx as nx


def get_adj_mat(G):
    """Represent ppi network as adjacency matrix

    Parameters
    ----------
    G : networkx graph
        ppi network, see get_ppi()

    Returns
    -------
    adj : square sparse scipy matrix
        (i, j) is True if there is an interaction reported by irefindex
    ids : list
        same length as adj; the ith entry contains the irefindex unique
        identifier for the gene whose interactions are reported in the
        ith row of adj
    """
    ids = list(G.nodes())
    # NOTE: `to_scipy_sparse_matrix` was removed in networkx 3; use
    # `nx.to_scipy_sparse_array` there.
    adj = nx.to_scipy_sparse_matrix(G, nodelist=ids, dtype=bool)
    return adj, ids
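# Example usage (a small made-up graph standing in for the irefindex
# network; requires networkx and scipy):
G_demo = nx.Graph()
G_demo.add_edges_from([("a", "b"), ("b", "c")])
adj_demo, ids_demo = get_adj_mat(G_demo)
print(adj_demo.shape)  # (3, 3)
print(ids_demo)        # ['a', 'b', 'c']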
95ee8df6be45f12df8da93c7fed10a3c8a32a058
31,752
import numpy as np


def load_ndarray_list(fname):
    """Load a list of arrays saved by `save_ndarray_list`.

    Parameters
    ----------
    fname : string
        filename to load.

    Returns
    -------
    la : list of np.ndarrays
        The list of loaded numpy arrays. This should be identical to what
        was saved by `save_ndarray_list`.
    """
    d1 = np.load(fname)
    # Entries were saved under their list index as the key ('0', '1', ...),
    # so sort on the integer value of the key to restore the original order.
    # (`iteritems` was the Python 2 spelling; `items` works on Python 3.)
    la = [v for i, v in sorted(d1.items(), key=lambda kv: int(kv[0]))]
    return la
fc76373d45c8934bd81d6ac7e144dff97ae6d1c9
31,753
def save_result(data, format, options=UNSET) -> ProcessBuilder: """ Save processed data to storage :param data: The data to save. :param format: The file format to save to. It must be one of the values that the server reports as supported output file formats, which usually correspond to the short GDAL/OGR codes. If the format is not suitable for storing the underlying data structure, a `FormatUnsuitable` exception will be thrown. This parameter is *case insensitive*. :param options: The file format parameters to be used to create the file(s). Must correspond to the parameters that the server reports as supported parameters for the chosen `format`. The parameter names and valid values usually correspond to the GDAL/OGR format options. :return: `false` if saving failed, `true` otherwise. """ return process('save_result', data=data, format=format, options=options)
be9e8f36869cbe2fdf7b938dfd59cf7b8743ff2a
31,754
def load_suites_from_classes(classes): # type: (Sequence[Any]) -> List[Suite] """ Load a list of suites from a list of classes. """ return list( filter( lambda suite: not suite.hidden, map(load_suite_from_class, classes) ) )
6c4b45c7ab99a3e3f7742f247ea65552c7c70927
31,755
import torch


def update(quantized_model, distilD):
    """
    Update activation range according to distilled data.
    quantized_model: a quantized model whose activation range is to be updated
    distilD: distilled data
    """
    print('******updating BN stats...', end='')
    with torch.no_grad():
        for batch_idx, inputs in enumerate(distilD):
            if isinstance(inputs, list):
                inputs = inputs[0]
            inputs = inputs.cuda()
            # Forward pass only: running BN statistics are updated as a side
            # effect; the outputs themselves are discarded.
            quantized_model(inputs)
    print(' Finished******')
    return quantized_model
70e4cd9032e12f1f461c1cd13ac81ead06091728
31,756
# BUGFIX: the original did `from sys import path`, but `sys.path` is the
# interpreter's module search list; the `path.join` call below needs os.path.
from os import path


def save_chunks(chunk_sound, out_path, video_id):
    """
    Saves chunked speech intervals as WAV file.

    :param chunk_sound: A parselmouth.praat Sound object
    :param out_path: The output path of the wav file
    :param video_id: The original soundfile name (w/o ext)
    :return logfile_entry: Row with chunk metadata to be written to log
    """
    chunk_start_ms = int(chunk_sound.get_start_time() * 1000)
    chunk_end_ms = int(chunk_sound.get_end_time() * 1000)
    chunk_duration = chunk_end_ms - chunk_start_ms
    chunk_fn = '{0}_{1}_{2}.wav'.format(video_id, chunk_start_ms, chunk_end_ms)
    chunk_file_path = path.join(out_path, chunk_fn)
    chunk_sound.save(chunk_file_path, 'WAV')

    return {'filename': chunk_fn,
            'video_id': video_id,
            'start_time': chunk_start_ms,
            'end_time': chunk_end_ms,
            'duration': chunk_duration}
dda57e949e5a4a907082eb833dc6c91fd9fa7ec2
31,757
import seaborn as sns


def _get_color(value):
    """To make positive DFCs plot green, negative DFCs plot red."""
    green, red = sns.color_palette()[2:4]
    if value >= 0:
        return green
    return red
888edb2307bd6f4da65c6c1d5c0a40cb146dfa8c
31,758
from xml.etree import ElementTree


def parse_feed(feed: str) -> list:
    """
    Parses a TV Show *feed*, returning the episode files included in that feed.

    :param feed: the feed to parse
    :return: list of episode files included in *feed*
    """
    try:
        root = ElementTree.fromstring(feed)
    except ElementTree.ParseError as error:
        raise ParseError(str(error))

    channel = root.find('channel')
    if channel is None:
        raise ParseError("feed's format is invalid: missing 'channel' element")

    # This function is used multiple times in the loop below
    def attr(item, name):
        """ Fetches the text of the *name* attribute from the *item* element """
        attribute = item.find(name)
        if attribute is None:
            # Report the attribute *name*; the original re-raised with the
            # element itself, which is None at this point.
            raise ParseError(f"item is missing required attribute {name!r}")
        return attribute.text

    files = []
    for item in channel.findall('item'):
        item = {
            "title": attr(item, 'title'),
            "link": attr(item, 'link')
        }
        files.append(parse_item(item))

    return files
6fec9c1d71d3ae31480103dd2e6a2f5d81e12287
31,759
import subprocess


def release_job(job_id):
    """
    Release a job
    :param job_id: int, job id
    :return: 1 on success, 0 otherwise
    """
    try:
        step_process = subprocess.Popen(('qrls', str(job_id)),
                                        shell=False,
                                        stdout=subprocess.PIPE,
                                        stderr=subprocess.STDOUT)
        stdout, _ = step_process.communicate()  # stderr is merged into stdout
        # Check the exit status so a failed `qrls` is actually reported;
        # the original returned 1 unconditionally.
        return 1 if step_process.returncode == 0 else 0
    except Exception as e:
        print(e)
        return 0
245ff5217cb6c62f01b5293e859facbf30f35d61
31,760
debug = False  # module-level tracing flag (assumed; the original referenced
# a `debug` global without defining it)


def head_finder(board_matrix):
    """
    Function: head_finder()
    Description: this will find the head of your snake
    Input:
        board_matrix: a list of lists that represents the current board of
            Battlesnake. Indexed as board_matrix[y][x].
    Output:
        head_xy: a list of two elements, [x, y], giving the location of your
            head in the board matrix.
    """
    head_pos_y = len(board_matrix) - 1
    x_max = len(board_matrix[0]) - 1
    head_pos_x = x_max
    head_indicator = 'mh'
    end = False
    # Scan from the bottom-right corner towards the top-left until the head
    # token is found.
    while head_pos_y >= 0:
        while head_pos_x >= 0:
            if board_matrix[head_pos_y][head_pos_x] == head_indicator:
                end = True
                break
            else:
                head_pos_x -= 1
        if end is True:
            break
        else:
            head_pos_y -= 1
            head_pos_x = x_max

    head_xy = [head_pos_x, head_pos_y]

    if debug is True:
        print("\nhead_finder DEBUG:")
        print("-------------------")
        # debug print out
        print("head_finder returns:")
        print("head_xy: %s" % (head_xy))
        print("-------------------")

    return head_xy
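# Example: a 3x3 board where 'mh' marks the snake's head.
example_board = [
    ['e', 'e', 'e'],
    ['e', 'mh', 'e'],
    ['e', 'e', 'e'],
]
print(head_finder(example_board))  # [1, 1]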
e4682ac3b91e2023d3c9fe979762211df9978eec
31,761
def copy_emb_weights(embedding, idx2word, embedding_weights, emb_index_dict, vocab_size):
    """Copy from the embedding weights the rows for words that appear in our short vocabulary (idx2word)."""
    c = 0
    for i in range(vocab_size):
        w = idx2word[i]
        g = emb_index_dict.get(w, emb_index_dict.get(w.lower()))
        if g is None and w.startswith('#'):  # glove has no hashtags (I think...)
            w = w[1:]
            g = emb_index_dict.get(w, emb_index_dict.get(w.lower()))
        if g is not None:
            embedding[i, :] = embedding_weights[g, :]
            c += 1
    print('tokens in small vocab found in embeddings and copied: {:,} ({:.4f} of vocab)'.format(
        c, c / float(vocab_size)))
    return embedding
e5d361efd342cc7e194ee325fdf4a98831121576
31,762
import numpy as np


def cost_aggregation(c_v, max_d):
    """
    The SGM recurrence:
        Lr(p,d) = C(p,d)
                  + min[Lr(p-r, d),
                        Lr(p-r, d-1) + p1,
                        Lr(p-r, d+1) + p1,
                        min_i Lr(p-r, i) + p2]
                  - min_k Lr(p-r, k)
    :param c_v: cost volume of shape (H, W, D)
    :param max_d: maximum disparity (should equal D)
    :return: sum of all the Lr (aggregated over the four path directions)
    """
    (H, W, D) = c_v.shape
    p1 = 10
    p2 = 120
    Lr1 = np.zeros((H, W, D), np.uint32)  # from up
    Lr2 = np.zeros((H, W, D), np.uint32)  # from left
    Lr3 = np.zeros((H, W, D), np.uint32)  # from right
    Lr4 = np.zeros((H, W, D), np.uint32)  # from down
    # Lr5 = np.zeros((H, W, D), np.uint32)  # from up_left

    print('agg from up started.')
    Lr1[0, :, :] = c_v[0, :, :]  # border, first row
    for r in range(1, H):
        for d in range(0, max_d):
            Lr1_1 = np.squeeze(Lr1[r - 1, :, d])
            if d != 0:  # disparity is not the bottom
                Lr1_2 = np.squeeze(Lr1[r - 1, :, d - 1] + p1)
            else:
                Lr1_2 = np.squeeze(Lr1_1 + p1)
            if d != max_d - 1:
                Lr1_3 = np.squeeze(Lr1[r - 1, :, d + 1] + p1)
            else:
                Lr1_3 = np.squeeze(Lr1_1 + p1)
            Lr1_4 = np.squeeze(np.min(Lr1[r - 1, :, :], axis=1) + p2)
            Lr1_5 = np.min(Lr1[r - 1, :, :], axis=1)
            Lr1[r, :, d] = c_v[r, :, d] + np.min(np.vstack([Lr1_1, Lr1_2, Lr1_3, Lr1_4]), axis=0) - Lr1_5

    print('agg from left started.')
    Lr2[:, 0, :] = c_v[:, 0, :]  # border, first column
    for c in range(1, W):
        for d in range(0, max_d):
            Lr2_1 = np.squeeze(Lr2[:, c - 1, d])
            if d != 0:  # disparity is not the bottom
                Lr2_2 = np.squeeze(Lr2[:, c - 1, d - 1] + p1)
            else:
                Lr2_2 = np.squeeze(Lr2_1 + p1)
            if d != max_d - 1:
                Lr2_3 = np.squeeze(Lr2[:, c - 1, d + 1] + p1)
            else:
                Lr2_3 = np.squeeze(Lr2_1 + p1)
            Lr2_4 = np.squeeze(np.min(Lr2[:, c - 1, :], axis=1) + p2)
            Lr2_5 = np.min(Lr2[:, c - 1, :], axis=1)
            Lr2[:, c, d] = c_v[:, c, d] + np.min(np.vstack([Lr2_1, Lr2_2, Lr2_3, Lr2_4]), axis=0) - Lr2_5

    print('agg from right started.')
    # BUGFIX: the right-to-left pass starts from the *last* column; the
    # original initialized column 0 (which the loop then overwrote) and left
    # column W-1 all zeros.
    Lr3[:, -1, :] = c_v[:, -1, :]  # border, last column
    for c in range(W - 2, -1, -1):
        for d in range(0, max_d):
            Lr3_1 = np.squeeze(Lr3[:, c + 1, d])
            if d != 0:  # disparity is not the bottom
                Lr3_2 = np.squeeze(Lr3[:, c + 1, d - 1] + p1)
            else:
                Lr3_2 = np.squeeze(Lr3_1 + p1)
            if d != max_d - 1:
                Lr3_3 = np.squeeze(Lr3[:, c + 1, d + 1] + p1)
            else:
                Lr3_3 = np.squeeze(Lr3_1 + p1)
            Lr3_4 = np.squeeze(np.min(Lr3[:, c + 1, :], axis=1) + p2)
            Lr3_5 = np.min(Lr3[:, c + 1, :], axis=1)
            Lr3[:, c, d] = c_v[:, c, d] + np.min(np.vstack([Lr3_1, Lr3_2, Lr3_3, Lr3_4]), axis=0) - Lr3_5

    print('agg from down started.')
    # BUGFIX: likewise, the bottom-to-top pass starts from the *last* row.
    Lr4[-1, :, :] = c_v[-1, :, :]  # border, last row
    for r in range(H - 2, -1, -1):
        for d in range(0, max_d):
            Lr4_1 = np.squeeze(Lr4[r + 1, :, d])
            if d != 0:  # disparity is not the bottom
                Lr4_2 = np.squeeze(Lr4[r + 1, :, d - 1] + p1)
            else:
                Lr4_2 = np.squeeze(Lr4_1 + p1)
            if d != max_d - 1:
                Lr4_3 = np.squeeze(Lr4[r + 1, :, d + 1] + p1)
            else:
                Lr4_3 = np.squeeze(Lr4_1 + p1)
            Lr4_4 = np.squeeze(np.min(Lr4[r + 1, :, :], axis=1) + p2)
            Lr4_5 = np.min(Lr4[r + 1, :, :], axis=1)
            Lr4[r, :, d] = c_v[r, :, d] + np.min(np.vstack([Lr4_1, Lr4_2, Lr4_3, Lr4_4]), axis=0) - Lr4_5

    # An unfinished diagonal (up-left) pass was left commented out in the
    # original:
    # print('agg from up-left started')
    # Lr5[0, :, :] = c_v[0, :, :]
    # for c in range(1, W):
    #     for d in range(0, max_d):
    #         if c <= W - H - 1:  # The path does not need to be split.
# for x, y in zip(range(c, W, 1), range(0, H, 1)): # Lr5_1 = Lr5[y - 1, x - 1, d] # if d != 0: # Lr5_2 = Lr5[y - 1, x - 1, d - 1] + p1 # else: # Lr5_2 = Lr5_1 + p1 # if d != max_d - 1: # Lr5_3 = Lr5[y - 1, x - 1, d + 1] + p1 # else: # Lr5_3 = Lr5_1 + p1 # Lr5_4 = np.min(Lr5[y - 1, x - 1, :], axis=0) + p2 # Lr5_5 = np.min(Lr5[y - 1, x - 1, :], axis=0) # # Lr5[y, x, d] = c_v[y, x, d] + min(Lr5_1, Lr5_2, Lr5_3, Lr5_4) - Lr5_5 # else: # the pass needs to be split # for x, y in zip(range(c, W, 1), range(0, W - c, 1)): # first part # Lr5_1 = Lr5[y - 1, x - 1, d] # if d != 0: # Lr5_2 = Lr5[y - 1, x - 1, d - 1] + p1 # else: # Lr5_2 = Lr5_1 + p1 # if d != max_d - 1: # Lr5_3 = Lr5[y - 1, x - 1, d + 1] + p1 # else: # Lr5_3 = Lr5_1 + p1 # Lr5_4 = np.min(Lr5[y - 1, x - 1, :], axis=0) + p2 # Lr5_5 = np.min(Lr5[y - 1, x - 1, :], axis=0) # # Lr5[y, x, d] = c_v[y, x, d] + min(Lr5_1, Lr5_2, Lr5_3, Lr5_4) - Lr5_5 # for x, y in zip(range(0, W, 1), range(W - c, H, 1)): # second part # if x == 0: # the head # Lr5_1 = Lr5[y - 1, W - 1, d] # if d != 0: # Lr5_2 = Lr5[y - 1, W - 1, d - 1] + p1 # else: # Lr5_2 = Lr5_1 + p1 # if d != max_d - 1: # Lr5_3 = Lr5[y - 1, W - 1, d + 1] + p1 # else: # Lr5_3 = Lr5_1 + p1 # Lr5_4 = np.min(Lr5[y - 1, W - 1, :], axis=0) + p2 # Lr5_5 = np.min(Lr5[y - 1, W - 1, :], axis=0) # # Lr5[y, x, d] = c_v[y, x, d] + min(Lr5_1, Lr5_2, Lr5_3, Lr5_4) - Lr5_5 # else: # Lr5_1 = Lr5[y - 1, x - 1, d] # if d != 0: # Lr5_2 = Lr5[y - 1, x - 1, d - 1] + p1 # else: # Lr5_2 = Lr5_1 + p1 # if d != max_d - 1: # Lr5_3 = Lr5[y - 1, x - 1, d + 1] + p1 # else: # Lr5_3 = Lr5_1 + p1 # Lr5_4 = np.min(Lr5[y - 1, x - 1, :], axis=0) + p2 # Lr5_5 = np.min(Lr5[y - 1, x - 1, :], axis=0) # # Lr5[y, x, d] = c_v[y, x, d] + min(Lr5_1, Lr5_2, Lr5_3, Lr5_4) - Lr5_5 return Lr1 + Lr2 + Lr3 + Lr4
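# Tiny smoke test (random made-up cost volume): the aggregated result keeps
# the (H, W, D) shape of the input cost volume.
c_v_demo = np.random.randint(0, 50, size=(4, 5, 2)).astype(np.uint32)
agg_demo = cost_aggregation(c_v_demo, max_d=2)
print(agg_demo.shape)  # (4, 5, 2)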
71ff7bbbea8c3faebc8269d707198240c258c9c3
31,763
def send_approved_resource_email(user, request, reason): """ Notify the user the that their request has been approved. """ email_template = get_email_template() template = "core/email/resource_request_approved.html" subject = "Your Resource Request has been approved" context = { "support_email": email_template.email_address, "support_email_header": email_template.email_header, "support_email_footer": email_template.email_footer, "user": user.username, "request": request, "reason": reason } from_name, from_email = admin_address() user_email = lookupEmail(user.username) recipients = [email_address_str(user.username, user_email)] sender = email_address_str(from_name, from_email) return send_email_template(subject, template, recipients, sender, context=context, cc=[sender])
52571470104e802ef9bcc491fa614a9420710df5
31,764
import warnings

from docutils import nodes
from docutils.nodes import Element

from sphinx.deprecation import RemovedInSphinx30Warning
from sphinx.util.nodes import traverse_parent


def is_in_section_title(node: Element) -> bool:
    """Determine whether the node is in a section title"""
    warnings.warn('is_in_section_title() is deprecated.',
                  RemovedInSphinx30Warning, stacklevel=2)

    for ancestor in traverse_parent(node):
        if isinstance(ancestor, nodes.title) and \
           isinstance(ancestor.parent, nodes.section):
            return True
    return False
fb1e981e9ec8ad26cb49a144eb696d035dcbc2e8
31,765
def common_mgr():
    """
    Create a base topology.

    This uses the ExtendedNMLManager for its helpers.
    """
    # Create base topology
    mgr = ExtendedNMLManager(name='Graphviz Namespace')
    sw1 = mgr.create_node(identifier='sw1', name='My Switch 1')
    sw2 = mgr.create_node(identifier='sw2', name='My Switch 2')

    assert mgr.get_object('sw1') is not None
    assert mgr.get_object('sw2') is not None

    sw1p1 = mgr.create_biport(sw1)
    sw1p2 = mgr.create_biport(sw1)
    sw1p3 = mgr.create_biport(sw1)  # noqa
    sw2p1 = mgr.create_biport(sw2)
    sw2p2 = mgr.create_biport(sw2)
    sw2p3 = mgr.create_biport(sw2)  # noqa

    sw1p1_sw2p1 = mgr.create_bilink(sw1p1, sw2p1)  # noqa
    sw1p2_sw2p2 = mgr.create_bilink(sw1p2, sw2p2)  # noqa

    return mgr
e181b231bc859a6595417bbc63b695a00d7c3ae7
31,766
import numpy as np


def reshape_array_h5pyfile(array, number_of_gatesequences, datapoints_per_seq):
    """Reshape a flat data column into a
    (number_of_gatesequences x datapoints_per_seq) matrix.

    The order matters: for data stored as a single column, use Fortran
    order ('F') so consecutive samples fill each column in turn.
    """
    new_array = np.reshape(array,
                           (number_of_gatesequences, datapoints_per_seq),
                           order='F')
    return new_array
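# Demonstration of the column-major ('F') reshape: six sequential samples
# from 3 gate sequences x 2 datapoints per sequence.
data_demo = np.arange(6)  # [0, 1, 2, 3, 4, 5]
print(reshape_array_h5pyfile(data_demo, 3, 2))
# [[0 3]
#  [1 4]
#  [2 5]]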
c05edd9963396362f3f36f4293e28eb05fe22359
31,767
def build_value_counts_query(table: str, categorical_column: str, limit: int): """ Examples: SELECT {column_name}, COUNT (*) as frequency FROM `{table}` WHERE {not_null_string} GROUP BY {column_name} ORDER BY frequency DESC LIMIT {limit} Args: table: (string), full path of the table categorical_column: (string), name of the numerical column limit: (int), return the top counts Returns: string """ template = query_templates.VALUE_COUNTS_TEMPLATE not_null_string = _build_not_null_string([categorical_column]) query = template.format( table=table, column_name=categorical_column, limit=limit, not_null_string=not_null_string ) return query
605e25310e3c91c693d72e7e4eae3c513cea2a8b
31,768
from pathlib import Path def get_model_benchmarks_data(benchmark_runlogs_filepath: Path): """ Return Python dict with summary of model performance for one choice of training set size. """ benchmark_genlog = read_json(benchmark_runlogs_filepath) benchmark_runlog = read_json(benchmark_runlogs_filepath.parent / "runlog.json") assert benchmark_runlog["out.status"] == "SUCCESS" return { "pipeline_run_id": benchmark_runlog["parameters.pipeline_run_id"], "nr_train_images": benchmark_runlog["parameters.task.nr_train_images"], "runtime_ms": benchmark_runlog["out.timing.duration_ms"], "roc_auc": benchmark_genlog["key-values"]["roc_auc_class_mean"], }
494e94a371a682e84f211204b189f3d17727f1c0
31,769
# NOTE: `collections` here refers to a project-local module that provides a
# `Namespace` class (and `Label` is assumed to be in scope); the
# standard-library `collections` module has no `Namespace`.
import collections


def make_labels(module_path, *names, **names_labels):
    """Make a namespace of labels."""
    return collections.Namespace(
        *((name, Label(module_path, name)) for name in names),
        *((n, l if isinstance(l, Label) else Label(module_path, l))
          for n, l in names_labels.items()),
    )
aaf0d204442bb9b712c2cf17babe45fd46905c8d
31,770
import socket def check_port_occupied(port, address="127.0.0.1"): """Check if a port is occupied by attempting to bind the socket and returning any resulting error. :return: socket.error if the port is in use, otherwise False """ s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) try: s.bind((address, port)) except socket.error as e: return e finally: s.close() return False
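# Usage example: occupy a port ourselves, then probe it.
listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
listener.bind(("127.0.0.1", 0))   # let the OS pick a free port
port = listener.getsockname()[1]
print(check_port_occupied(port))  # an OSError (address already in use)
listener.close()
print(check_port_occupied(port))  # False (port is free again)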
4bf302f89793df47a28cb2bd608abd4344f40ff2
31,771
def test_heterogeneous_multiagent_env(
    common_config,
    pf_config,
    multicomponent_building_config,
    pv_array_config,
    ev_charging_config
):
    """Test a multiagent env with three heterogeneous agents."""

    # First, build the multi-component building agent
    building_agent = {
        "name": "building",
        "bus": "675c",
        "cls": MultiComponentEnv,
        "config": {"components": multicomponent_building_config}
    }

    # Next, build the PV and EV charging envs
    ev_agent = {
        "name": "ev-charging",
        "bus": "675c",
        "cls": EVChargingEnv,
        "config": ev_charging_config
    }

    pv_agent = {
        "name": "pv",
        "bus": "675c",
        "cls": PVEnv,
        "config": pv_array_config
    }

    agents = [building_agent, ev_agent, pv_agent]

    env_config = {
        "common_config": common_config,
        "pf_config": pf_config,
        "agents": agents
    }
    env = MultiAgentEnv(**env_config)

    return multi_agent_episode_runner(env)
75939f0e548001ab34f411d15ae822cc7b1c1790
31,772
import requests
from bs4 import BeautifulSoup


def find_sublinks(artist_link):
    """Find all subpages for a given artist.

    Some artists have so many songs that their catalogue spans multiple
    pages. Starting from e.g. freemidi/queen_1, this follows the pagination
    hyperlinks on that page and returns
    [freemidi/queen_1, freemidi/queen_2, ..., freemidi/queen_n].

    Args:
        artist_link (str): link to the home page of the artist

    Returns:
        list: all pages with songs that can be reached from artist_link
    """
    links = [artist_link]
    # As written, this only works for the freemidi.org page layout.
    URL = f"https://freemidi.org/{artist_link}"
    artist_page = requests.get(URL)
    artist_soup = BeautifulSoup(artist_page.content, "html.parser")

    # Iterate over the pagination hyperlinks and collect them.
    for a in artist_soup.find(class_="pagination").find_all("a"):
        link = a["href"]
        if link != "#":
            links.append(link)
    return links
3b24623d1cdbf4bf83f92a6e741576ff74e3facb
31,773
from unittest.mock import patch def class_mock(request, q_class_name, autospec=True, **kwargs): """Return mock patching class with qualified name *q_class_name*. The mock is autospec'ed based on the patched class unless the optional argument *autospec* is set to False. Any other keyword arguments are passed through to Mock(). Patch is reversed after calling test returns. """ _patch = patch(q_class_name, autospec=autospec, **kwargs) request.addfinalizer(_patch.stop) return _patch.start()
08bd1aacf75784668845ace13af6514461850d1a
31,774
import inspect


def kwargs_only(fn):
    """Wraps function so that callers must call it using keyword-arguments only.

    Args:
        fn: fn to wrap.

    Returns:
        Wrapped function that may only be called using keyword-arguments.
    """

    if hasattr(inspect, 'getfullargspec'):
        # For Python 3
        args = inspect.getfullargspec(fn)
        varargs = args.varargs
        keywords = args.varkw
    else:
        # For Python 2
        args = inspect.getargspec(fn)  # pylint: disable=deprecated-method
        varargs = args.varargs
        keywords = args.keywords
    if varargs is not None:
        raise TypeError('function to wrap should not have *args parameter')
    if keywords is not None:
        raise TypeError('function to wrap should not have **kwargs parameter')

    arg_list = args.args
    has_default = [False] * len(arg_list)
    default_values = [None] * len(arg_list)
    # Guard against functions with no parameters (the original indexed
    # arg_list[0] unconditionally, raising IndexError for them).
    has_self = len(arg_list) > 0 and arg_list[0] == 'self'
    if args.defaults:
        has_default[-len(args.defaults):] = [True] * len(args.defaults)
        default_values[-len(args.defaults):] = args.defaults

    def wrapped_fn(*args, **kwargs):
        """Wrapped function."""
        if args:
            if not has_self or (has_self and len(args) != 1):
                raise TypeError('function %s must be called using keyword-arguments '
                                'only.' % fn.__name__)
            if has_self:
                if len(args) != 1:
                    raise TypeError('function %s has self argument but not called '
                                    'with exactly 1 positional argument' % fn.__name__)
                kwargs['self'] = args[0]

        kwargs_to_pass = {}
        for arg_name, arg_has_default, arg_default_value in zip(
                arg_list, has_default, default_values):
            if not arg_has_default and arg_name not in kwargs:
                raise TypeError('function %s must be called with %s specified' %
                                (fn.__name__, arg_name))
            kwargs_to_pass[arg_name] = kwargs.pop(arg_name, arg_default_value)

        if kwargs:
            raise TypeError('function %s called with extraneous kwargs: %s' %
                            (fn.__name__, kwargs.keys()))

        return fn(**kwargs_to_pass)

    return wrapped_fn
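# Usage example: positional calls are rejected, keyword calls pass through
# (defaults are filled in automatically).
@kwargs_only
def scale(x, factor=2):
    return x * factor

print(scale(x=3))             # 6
print(scale(x=3, factor=10))  # 30
# scale(3) would raise TypeError: must be called using keyword-arguments only.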
cc5bb7d4d31d1bb392c306410c3c22267e93e891
31,775
def _labeling_complete(labeling, G): """Determines whether or not LPA is done. Label propagation is complete when all nodes have a label that is in the set of highest frequency labels amongst its neighbors. Nodes with no neighbors are considered complete. """ return all(labeling[v] in _most_frequent_labels(v, labeling, G) for v in G if len(G[v]) > 0)
130454cbb4a3bc77dfb94f97f20ad11e3239fb82
31,776
from os import urandom

# The original did `import _random` and then called `_random(...)`;
# CPython's `_random` module is not callable, so that was presumably a
# project-local helper. A minimal stand-in via os.urandom is sketched here.
# `BASE62`, `RANDOM_ID_SOURCE_BYTES` and `int2str` remain assumed
# module-level names.


def _random(num_bytes):
    """Return a non-negative random integer built from *num_bytes* random bytes."""
    return int.from_bytes(urandom(num_bytes), 'big')


def get_random_id_str(alphabet=None):
    """
    Get random integer and encode it to URL-safe string.
    """
    if not alphabet:
        alphabet = BASE62
    n = _random(RANDOM_ID_SOURCE_BYTES)
    return int2str(n, len(alphabet), alphabet)
51d27c2838ccdd506e23aa2e707ac304e80c249c
31,777
import numpy as np


def multivariate_normal_pdf(x, mean, cov):
    """Unnormalized multivariate normal probability density function."""
    # Convert to ndarray
    x = np.asanyarray(x)
    mean = np.asanyarray(mean)
    cov = np.asarray(cov)
    # Deviation from mean
    dev = x - mean
    if isinstance(dev, np.ma.MaskedArray):
        if np.all(np.ma.getmaskarray(dev)):
            return np.ones(dev.shape[:-1])
        else:
            dev = np.ma.getdata(dev) * ~np.ma.getmaskarray(dev)
    # Broadcast cov, if needed
    if cov.ndim <= dev.ndim:
        extra_dim = 1 + dev.ndim - cov.ndim
        cov = np.broadcast_to(cov, (1,) * extra_dim + cov.shape)
    exponent = -0.5 * np.einsum('...i,...i', dev, np.linalg.solve(cov, dev))
    return np.exp(exponent) / np.sqrt(np.linalg.det(cov))
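# Quick sanity check: at the mean the deviation is zero, so the unnormalized
# density is exp(0) / sqrt(det(I)) == 1.0.
x_demo = np.array([0.5, -1.0])
print(multivariate_normal_pdf(x_demo, x_demo, np.eye(2)))  # 1.0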
ce3c9171ee7cf78660118ecdab1949efce402827
31,778
import networkx as nx


def remove_below(G, attribute, value):
    """
    Remove (set to NaN) attribute values below a certain value

    Parameters
    ----------
    G : nx.graph
        Graph
    attribute : str
        Attribute
    value : float
        Value

    Returns
    -------
    G : nx.graph
        Graph
    """
    # Assertions
    assert isinstance(G, nx.Graph), "G is not a NetworkX graph"
    # Calculation
    for node in G.nodes:
        # NOTE: the original compared with `>`, which removed values *above*
        # the threshold; `<` matches the function name and docstring.
        if G.nodes[node][attribute] < value:
            G.nodes[node][attribute] = float('nan')
    return G
8d60ce75d8334d5de52b877a9fcd6a9c826c7418
31,779
def format_header(header_values): """ Formats a row of data with bolded values. :param header_values: a list of values to be used as headers :return: a string corresponding to a row in enjin table format """ header = '[tr][td][b]{0}[/b][/td][/tr]' header_sep = '[/b][/td][td][b]' return header.format(header_sep.join(header_values))
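# Example: two header cells rendered as one enjin table row.
print(format_header(['Name', 'Score']))
# [tr][td][b]Name[/b][/td][td][b]Score[/b][/td][/tr]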
5b7cd734a486959660551a6d915fbbf52ae7ef1e
31,780
import os
from typing import List, Tuple

import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sn
from numpy import ndarray
from pandas import DataFrame

# `utils` is a project-local module; `utils.timestamp()` supplies a
# timestamp string used in the default filename.
import utils


def plot_confusion_matrix(cm: ndarray,
                          classes: List[str],
                          savefolder: str,
                          filename: str = utils.timestamp() + '.png',
                          figsize: Tuple[int, int] = (30, 20)) -> str:
    """
    Creates an image of a confusion matrix.

    :param cm: confusion matrix of shape (n_classes, n_classes)
    :param classes: string labels for each class of size n_classes
    :param savefolder: folder where the image will be saved
    :param filename: name for the image file
    :param figsize: size of the image
    :return: path to the saved image
    """
    # construct a dataframe from the confusion matrix
    df_cm: DataFrame = pd.DataFrame(cm,
                                    index=[i for i in classes],
                                    columns=[i for i in classes])
    # create a plot
    plt.figure(figsize=figsize)
    # dye it as a heat map
    sn.heatmap(df_cm, annot=True, fmt='g')
    # save it into a file
    savepath: str = os.path.join(savefolder, filename)
    plt.savefig(savepath)
    return savepath
0d30a30d64844ca747b786fcb9257a2fdf438684
31,781
import random
from typing import Optional

import numpy as np


def getRandomWalk(initial_position: int, current_path: np.ndarray,
                  adjacency_matrix: np.ndarray, heuristic: np.ndarray,
                  pheromone: np.ndarray, alpha: float, max_lim: int,
                  Q: Optional[float], R: Optional[float]) -> np.ndarray:
    """
    Given an array of nodes already traversed (the path walked by an ant), a
    binary adjacency matrix describing the structure of the graph to be
    traversed, and the parameters that regulate the ant's stochastic choices
    (alpha controls the influence of the pheromone values on its decisions),
    returns the nodes visited by the ant in the order they were visited.

    Parameters
    ----------
    initial_position: int
        Integer indicating the initial position of the ant.

    current_path: np.ndarray (nodes), dtype=np.int8
        Array with nodes visited by the ant. The current_path argument must
        include the initial position of the ant.

    adjacency_matrix: np.ndarray (nodes, nodes), dtype=np.int8
        Binary adjacency matrix defining the structure of the graph to be
        traversed.

    heuristic: np.ndarray (nodes, nodes), dtype=np.float64
        Heuristic information matrix used by the stochastic ant policy to
        decide the ant's movements.

    pheromone: np.ndarray (nodes, nodes), dtype=np.float64
        Pheromone information matrix used by the stochastic ant policy to
        decide the ant's movements. The parameters of this matrix will be
        updated throughout the interactions of the algorithm.

    alpha: float
        Parameter weighting the influence of the pheromones when the ant
        decides on the next step of the walk being constructed.

    max_lim: int
        Maximum path length.

    Q: float, default=None
        Probability of selecting the next move deterministically by taking
        the move with the highest probability. By default this parameter
        is not considered.

    R: float, default=None
        Probability of selecting the next move uniformly at random, without
        taking the pheromone matrix and heuristics into account. By default
        this parameter is not considered.

    Returns
    -------
    : np.ndarray (nodes)
        Array with the nodes visited by the ant arranged in the order in
        which they have been visited.
    """
    movements = getValidPaths(initial_position, current_path, adjacency_matrix)
    n_partial_solutions = 1

    # Add partial solutions to the current path as long as possible.
    while len(movements) > 0 and n_partial_solutions < max_lim:
        if len(movements) == 1:
            mov = movements[0]
        elif Q is not None and random.random() < Q:
            # Deterministic selection of the move
            probs = stochasticAS(
                initial_position, np.array(movements), heuristic, pheromone, alpha)
            mov = movements[np.argmax(probs)]
        elif R is not None and random.random() < R:
            # Random selection of the move
            mov = random.choice(movements)
        else:
            # Stochastic selection of the next move
            probs = stochasticAS(
                initial_position, np.array(movements), heuristic, pheromone, alpha)
            mov = movements[rouletteWheel(probs, random.random())]

        current_path = np.append(current_path, mov)
        movements = getValidPaths(mov, current_path, adjacency_matrix)
        initial_position = mov
        n_partial_solutions += 1

    return current_path
a19a33cea8aadd58d99f87bb3ef111ce33df1ce7
31,782
import numpy as np


def sample_circle(plane="xy", N=100):
    """Sample N points on the unit circle in the given coordinate plane
    (the remaining coordinate is held at 1)."""
    phi = np.linspace(0, 2 * np.pi, N)
    if plane == "xy":
        return np.array([np.cos(phi), np.sin(phi), np.ones_like(phi)])
    elif plane == "xz":
        return np.array([np.cos(phi), np.ones_like(phi), np.sin(phi)])
    elif plane == "yz":
        return np.array([np.ones_like(phi), np.cos(phi), np.sin(phi)])
    # The original silently returned None here; fail loudly instead.
    raise ValueError("plane must be one of 'xy', 'xz', 'yz'")
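# Example: 4 points on the unit circle in the xy-plane (z held at 1).
pts = sample_circle("xy", N=4)
print(pts.shape)  # (3, 4)
print(pts[:, 0])  # [1. 0. 1.]  -> (cos 0, sin 0, 1)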
1546c3e74b5ef1f7d43fa3352a708b5f7acf03ae
31,783
from pathlib import Path
from datetime import datetime


def generate_today_word_cloud(path='images/'):
    """
    Generate today's word cloud image.

    Args:
        path (str, optional): output folder. Defaults to 'images/'.

    Returns:
        str: path of the saved image, or None if there were no terms.
    """
    terms_counts = get_term_count()
    if terms_counts:
        word_cloud = generate_word_cloud(terms_counts, drc_flag_color_map)
        word_cloud_path = Path.cwd().joinpath(
            path, 'word_cloud',
            datetime.today().strftime('%m-%d-%Y'))
        # BUGFIX: the original formatted `word_cloud.__str__` (a bound
        # method); what we want is the path itself with a .png extension.
        word_cloud_path = "{}.png".format(word_cloud_path)
        word_cloud.to_file(word_cloud_path)
        return word_cloud_path
f606135b181235eba5df4367d8721ff73e98af48
31,784
import importlib


def load_attr(str_full_module):
    """
    Args:
        - str_full_module: (str) of the form "{module_name}.{attr}"
    Return:
        the loaded attribute from a module (the input is returned unchanged
        if it is not a string).
    """
    if isinstance(str_full_module, str):
        split_full = str_full_module.split(".")
        str_module = ".".join(split_full[:-1])
        str_attr = split_full[-1]
        module = importlib.import_module(str_module)
        return getattr(module, str_attr)
    else:
        return str_full_module
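# Example: load a standard-library attribute by its dotted path.
sqrt = load_attr("math.sqrt")
print(sqrt(9.0))  # 3.0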
f96dd56c73745e76ccc9c48dda4ba8a6592ab54b
31,785
from typing import Dict from typing import List import math def conv(node: NodeWrapper, params: Dict[str, np.ndarray], xmap: Dict[str, XLayer]) -> List[XLayer]: """ONNX Conv to XLayer Conv conversion function""" logger.info("ONNX Conv -> XLayer Conv (+ BiasAdd)") assert len(node.get_outputs()) == 1 name = node.get_outputs()[0] bottoms = node.get_inputs() node_attrs = node.get_attributes() iX = xmap[bottoms[0]] # NCHW _, in_c, in_h, in_w = iX.shapes W_name = bottoms[1] wX = xmap[W_name] # OIHW B_name = bottoms[2] if len(bottoms) == 3 else None bX = xmap[B_name] if len(bottoms) == 3 else None auto_pad = node_attrs['auto_pad'] if 'auto_pad' in node_attrs\ else 'NOTSET' dilations = node_attrs['dilations'] if 'dilations' in node_attrs\ else [1, 1] dil_h, dil_w = dilations groups = node_attrs['group'] if 'group' in node_attrs\ else 1 kernel_shape = node_attrs['kernel_shape'] if 'kernel_shape' in node_attrs\ else wX.shapes[2:] kernel_h, kernel_w = kernel_shape pads = node_attrs['pads'] if 'pads' in node_attrs\ else None strides = node_attrs['strides'] if 'strides' in node_attrs\ else [1, 1] stride_h, stride_w = strides channels = wX.shapes[0] assert wX.shapes[1] == in_c // groups assert auto_pad == 'NOTSET' or pads is None if (auto_pad == 'NOTSET' and pads is None) or auto_pad == 'VALID': padding = [0, 0, 0, 0] # ht, hb, wl, wr elif auto_pad in ["SAME_UPPER", "SAME_LOWER"]: out_h, out_w = int(math.ceil(in_h / stride_h)), int(math.ceil(in_w / stride_w)) pad_h = (out_h - 1) * stride_h + (dil_h * (kernel_h - 1) + 1) - in_h pad_w = (out_w - 1) * stride_w + (dil_w * (kernel_w - 1) + 1) - in_w if auto_pad == "SAME_UPPER": pad_ht, pad_hb = pad_h // 2, pad_h - (pad_h // 2) pad_wl, pad_wr = pad_w // 2, pad_w - (pad_w // 2) else: pad_ht, pad_hb = pad_h - (pad_h // 2), pad_h // 2 pad_wl, pad_wr = pad_w - (pad_w // 2), pad_w // 2 padding = [pad_ht, pad_hb, pad_wl, pad_wr] else: assert len(pads) % 2 == 0 half = len(pads) // 2 padding = [] for i in range(half): padding.extend([pads[i], pads[i+half]]) # Quant_info (optional) vai_quant_in = node_attrs['vai_quant_in']\ if 'vai_quant_in' in node_attrs else [] vai_quant_out = node_attrs['vai_quant_out']\ if 'vai_quant_out' in node_attrs else [] vai_quant_weights = node_attrs['vai_quant_weights']\ if 'vai_quant_weights' in node_attrs else [] vai_quant_biases = node_attrs['vai_quant_biases']\ if 'vai_quant_biases' in node_attrs else [] vai_quant = node_attrs['vai_quant']\ if 'vai_quant' in node_attrs else [] conv_name = name if B_name is None else name + '_Conv' X = px.ops.conv2d( op_name=px.stringify(conv_name), input_layer=iX, weights_layer=wX, kernel_size=kernel_shape, strides=strides, padding_hw=padding, dilation=dilations, groups=groups, channels=channels, data_layout='NCHW', kernel_layout='OIHW', vai_quant=vai_quant, vai_quant_in=vai_quant_in, vai_quant_out=vai_quant_out, vai_quant_weights=vai_quant_weights, vai_quant_biases=vai_quant_biases, onnx_id=name ) res = [X] if B_name is not None: bias_add_X = xlf.get_xop_factory_func('BiasAdd')( op_name=px.stringify(name), axis=1, input_layer=X, bias_layer=bX, onnx_id=name ) res.append(bias_add_X) return res
7b004f41d103796ed01bc46e7dcff156171b35bd
31,786
def yices_bvconst_int32(n, x): """Conversion of an integer to a bitvector constant, returns NULL_TERM (-1) if there's an error. bvconst_int32(n, x): - n = number of bits - x = value The low-order bit of x is bit 0 of the constant. - if n is less than 32, then the value of x is truncated to n bits (i.e., only the n least significant bits of x are considered) - if n is more than 32, then the value of x is sign-extended to n bits. Error report: if n = 0 code = POS_INT_REQUIRED badval = n if n > YICES_MAX_BVSIZE code = MAX_BVSIZE_EXCEEDED badval = n. """ # let yices deal with int32_t excesses if n > MAX_INT32_SIZE: n = MAX_INT32_SIZE return libyices.yices_bvconst_int32(n, x)
b676ea0ea5b25f90b60f2efd67af553d835daa9b
31,787
from scipy.special import comb


def combination(n, k):
    """
    Number of combinations ("n choose k"): n! / (k! * (n-k)!)
    :param n: total number of items
    :param k: number of items chosen
    :return: the exact integer value
    """
    return comb(n, k, exact=True)
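# Examples:
print(combination(5, 2))   # 10
print(combination(10, 0))  # 1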
b87f9037decd765680e0e2d5b5dfea336e014b61
31,788
import mysql.connector

# `logger` and `INSERT_PRIZE_QUERY` are assumed module-level names.


def insert_prize(conn, number, prize):
    """
    Insert a prize row (original Spanish docstring: "Insert de premios").
    """
    try:
        cur = conn.cursor()
        logger.debug('INSERT PRIZE - "%s" / "%s"', number, prize)
        cur.execute(INSERT_PRIZE_QUERY, (number, prize, prize))
        conn.commit()
        cur.close()
        return True
    except mysql.connector.Error as e:
        logger.error(e)
        logger.error('INSERT PRIZE - "%s" / "%s"', number, prize)
        return False
f75e4f5b78e189aebb794c11f7b7bd00d3bb4101
31,789
import tables


def create_carray(h5file_uri, type, shape):
    """Creates an empty chunked array given a file type and size.

    h5file_uri - a uri to store the carray
    type - an h5file type (a tables Atom)
    shape - a tuple indicating rows/columns"""
    # `openFile`/`createCArray` were the PyTables 2.x camelCase names; the
    # PEP8 spellings below are required on PyTables 3.
    h5file = tables.open_file(h5file_uri, mode='w')
    root = h5file.root
    return h5file.create_carray(
        root, 'from_create_carray', type, shape=shape)
ca4c9605905a44b5f3027024f78cc855136472b0
31,790
def sort_dictionary_by_keys(input_dict): """ Sort the dictionary by keys in alphabetical order """ sorted_dict = {} for key in sorted(input_dict.keys()): sorted_dict[key] = input_dict[key] return sorted_dict
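# Example (Python 3.7+ dicts preserve insertion order, so the result
# iterates in key order):
print(sort_dictionary_by_keys({"b": 2, "a": 1, "c": 3}))
# {'a': 1, 'b': 2, 'c': 3}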
225df2c16d2b21740603c224319ad4b0eaa0899d
31,791
def sorted_instructions(binview): """ Return a sorted list of the instructions in the current viewport. """ addrs = [] instructions = [] for ii in binview.instructions: if ii[1] not in addrs: instructions.append(instr(ii)) addrs.append(ii[1]) del addrs instructions.sort(key=lambda x: x.address) return instructions
5f8602b80a73fc4b66bbb6d2e70070f2e9e35397
31,792
def quick_sort(seq):
    """
    Quicksort implementation (recursive variant).

    :param seq: any mutable collection of mutually comparable elements.
    :return: a collection with the elements arranged in ascending order.

    Examples:
    >>> quick_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]

    >>> quick_sort([])
    []

    >>> quick_sort([-2, -5, -45])
    [-45, -5, -2]
    """
    length = len(seq)
    if length <= 1:
        return seq
    else:
        # The last element is used as the pivot (note: pop() mutates seq).
        pivot = seq.pop()

        # lesser  - the elements of the collection that are <= pivot.
        # greater - the elements of the collection that are > pivot.
        greater, lesser = [], []
        for element in seq:
            if element > pivot:
                greater.append(element)
            else:
                lesser.append(element)

        # Recursively sort `lesser` and `greater`, then join the parts back
        # together with the pivot in between.
        return quick_sort(lesser) + [pivot] + quick_sort(greater)
46b56b5d29ca31a872e1805b66f4529a8bf48c6b
31,793
def _geocode(address): """ Like :func:`geocode` except returns the raw data instead. :param str address: A location (e.g., "Newark, DE") somewhere in the United States :returns: str """ key = _geocode_request(address) result = _get(key) if _CONNECTED else _lookup(key) if _CONNECTED and _EDITABLE: _add_to_cache(key, result) return result
f6cee8c606c5fe014c6c67787e0a9bcee70a0281
31,794
def _checkRange(n):
    """Raises ValueError if the argument is not between 0.0 and 1.0.

    (Assumed helper: the original snippet calls `_checkRange` without
    defining it; this is the conventional pytweening-style validator.)
    """
    if not 0.0 <= n <= 1.0:
        raise ValueError('Argument must be between 0.0 and 1.0.')


def easeOutBack(n, s=1.70158):
    """A tween function that overshoots the destination a little and then
    backs into the destination.

    Args:
        n (float): The time progress, starting at 0.0 and ending at 1.0.

    Returns:
        (float) The line progress, starting at 0.0 and ending at 1.0.
        Suitable for passing to getPointOnLine().
    """
    _checkRange(n)
    n = n - 1
    return n * n * ((s + 1) * n + s) + 1
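# Demo: the endpoints map to 0.0 and 1.0, while mid-range values exceed 1.0
# (that is the overshoot before settling back at the destination).
for t in (0.0, 0.5, 0.8, 1.0):
    print(t, easeOutBack(t))
# easeOutBack(0.8) is slightly above 1.0.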
bc5a0e34c2f7a16492c0456d8c28725888c6822c
31,795
def normalize_query_result(result, sort=True): """ Post-process query result to generate a simple, nested list. :param result: A QueryResult object. :param sort: if True (default) rows will be sorted. :return: A list of lists of RDF values. """ normalized = [[row[i] for i in range(len(row))] for row in result] return sorted(normalized) if sort else normalized
1df57ef889be041c41593766e1ce3cdd4ada7f66
31,796
from typing import List def count_jobpairs(buildpairs: List) -> int: """ :param buildpairs: A list of build pairs. :return: The number of job pairs in `buildpairs`. """ counts = [len(bp['jobpairs']) for bp in buildpairs] return sum(counts)
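# Example: three build pairs containing 2, 0 and 1 job pairs respectively.
buildpairs_demo = [{"jobpairs": [1, 2]}, {"jobpairs": []}, {"jobpairs": [3]}]
print(count_jobpairs(buildpairs_demo))  # 3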
30c345698400fd134456abcf7331ca2ebbfec10f
31,797
import numpy as np


def unravel_params(nn_params, input_layer_size, hidden_layer_size, num_labels,
                   n_hidden_layers=1):
    """Unravels flattened array into list of weight matrices

    :param nn_params: Row vector of model's parameters.
    :type nn_params: numpy.array
    :param input_layer_size: Number of units in the input layer.
    :type input_layer_size: int
    :param hidden_layer_size: Number of units in a hidden layer.
    :type hidden_layer_size: int
    :param num_labels: Number of classes in multiclass classification.
    :type num_labels: int
    :param n_hidden_layers: Number of hidden layers in network.
    :type n_hidden_layers: int
    :returns: array with model's weight matrices.
    :rtype: numpy.array(numpy.array)
    """
    input_layer_n_units = hidden_layer_size * (input_layer_size + 1)
    hidden_layer_n_units = hidden_layer_size * (hidden_layer_size + 1)

    theta = np.empty((n_hidden_layers + 1), dtype=object)

    # input layer to hidden layer
    theta[0] = nn_params[0:input_layer_n_units]
    theta[0] = np.reshape(theta[0], (hidden_layer_size, (input_layer_size + 1)))

    # hidden layer to hidden layer
    for i in range(1, n_hidden_layers):
        start = input_layer_n_units + (i - 1) * hidden_layer_n_units
        end = input_layer_n_units + i * hidden_layer_n_units
        theta[i] = nn_params[start:end]
        theta[i] = np.reshape(
            theta[i], (hidden_layer_size, (hidden_layer_size + 1)))

    # hidden layer to output layer
    start = input_layer_n_units + (n_hidden_layers - 1) * hidden_layer_n_units
    theta[n_hidden_layers] = nn_params[start:]
    theta[n_hidden_layers] = np.reshape(theta[n_hidden_layers],
                                        (num_labels, (hidden_layer_size + 1)))
    return theta
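# Shape demo: 2 inputs, 3 hidden units, 2 output classes, 1 hidden layer.
# The flat vector must have 3*(2+1) + 2*(3+1) = 17 entries.
theta_demo = unravel_params(np.arange(17.0), 2, 3, 2)
print(theta_demo[0].shape)  # (3, 3)
print(theta_demo[1].shape)  # (2, 4)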
40703668ad74e4f6dbaf5c9c291da0c1c9528f60
31,798
import idaapi


def is_op_stack_var(ea, index):
    """Check whether the operand at address *ea* (operand number *index*)
    is a stack variable."""
    return idaapi.is_stkvar(idaapi.get_flags(ea), index)
b041cc56d8a0f772223b96cf5fa8bd6e338c777f
31,799