content (string, 35 to 762k chars) | sha1 (string, 40 chars) | id (int64, 0 to 3.66M)
import math def calculatePredictions(ReviewsD, userIDTest, scoreTest, similarities): """ Function finds userIDTest in all similar items and uses all the scores for prediction calculation Returns actualScore and predictedScore for further calculations of finding rmse and mse values """ score = 0 sim = 0 sumB = 0 sumN = 0 # go over the entire dictionary, skipping the removed test item for itemID, userScoreOther in ReviewsD.items(): # if same users were found if (userIDTest in userScoreOther): # find similarity and score if (itemID in similarities): sim = similarities[itemID] if (sim == -1): continue score = userScoreOther[userIDTest] # calculations for prediction sumB += (score*sim) sumN += math.fabs(sim) if (sumB != 0 and sumN != 0): print("User: ", userIDTest) print("Actual score: ", scoreTest) print("Predicted score: ", math.fabs(sumB/sumN)) actualScore = scoreTest predictedScore = math.fabs(sumB/sumN) print(" ") # if predictions are found return (actualScore, predictedScore) else: # no predictions found return None
6b74b9d6ed4855030f2f7405190788db7e0dad52
5,800
def ensure_absolute_url(query_url): """This function adds the base URL to the beginning of a query URL if not already present. .. versionadded:: 3.2.0 :param query_url: The query URL that will be utilized in an API request :type query_url: str :returns: The query URL that includes a top-level domain :raises: :py:exc:`TypeError` """ if not base_url: raise errors.exceptions.MissingBaseUrlError() if query_url and not query_url.startswith('http'): query_url = f"{base_url}{query_url}" if query_url.startswith('/') else f"{base_url}/{query_url}" return query_url
eae729fc89515744615931a46dd87890863e5d7e
5,801
def create_data_source( simiotics_client: Simiotics, source_id: str, s3_root: str, ) -> data_pb2.Source: """ Registers an S3 data source against a Simiotics data registry Args: simiotics_client Simiotics client -- see the simiotics.client module source_id String identifying the source you would like to register s3_root Root under which all source samples may be found Returns: Source object """ source = data_pb2.Source( id=source_id, source_type=data_pb2.Source.SourceType.SOURCE_S3, data_access_spec=s3_root, ) request = data_pb2.RegisterSourceRequest( version=simiotics_client.client_version, source=source, ) response = simiotics_client.data_registry.RegisterSource(request) return response.source
c00de849d20017efd12395eb5c097f95d5efe207
5,802
def func_2(x: float, c: float, d: float) -> float: """ Test function 2. """ return x + c + d
b95400c6779c0e64e7bb6cda493c0ee5e6f05f7c
5,803
import aiohttp async def async_get(server: t.Union[Server, str], view_or_url: str, view_data: Kwargs = None, session: aiohttp.ClientSession = None, params: Kwargs = None, **kwargs) -> Response: """Sends a GET request.""" return await async_request('get', server, view_or_url, view_data=view_data, session=session, params=params, **kwargs)
fe8bb90c78df758e48971978831de5553809db48
5,804
def DictionaryAddSchemaVersion(builder, schemaVersion): """This method is deprecated. Please switch to AddSchemaVersion.""" return AddSchemaVersion(builder, schemaVersion)
cad601667ec715e9519de02d23ee0b13f3903285
5,805
import math def isInner(x1, y1, x2, y2, scale): """ Currently, it's a rectangular kernel Other options: rectangular f(x) = 1 if a <= scale <= b else 0 I don't get the rest of them http://saravananthirumuruganathan.wordpress.com/2010/04/01/introduction-to-mean-shift-algorithm/ """ distance = math.sqrt( ((x1-x2)**2) + ((y1-y2)**2) ) return distance <= scale
b2c715b33ae8b38fdfd19c71b54ee3980b336eeb
5,806
def add_sulci(fig, dataview, extents=None, height=None, with_labels=True, overlay_file=None, **kwargs): """Add sulci layer to figure Parameters ---------- fig : figure or ax figure into which to plot image of curvature dataview : cortex.Dataview object dataview containing data to be plotted, subject (surface identifier), and transform. extents : array-like 4 values for [Left, Right, Top, Bottom] extents of image plotted. None defaults to extents of images already present in figure. height : scalar Height of image. None defaults to height of images already present in figure. with_labels : bool Whether to display text labels for sulci Other Parameters ---------------- kwargs : keyword arguments Keywords args govern line appearance in final plot. Allowable kwargs are : linewidth, linecolor Returns ------- img : matplotlib.image.AxesImage matplotlib axes image object for plotted data """ svgobject = db.get_overlay(dataview.subject, overlay_file=overlay_file) svg_kws = _convert_svg_kwargs(kwargs) layer_kws = _parse_defaults('sulci_paths') layer_kws.update(svg_kws) sulc = svgobject.get_texture('sulci', height, labels=with_labels, **layer_kws) if extents is None: extents = _get_extents(fig) _, ax = _get_fig_and_ax(fig) img = ax.imshow(sulc, aspect='equal', interpolation='bicubic', extent=extents, label='sulci', zorder=5) return img
20d532a107f472a8f83a9a14c9ee85b54270dd08
5,807
def build_puller_tdwhdfs_config_param( cluster_name, connector, data_id, topic, kafka_bs, fs_default_name, hadoop_job_ugi, hdfs_data_dir, username, secure_id, secure_key, ): """ TDW data pull task configuration :param cluster_name: cluster name :param connector: task name :param data_id: dataid :param topic: target topic :param kafka_bs: target kafka address :param fs_default_name: hdfs address :param hadoop_job_ugi: ugi of the internal TDW edition :param hdfs_data_dir: data directory :param username: username provided by TDW :param secure_id: secure_id provided by TDW :param secure_key: secure_key provided by TDW :return: config """ task_config = { "connector": connector, "dataId": "%s" % data_id, "fsDefaultName": fs_default_name, "hadoopJobUgi": hadoop_job_ugi, "hdfsDataDir": hdfs_data_dir, "hdfsConfDir": dataapi_settings.HDFS_DEFAULT_PULSAR_CONF_DIR, } tenant = settings.PULSAR_OUTER_TENANT namespace = settings.PULSAR_OUTER_NAMESPACE pulsar_topic = "persistent://{}/{}/{}".format(tenant, namespace, topic) config = { "topicName": pulsar_topic, "parallelism": 1, "archive": "builtin://databus_tdw_puller", "schemaType": "STRING", "configs": task_config, } return config
1c74207b2903a9672d56ac447576504e514493a8
5,808
def make_url(issue, sites=[]): """ Compose search terms and sites with url safe encoding. """ print('issue', issue) terms = issue.strip().split() terms = [quote(x, safe='') for x in terms] # TODO test with just spaces url = 'https://duckduckgo.com/?q=' + '+'.join(terms) if sites: url += '+' + quote('site:' + ','.join(sites)) + '&ia=web' print(url) return url
cb6193f7164a16e731874070e8a824273bfbc49f
5,809
import pandas def order_by_digestibility(sv_reg, pft_id_set, aoi_path): """Calculate the order of feed types according to their digestibility. During diet selection, animals select among feed types in descending order by feed type digestibility. Because digestibility is linearly related to crude protein content, the order of feed types may be estimated from their nitrogen to carbon ratios. Order feed types by digestibility according to the mean nitrogen to carbon ratio of each feed type across the study area aoi. Parameters: sv_reg (dict): map of key, path pairs giving paths to state variables for the previous month, including C and N in aboveground live and standing dead pft_id_set (set): set of integers identifying plant functional types aoi_path (string): path to vector layer giving the spatial extent of the model Returns: ordered_feed_types, a list of strings where each string designates a feed type by a combination of pft_i and fraction (aboveground live or standing dead), in descending order of digestibility """ def calc_nc_ratio(cstatv_path, nstatv_path, aoi_path): """Calculate the mean nitrogen to carbon ratio of a biomass fraction. Calculate the mean nitrogen to carbon ratio of a biomass fraction falling inside the study area aoi. The ratio is calculated from the state variables representing carbon and nitrogen content of that biomass fraction. If the area of interest vector dataset contains more than one polygon feature, the average ratio is calculated across features. Parameters: cstatv_path (string): path to raster containing carbon in the biomass fraction nstatv_path (string): path to raster containing nitrogen in the biomass fraction aoi_path (string): path to vector layer defining the study area of interest Returns: nc_ratio, the ratio of mean nitrogen to mean carbon for this state variable inside the model area of interest """ carbon_zonal_stat_df = pandas.DataFrame.from_dict( pygeoprocessing.zonal_statistics((cstatv_path, 1), aoi_path), orient='index') if carbon_zonal_stat_df['count'].sum() == 0: return 0 else: mean_carbon = ( carbon_zonal_stat_df['sum'].sum() / carbon_zonal_stat_df['count'].sum()) nitrogen_zonal_stat_df = pandas.DataFrame.from_dict( pygeoprocessing.zonal_statistics((nstatv_path, 1), aoi_path), orient='index') if nitrogen_zonal_stat_df['count'].sum() == 0: mean_nitrogen = 0 else: mean_nitrogen = ( nitrogen_zonal_stat_df['sum'].sum() / nitrogen_zonal_stat_df['count'].sum()) return (mean_nitrogen / mean_carbon) nc_ratio_dict = {} for pft_i in pft_id_set: for statv in ['agliv', 'stded']: cstatv_path = sv_reg['{}c_{}_path'.format(statv, pft_i)] nstatv_path = sv_reg['{}e_1_{}_path'.format(statv, pft_i)] nc_ratio = calc_nc_ratio(cstatv_path, nstatv_path, aoi_path) nc_ratio_dict['{}_{}'.format(statv, pft_i)] = nc_ratio # order the dictionary by descending N/C ratio keys, get list from values sorted_list = sorted( [(ratio, feed_type) for (feed_type, ratio) in nc_ratio_dict.items()], reverse=True) ordered_feed_types = [feed_type for (ratio, feed_type) in sorted_list] return ordered_feed_types
f586cbecea72c1bf5a901908f4f9d1414f3d6b93
5,810
def match(pattern, sexp, known_bindings={}): """ Determine if sexp matches the pattern, with the given known bindings already applied. Returns None if no match, or a (possibly empty) dictionary of bindings if there is a match Patterns look like this: ($ . $) matches the literal "$", no bindings (mostly useless) (: . :) matches the literal ":", no bindings (mostly useless) ($ . A) matches B iff B is an atom; and A is bound to B (: . A) matches B always; and A is bound to B (A . B) matches (C . D) iff A matches C and B matches D and bindings are the unification (as long as unification is possible) """ if not pattern.listp(): if sexp.listp(): return None return known_bindings if pattern.as_atom() == sexp.as_atom() else None left = pattern.first() right = pattern.rest() atom = sexp.as_atom() if left == ATOM_MATCH: if sexp.listp(): return None if right == ATOM_MATCH: if atom == ATOM_MATCH: return {} return None return unify_bindings(known_bindings, right.as_atom(), sexp) if left == SEXP_MATCH: if right == SEXP_MATCH: if atom == SEXP_MATCH: return {} return None return unify_bindings(known_bindings, right.as_atom(), sexp) if not sexp.listp(): return None new_bindings = match(left, sexp.first(), known_bindings) if new_bindings is None: return new_bindings return match(right, sexp.rest(), new_bindings)
5389534e437d9090b29af8137d9d106c6550941d
5,811
def who_is_it(image_path, database, model): """ Implements face recognition for the happy house by finding who is the person on the image_path image. Arguments: image_path -- path to an image database -- database containing image encodings along with the name of the person on the image model -- your Inception model instance in Keras Returns: min_dist -- the minimum distance between image_path encoding and the encodings from the database identity -- string, the name prediction for the person on image_path """ ### START CODE HERE ### ## Step 1: Compute the target "encoding" for the image. Use img_to_encoding() see example above. ## (≈ 1 line) encoding = img_to_encoding(image_path, model) ## Step 2: Find the closest encoding ## # Initialize "min_dist" to a large value, say 100 (≈1 line) min_dist = 100 # Loop over the database dictionary's names and encodings. for (name, db_enc) in database.items(): # Compute L2 distance between the target "encoding" and the current "emb" from the database. (≈ 1 line) dist = np.linalg.norm(encoding - db_enc) # If this distance is less than the min_dist, then set min_dist to dist, and identity to name. (≈ 3 lines) if dist < min_dist: min_dist = dist identity = name ### END CODE HERE ### if min_dist > 0.7: print("Not in the database.") else: print ("it's " + str(identity) + ", the distance is " + str(min_dist)) return min_dist, identity
60136acaaf1ef95a06917828eb9d545e9c802d59
5,812
def generate_map_bin(geo, img_shape): """Create a q map and the pixel resolution bins Parameters ---------- geo : pyFAI.geometry.Geometry instance The calibrated geometry img_shape : tuple, optional The shape of the image, if None pull from the mask. Defaults to None. Returns ------- q : ndarray The q map qbin : ndarray The pixel resolution bins """ r = geo.rArray(img_shape) q = geo.qArray(img_shape) / 10 # type: np.ndarray q_dq = geo.deltaQ(img_shape) / 10 # type: np.ndarray pixel_size = [getattr(geo, a) for a in ["pixel1", "pixel2"]] rres = np.hypot(*pixel_size) rbins = np.arange(np.min(r) - rres / 2., np.max(r) + rres / 2., rres / 2.) rbinned = BinnedStatistic1D(r.ravel(), statistic=np.max, bins=rbins) qbin_sizes = rbinned(q_dq.ravel()) qbin_sizes = np.nan_to_num(qbin_sizes) qbin = np.cumsum(qbin_sizes) qbin[0] = np.min(q_dq) if np.max(q) > qbin[-1]: qbin[-1] = np.max(q) return q, qbin
964cbc13eb652acbdf85f656bb9d789c5f1949e5
5,813
import sys import os import vtk def read_poly_data(filename): """ Read a VTK PolyData file, choosing the reader by file extension. :param filename: path to a .vtk or .vtp polydata file :return: the loaded vtkPolyData object, or exits on failure """ # Check which PolyData reader should be used if ".vtk" in filename: reader = vtk.vtkPolyDataReader() reader.SetFileName(filename) reader.Update() return reader.GetOutput() elif ".vtp" in filename: reader = vtk.vtkXMLPolyDataReader() reader.SetFileName(filename) reader.Update() return reader.GetOutput() else: print("ERROR: Failed to read in polydata") return sys.exit(os.EX_IOERR)
740d1a73121caef0e31c33b5d44c9d70d0865d5f
5,814
def wklobjective_converged(qsum, f0, plansum, epsilon, gamma): """Compute the final wkl value after convergence.""" obj = gamma * (plansum + qsum) obj += epsilon * f0 obj += - (epsilon + 2 * gamma) * plansum return obj
079841a8ee6d845cdac25a48306c023a1f38b5f7
5,815
def addFavDirections(request): """ Add a favourite directions entry (origin, destination, url) for the currently logged in user, taken from the request query parameters. """ try: user = request.user origin = str(request.query_params.get('origin')) destination = str(request.query_params.get('destination')) url = str(request.query_params.get('url')) r = FavouriteDirections(user=user, origin=origin, destination=destination, url=url) r.save() return HttpResponse(status=status.HTTP_201_CREATED) except IntegrityError as e: return HttpResponse( "Error: Stop is already a favourite for this user.") except AssertionError as e: return HttpResponse("Error: Stop number does not exist.")
2585006e5ea1f73433671c984dce5ce4e8ed2079
5,816
import types import numpy import pandas def hpat_pandas_dataframe_index(df): """ Intel Scalable Dataframe Compiler User Guide ******************************************** Pandas API: pandas.DataFrame.index Examples -------- .. literalinclude:: ../../../examples/dataframe/dataframe_index.py :language: python :lines: 27- :caption: The index (row labels) of the DataFrame. :name: ex_dataframe_index .. command-output:: python ./dataframe/dataframe_index.py :cwd: ../../../examples Intel Scalable Dataframe Compiler Developer Guide ************************************************* Pandas DataFrame attribute :attr:`pandas.DataFrame.index` implementation. .. only:: developer Test: python -m sdc.runtests -k sdc.tests.test_dataframe.TestDataFrame.test_index* """ ty_checker = TypeChecker('Attribute index.') ty_checker.check(df, DataFrameType) if isinstance(df.index, types.NoneType): empty_df = not df.columns def hpat_pandas_df_index_none_impl(df): if empty_df == True: # noqa return numpy.arange(0) else: return pandas.RangeIndex(len(df)) return hpat_pandas_df_index_none_impl else: def hpat_pandas_df_index_impl(df): return df._index return hpat_pandas_df_index_impl
64b512d170ca5734a416688a9728f535248e9395
5,817
def _get_should_cache_fn(conf, group): """Build a function that returns a config group's caching status. For any given object that has caching capabilities, a boolean config option for that object's group should exist and default to ``True``. This function will use that value to tell the caching decorator if caching for that object is enabled. To properly use this with the decorator, pass this function the configuration group and assign the result to a variable. Pass the new variable to the caching decorator as the named argument ``should_cache_fn``. :param conf: config object, must have had :func:`configure` called on it. :type conf: oslo_config.cfg.ConfigOpts :param group: name of the configuration group to examine :type group: string :returns: function reference """ def should_cache(value): if not conf.cache.enabled: return False conf_group = getattr(conf, group) return getattr(conf_group, 'caching', True) return should_cache
7a11124c640bfb3ced28e2d9395593b70dc85a0a
5,818
def set_difference(lst1, lst2): """returns the elements and indices of elements in lst1 that are not in lst2""" elements = [] indices = [] for indx, item in enumerate(lst1): if item not in lst2: elements.append(item) indices.append(indx) return elements, indices
75e78de68fb2528341f7246b77f7046da2c9274f
5,819
import json def _get_dict(path): """ Parameters __________ path: string or array Path to json file. In case a list of paths is provided instead, read them all and merge then into a single dict. Assumes depth two. Returns _______ d: dict Dictionary containing marker information. d = { key: { subkey: [...], ... }, ... } """ # TODO straighten up the spaghetti if isinstance(path, str): with open(path, "r") as f: return json.load(f) else: d = {} for path in path: with open(path, "r") as f: d_part = json.load(f) for key in d_part: if key in d: for subkey in d_part[key]: if subkey in d[key]: # to remove duplicates d[key][subkey] = list(set().union( d[key][subkey], d_part[key][subkey])) else: d[key][subkey] = d_part[key][subkey] else: d[key] = d_part[key] return d
fddeff3bdfb0c70b1f95c2cf26d164c95d4065c2
5,820
def _super_check(args, names, op, fmt, msg, val_err): """ A flexible function is used to check whether type or value of variables is valid, which supports in both graph/pynative mode. Args: args(any): 'args' is used as one of argument for operation function and format function. names(any): 'names' is used as one of argument for format function. op(str): 'op' is a string to specify an operation. This operation will be obtained an actual function from a StringDict object, with 'args' as argument. fmt(str): 'fmt' is a string to specify a format. This format will be obtained an actual function from a StringDict object, with 'args' and 'names' as arguments. msg(str, tuple): 'msg' is used the case where format function is not necessary. When 'msg' is not None, we will throw the 'msg' as the error message. val_err(bool): Determine the type of TypeError/ValueError. When 'val_err' is True, raises ValueError, otherwise TypeError. Note: This function does not contain any parameter checks. """ op_fn = _op_dict.get(op) if not op_fn(args): if not msg: fmt_fn = _fmt_dict.get(fmt) msg = fmt_fn(args, names) if val_err: _raise_value_error(*_tuple(msg)) else: _raise_type_error(*_tuple(msg)) return args
07c63f34216e84c10c5ff0c2f886d27aaaf5f245
5,821
import json def obter_novo_username() -> str: """ -> Asks for a new username. :return: Returns the new username. """ username = input('Qual é o seu nome? ') arquivo = 'arquivos_json/nome_de_usuario.json' with open(arquivo, 'w') as obj_arq: json.dump(username, obj_arq) return username
b4d4922d68b1fb80e5a9270638d134b5806969fd
5,822
def BDD100K_MOT2020(path: str) -> Dataset: """`BDD100K_MOT2020 <https://bdd-data.berkeley.edu>`_ dataset. The file structure should be like:: <path> bdd100k_box_track_20/ images/ train/ 00a0f008-3c67908e/ 00a0f008-3c67908e-0000001.jpg ... ... val/ b1c9c847-3bda4659/ b1c9c847-3bda4659-0000001.jpg ... ... test/ cabc30fc-e7726578/ cabc30fc-e7726578-0000001.jpg ... ... labels/ train/ 00a0f008-3c67908e.json ... val/ b1c9c847-3bda4659.json ... Arguments: path: The root directory of the dataset. Returns: Loaded :class:`~tensorbay.dataset.dataset.Dataset` instance. """ return _tracking_loader(path, "mot")
a850b874da64d9efaf13ae3f1f5ca79805f5307d
5,823
import logging def _parse_block_postheader(line): """ (209)**************!*****************!!*************... """ parts = line[1:].split(')', 1) qlen = int(parts[0]) if not len(parts[1]) == qlen: logging.warn("postheader expected %d-long query, found %d", qlen, len(parts[1])) return qlen, parts[1]
5eee6c11160c0f91cb37c025d6d265188488cad9
5,824
def _remove_dimer_outliers(bond_lengths, energies, zscore_cutoff=3.0): """Removes outliers """ z_score = stats.zscore(energies) idx_keep = np.where(z_score < zscore_cutoff)[0] return bond_lengths[idx_keep], energies[idx_keep]
409ca918213315cfeb3d279319f42bf6ca5651a5
5,825
def get_auth_token(cmd_args=None): """ :param cmd_args: An optional list of additional arguments to pass on the command line :return: The current user's token """ r = Result("whoami") r.add_action(oc_action(cur_context(), "whoami", cmd_args=['-t', cmd_args])) r.fail_if("Unable to determine current token") return r.out().strip()
01edde0d4738a96d25dbea37d3d539e8a8aee7ca
5,826
import os def _is_toplevel_repository_dir(directory): """Returns if a directory is a git or mercurial directory. This works by searching for a file or directory named `.git` or `.hg` in the directory. This works for both submodules and normal repositories. """ return (os.path.exists(os.path.join(directory, ".git")) or os.path.exists(os.path.join(directory, ".hg")))
25db538b6ef4f7febbdb282561885ff807f03bbe
5,827
import sys def breakOnException(func=None, *, exceptionList=Exception, debugger='pdb'): """ A function wrapper that causes debug mode to be entered when the wrapped function throws a specified exception. Parameters ---------- func : The function to wrap. exceptionList : An exception or tuple of exceptions to break on. debugger : The debugger used when debug mode is entered. This can be either the debugging module itself or a string containing the name of the debugging module. Currently, pdb and ipdb are supported. """ if func is None: return partial(breakOnException, exceptionList=exceptionList, debugger=debugger) debugger = import_(debugger) @wraps(func) def wrapper(*args, **kwargs): try: return func(*args, **kwargs) except exceptionList as e: debug_frame = sys._getframe().f_back set_trace(debug_frame, debugger) return wrapper
9b031e230fc822b03d88bba65a2962f0c71ece30
5,828
import re def hash_sid(sid: str) -> str: """ Hash a SID preserving well-known SIDs and the RID. Parameters ---------- sid : str SID string Returns ------- str Hashed SID """ if re.match(WK_SID_PATTERN, sid): return sid usr_sid = re.match(SID_PATTERN, sid) if usr_sid: return ( f"{usr_sid.groups()[0]}{hash_item(usr_sid.groups()[1], delim='-')}" + f"{usr_sid.groups()[2]}" ) return sid
5966d82f1412f7bfdb64e6a9b4904861f43e7c46
5,829
def horizontal_move(t, h_speed=-2/320): """Probe moves horizontally at h_speed [cm/s]""" return 0.*t, h_speed*t, 2/16 + 0*t
d9cf0e5b968e7d8319b7f63f7d1d7a4666484ad3
5,830
def fix_pdp_post(monkeypatch): """monkeyed request /decision/v1 to PDP""" def monkeyed_policy_rest_post(uri, json=None, **kwargs): """monkeypatch for the POST to policy-engine""" return MockHttpResponse("post", uri, json=json, **kwargs) _LOGGER.info("setup fix_pdp_post") pdp_client.PolicyRest._lazy_inited = False pdp_client.PolicyRest._lazy_init() monkeypatch.setattr('policyhandler.pdp_client.PolicyRest._requests_session.post', monkeyed_policy_rest_post) yield fix_pdp_post _LOGGER.info("teardown fix_pdp_post")
18957f2c9f3501ec54962e39fe664a0221133566
5,831
def del_project(): """ @api {post} /v1/interfaceproject/del InterfaceProject_DeleteProject @apiName interfaceProDel @apiGroup Interface @apiDescription Delete a project @apiParam {int} id sub-project id @apiParam {int} all_project_id parent project id @apiParamExample {json} Request-Example: { "id": 1, "all_project_id": 4 } @apiSuccessExample {json} Success-Response: HTTP/1.1 200 OK { "data": { "environment_choice": "first", "headers": [], "host": [ "http://sx.api.mengtuiapp.com" ], "host_four": [], "host_three": [], "host_two": [], "principal": null, "pro_name": "mengtui", "user_id": 3, "variables": [] }, "status": 1 } """ data = request.json ids = data.get('id') all_project_id = data.get('all_project_id') jsondata = InterfaceProjectBusiness.del_project(ids, all_project_id) return jsondata
4697360bbdab0ce3b4b10ebbdd1fab66b938fb4b
5,832
from skimage.transform import resize from enum import Enum def draw_pattern_fill(viewport, psd, desc): """ Create a pattern fill. """ pattern_id = desc[Enum.Pattern][Key.ID].value.rstrip('\x00') pattern = psd._get_pattern(pattern_id) if not pattern: logger.error('Pattern not found: %s' % (pattern_id)) return None, None panel = get_pattern(pattern) assert panel.shape[0] > 0 scale = float(desc.get(Key.Scale, 100.)) / 100. if scale != 1.: new_shape = ( max(1, int(panel.shape[0] * scale)), max(1, int(panel.shape[1] * scale)) ) panel = resize(panel, new_shape) height, width = viewport[3] - viewport[1], viewport[2] - viewport[0] reps = ( int(np.ceil(float(height) / panel.shape[0])), int(np.ceil(float(width) / panel.shape[1])), 1, ) channels = EXPECTED_CHANNELS.get(pattern.image_mode) pixels = np.tile(panel, reps)[:height, :width, :] if pixels.shape[2] > channels: return pixels[:, :, :channels], pixels[:, :, -1:] return pixels, None
5d03ae9ebf13b3aa39c9d7f56a68a4a9056331cc
5,833
def register_post(): """Register a new user.""" username = bottle.request.forms.username password1 = bottle.request.forms.password1 password2 = bottle.request.forms.password2 # Does the user already exist? c = baza.cursor() c.execute("SELECT 1 FROM uporabnik WHERE username=%s", [username]) if c.fetchone(): # The user already exists return bottle.template("registracija.html", username='', napaka='To uporabniško ime je že zavzeto') elif not password1 == password2: # The passwords do not match return bottle.template("registracija.html", username='', napaka='Gesli se ne ujemata') else: # Everything is fine, insert the new user into the database password = password_md5(password1) print('tukaj sem') c.execute("INSERT INTO uporabnik (username, password) VALUES (%s, %s)", (username, password)) bottle.redirect("/prijava/")
7c6569828f33287b7ea19ab37eb0ac868fd87c0a
5,834
def check_format_input_vector( inp, dims, shape_m1, sig_name, sig_type, reshape=False, allow_None=False, forbid_negative0=False, ): """checks vector input and returns in formatted form - inp must be array_like - convert inp to ndarray with dtype float - inp shape must be given by dims and shape_m1 - print error msg with signature arguments - if reshape=True: returns shape (n,3) - required for position init and setter - if allow_None: return None - if extend_dim_to2: add a dimension if input is only (1,2,3) - required for sensor pixel """ if allow_None: if inp is None: return None is_array_like( inp, f"Input parameter `{sig_name}` must be {sig_type}.\n" f"Instead received type {type(inp)}.", ) inp = make_float_array( inp, f"Input parameter `{sig_name}` must contain only float compatible entries.\n", ) check_array_shape( inp, dims=dims, shape_m1=shape_m1, msg=( f"Input parameter `{sig_name}` must be {sig_type}.\n" f"Instead received array_like with shape {inp.shape}." ), ) if reshape: return np.reshape(inp, (-1, 3)) if forbid_negative0: if np.any(inp <= 0): raise MagpylibBadUserInput( f"Input parameter `{sig_name}` cannot have values <= 0." ) return inp
cd26290058fbf9fba65a5ba005eaa8bd6da23a32
5,835
def get_proyecto_from_short_url(short_url): """ :param short_url: :return: item for Proyecto """ item = Proyecto.objects.get(short_url=short_url) if item.iniciativas_agrupadas is not None and \ item.iniciativas_agrupadas != '' and '{' in \ item.iniciativas_agrupadas: iniciativas = item.iniciativas_agrupadas.replace("{", "") iniciativas = iniciativas.replace("}", "") item.iniciativas_agrupadas = iniciativas.split(",") item.congresistas_with_links = hiperlink_congre(item.congresistas) item.fecha_presentacion = convert_string_to_time(item.fecha_presentacion) item.fecha_presentacion_human = arrow.get(item.fecha_presentacion).format('DD MMMM, YYYY', locale='es_es') item.numero_congresistas = len(item.congresistas.split(";")) return item
8f48d62db11bb80803ce13e259eed1b826a2450c
5,836
def select_x(data, order=None): """ Helper function that does a best effort of selecting an automatic x axis. Returns None if it cannot find x axis. """ if data is None: return None if len(data) < 1: return None if order is None: order = ['T', 'O', 'N', 'Q'] else: _validate_custom_order(order) d = _classify_data_by_type(data, order) chosen_x = None for typ in order: if len(d[typ]) >= 1: chosen_x = d[typ][0] break return chosen_x
8efe25aea57444093fe19abcf8df07080c2ec0a6
5,837
def map_clonemode(vm_info): """ Convert the virtualbox config file values for clone_mode into the integers the API requires """ mode_map = {"state": 0, "child": 1, "all": 2} if not vm_info: return DEFAULT_CLONE_MODE if "clonemode" not in vm_info: return DEFAULT_CLONE_MODE if vm_info["clonemode"] in mode_map: return mode_map[vm_info["clonemode"]] else: raise SaltCloudSystemExit( "Illegal clonemode for virtualbox profile. Legal values are: {}".format( ",".join(mode_map.keys()) ) )
39b62c11dbf9f168842a238d23f587aa64a0ff61
5,838
def update_dashboard(dashboard_slug): """Update Dashboard Update an existing Dashboard --- tags: - "Dashboards" parameters: - name: dashboard_slug in: path type: string required: true - name: name in: body schema: type: object required: - name properties: name: type: string description: new name for dashboard responses: 200: description: Success schema: type: object properties: message: type: string dashboard: $ref: '#/definitions/Dashboard' 400: $ref: '#/responses/Error' """ dashboard = Dashboard.query.filter_by( slug=dashboard_slug, owner=current_user ).first() name = request.json.get("name", None) if not name: return jsonify(error="Name is required."), 400 if Dashboard.query.filter_by(slug=slugify(name), owner=current_user).first(): return jsonify(error="A dashboard with that name already exists!"), 400 if dashboard: dashboard.set_name(name) db.session.commit() return jsonify( message="Dashboard updated successfully!", dashboard=dashboard.to_dict() ) else: return jsonify(error="Dashboard doesn't exist!"), 400
90ecfd68f6c64076893248aa7a2de58ed01afe02
5,839
import torch def dqn(n_episodes=2000, max_t=1000, eps_start=1.0, eps_end=0.01, eps_decay=0.995, target=100.0, model='checkpoint.pth'): """Deep Q-Learning. Params ====== n_episodes (int): maximum number of training episodes max_t (int): maximum number of timesteps per episode eps_start (float): starting value of epsilon, for epsilon-greedy action selection eps_end (float): minimum value of epsilon eps_decay (float): multiplicative factor (per episode) for decreasing epsilon target (float): desired minimal average per 100 episodes model (str): path to save model """ scores = [] # list containing scores from each episode scores_window = deque(maxlen=100) # last 100 scores eps = eps_start # initialize epsilon for i_episode in range(1, n_episodes+1): env_info = env.reset(train_mode=True)[brain_name] # reset the environment state = env_info.vector_observations[0] score = 0 for t in range(max_t): action = agent.act(state, eps) env_info = env.step(action)[brain_name] # send the action to the environment next_state = env_info.vector_observations[0] # get the next state reward = env_info.rewards[0] # get the reward done = env_info.local_done[0] # see if episode has finished agent.step(state, action, reward, next_state, done) state = next_state score += reward if done: break scores_window.append(score) # save most recent score scores.append(score) # save most recent score eps = max(eps_end, eps_decay*eps) # decrease epsilon print('\rEpisode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)), end="") if i_episode % 100 == 0: print('\rEpisode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window))) if np.mean(scores_window) >= target: print('\nEnvironment solved in {:d} episodes!\tAverage Score: {:.2f}'.format(i_episode-100, np.mean(scores_window))) torch.save(agent.qnetwork_local.state_dict(), model) break return scores
433484a848f645e4581702934748c428b5d59adf
5,840
def latLon2XY(xr, yr, lat, lon, ieast=1, azimuth=0): """ Calculate the cartesian distance between consecutive lat,lon points. Will bomb at North and South Poles. Assumes geographical coordinates and azimuth in decimal degrees, local Cartesian coordinates in km. :param xr: Reference longitude, normally 0. :param yr: Reference latitude, normally 0. :param lat: Array of latitudes. :param lon: Array of longitudes. :param int ieast: 1 if longitude increases toward the East (normal case), -1 if longitude increases toward the West. :param int azimuth: local coordinate system constructed with origin at latr,lonr, X axis ('North') in direction of azimuth, and Y axis such that X x Y = Z(down) when going from (lat,lon) to (x,y) scalar or array. :returns: Array of northward and eastward distances between consecutive points. use :func:`xy2r` to convert to a distance between consecutive points. """ #if len(lat) != len(lon): # raise ArrayMismatch, "Input array sizes do not match" radius = 6367.0 # Earth radius (km) lat = np.radians(lat) lon = np.radians(lon) # Is azimuth fixed or variable? if np.size(azimuth) == 1: angle = np.radians(azimuth)*np.ones(lat.size - 1) else: angle = np.radians(azimuth) cosazi = np.cos(angle) sinazi = np.sin(angle) xntru = xr + radius * (np.diff(lat)) yetru = yr + ieast * radius * (np.diff(lon)) * np.cos(lat[1:]) xn = xntru * cosazi + yetru * sinazi ye = -xntru * sinazi + yetru * cosazi return xn, ye
4ea265f02e87593d389bcd6839390b51cc024add
5,841
import os def create_pipeline_opts(args): """Create standard Pipeline Options for Beam""" options = pipeline_options.PipelineOptions() options.view_as(pipeline_options.StandardOptions).runner = args.runner google_cloud_options = options.view_as(pipeline_options.GoogleCloudOptions) google_cloud_options.project = args.project if args.runner == 'DataflowRunner': google_cloud_options.job_name = args.job_name google_cloud_options.temp_location = '{}/temp'.format(args.storage_bucket) google_cloud_options.staging_location = '{}/staging'.format(args.storage_bucket) worker_options = options.view_as(pipeline_options.WorkerOptions) worker_options.num_workers = args.num_workers worker_options.max_num_workers = args.max_num_workers worker_options.machine_type = args.machine_type setup_options = options.view_as(pipeline_options.SetupOptions) setup_options.setup_file = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'setup.py') return options
dabf786dad2548eb04d1ae970b7160a007a30c57
5,842
import sys def check_if_module_installed(module_name): """Check if a module is installed. :param module_name: Name of Module :return: Bool if module is installed or not """ distribution_instance = filter(None, (getattr(finder, 'find_distributions', None) for finder in sys.meta_path)) for res in distribution_instance: dists = res(DistributionFinder.Context(name=module_name)) dist = next(iter(dists), None) if dist is not None: return True else: return False
70149e9e285c71dbe62e9a46fcc8733b86bf3413
5,843
def __virtual__(): """Only load if grafana4 module is available""" return "grafana4.get_org" in __salt__
acbfe3b15dafc45ab36955d0a72b92544f4dd41a
5,844
def setup( key=None, force=False ): """Do setup by creating and populating the directories This incredibly dumb script is intended to let you unpack the Tcl/Tk library Togl from SourceForce into your PyOpenGL 3.0.1 (or above) distribution. Note: will not work with win64, both because there is no win64 package and because we don't have a url defined for it. """ if key is None: key = '%s%s'%( sys.platform,suffix ) log.info( 'Doing setup for platform key: %s', key ) target_directory = os.path.join( os.path.dirname( OpenGL.__file__ ), 'Tk', 'togl-%s'%( key, ), ) log.info( 'Target directory: %s', target_directory ) if key not in urls: log.error( """URL for platform key %s is not present, please update script""", key, ) sys.exit( 1 ) if os.path.exists( target_directory ): return False url = urls[key] log.info( 'Downloading: %s', url ) filename,headers = urllib.urlretrieve( url ) log.info( 'Downloaded to: %s', filename ) if not os.path.isdir( target_directory ): log.warn( 'Creating directory: %s', target_directory ) try: os.makedirs( target_directory ) except OSError, err: log.error( "Unable to create directory: %s", target_directory ) sys.exit( 2 ) if '.tar.gz' in url: log.info( 'Opening TarFile' ) fh = tarfile.open( filename, 'r:gz') def getnames(): return fh.getnames() def getfile( name ): return fh.extractfile( name ) elif '.zip' in url: log.info( 'Opening ZipFile' ) fh = zipfile.ZipFile( filename ) def getnames(): return fh.namelist() def getfile( name ): return fh.open( name ) try: for name in getnames(): log.debug( 'Found file: %s', name ) if fnmatch.fnmatch( name, WANTED_FILES ): if not name.endswith( '/' ): log.info( 'Found wanted file: %s', name ) source = getfile( name ) try: new = os.path.join( target_directory, os.path.basename( name ), ) log.info( 'Writing file: %s', new ) open( new,'wb' ).write( source.read() ) finally: if hasattr( source, 'close' ): source.close() finally: fh.close() if filename != url: os.remove( filename ) return True
928553622010570f657bfe59b30aa3d5fad68435
5,845
def categories_report(x): """Returns value counts report. Parameters ---------- x: pd.Series The series with the values Returns ------- string The value counts report. str1 = False 22 | True 20 | nan 34 str2 = False (22) | True (20) | nan (34) """ # Do counting and sorting counts = x.value_counts(dropna=False) counts.index = counts.index.map(str) counts = counts.sort_index() # Create different strings str1 = ' | '.join(str(counts).split("\n")[:-1]) str2 = ' | '.join("%s (%s)" % (i, counts[i]) for i in counts.index) # Return return str2
695ccd73ee73a13e92edbdf0eb242121d136ddbb
5,846
def train(total_loss, global_step, train_num_examples): """Train model. Create an optimizer and apply to all trainable variables. Add moving average for all trainable variables. Args: total_loss: Total loss from loss(). global_step: Integer Variable counting the number of training steps processed. Returns: train_op: op for training. """ # Variables that affect learning rate. # num_batches_per_epoch = train_num_examples / FLAGS.batch_size # decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY) decay_steps = DECAY_STEPS # Decay the learning rate exponentially based on the number of steps. lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE, global_step, decay_steps, LEARNING_RATE_DECAY_FACTOR, staircase=True) tf.scalar_summary('learning_rate', lr) # Generate moving averages of all losses and associated summaries. loss_averages_op = _add_loss_summaries(total_loss) # Compute gradients. with tf.control_dependencies([loss_averages_op]): opt = tf.train.GradientDescentOptimizer(lr) grads = opt.compute_gradients(total_loss) # Apply gradients. apply_gradient_op = opt.apply_gradients(grads, global_step=global_step) # Add histograms for trainable variables. for var in tf.trainable_variables(): tf.histogram_summary(var.op.name, var) # Add histograms for gradients. for grad, var in grads: if grad is not None: tf.histogram_summary(var.op.name + '/gradients', grad) # Track the moving averages of all trainable variables. variable_averages = tf.train.ExponentialMovingAverage( MOVING_AVERAGE_DECAY, global_step) variables_averages_op = variable_averages.apply(tf.trainable_variables()) with tf.control_dependencies([apply_gradient_op, variables_averages_op]): train_op = tf.no_op(name='train') return train_op
66189c7fd3ec55d08e6a197f2f821adc6e1b3aad
5,847
def config_module_add(): """Add module for configuration Add an available module to the config file. POST json object structure: {"action": "add", "value": { "module": "modulename", "moduleprop": ... }} On success, an object with this structure is returned: {"_meta": {"id": <new ID>}, "module": "modulename"} Otherwise an error message and code. """ config = read_config() action = request.json["action"] if action == "add": if "value" not in request.json: return _ret_invalid_request("value") if "module" not in request.json["value"]: return _ret_invalid_request("value/module") moduletype = request.json["value"]["module"] if moduletype not in _get_available_modules(): return _ret_unknown_module(moduletype) newid = max([x["_meta"]["id"] for x in config["modules"]]) + 1 newmodule = request.json["value"] if "_meta" not in newmodule: newmodule["_meta"] = {} if "_order" not in newmodule["_meta"]: newmodule["_meta"]["order"] = 0 newmodule["_meta"]["id"] = newid config["modules"].append(newmodule) write_config(config) ret = {"_meta": {"id": newid}, "module": moduletype} return jsonify(ret) else: return _ret_unknown_action(action)
8ae9324e29408614ee324c3ba32ddab169e0b50e
5,848
def command_add(fname, ctype, **kwa): """returns (str) command to add constants from file to the DB, ex.: cdb add -e testexper -d testdet_1234 -c test_ctype -r 123 -f cm-confpars.txt -i txt -l DEBUG """ exp = kwa.get('experiment', None) det = kwa.get('detname', None) runnum = kwa.get('runnum', None) timestamp = kwa.get('timestamp', None) time_sec = kwa.get('time_sec', None) version = kwa.get('version', None) dtype = kwa.get('dtype', None) comment = kwa.get('coment', None) loglev = kwa.get('loglev', None) confirm = kwa.get('cdbadd', True) cmd = 'cdb add' if exp is not None: cmd += ' -e %s' % exp if det is not None: cmd += ' -d %s' % det if ctype is not None: cmd += ' -c %s' % ctype.ljust(12) if dtype is not None: cmd += ' -i %s' % dtype if runnum is not None: cmd += ' -r %s' % str(runnum) if timestamp is not None: cmd += ' -t %s' % timestamp if fname is not None: cmd += ' -f %s' % fname if loglev is not None: cmd += ' -l %s' % loglev if version is not None: cmd += ' -v %s' % version if comment is not None: cmd += ' -m %s' % comment if time_sec is not None: cmd += ' -s %s' % str(time_sec) if confirm: cmd += ' -C' logger.debug('command: %s' % cmd) return cmd
b6c1622f635c5f75b462be6ce025094f6df88ae5
5,849
def ustobj2songobj( ust: up.ust.Ust, d_table: dict, key_of_the_note: int = None) -> up.hts.Song: """ Process the Ust object note by note and convert it for HTS. Japanese lyrics are assumed, so the syllable count is 1. Watch out for sokuon (geminate consonants). ust: Ust object d_table: Japanese-to-romaji conversion table key_of_the_note: key of the song, which cannot be determined from the UST. In Sinsy it is 0 ~ 11 or 'xx'. """ song = up.hts.Song() ust_notes = ust.notes # Convert the type of each Note object for ust_note in ust_notes: hts_note = ustnote2htsnote(ust_note, d_table, key_of_the_note=key_of_the_note) song.append(hts_note) # Automatically fill in note lengths, positions, etc. song.autofill() # Set utterance start and end times according to note lengths song.reset_time() return song
53c92783d881702aa42b7f24c8a1596248b30108
5,850
def detect_ol(table): """Detect ordered list""" if not len(table): return False for tr in table: if len(tr)!=2: return False td1 = tr[0] # Only keep plausible ordered lists if td1.text is None: return False text = td1.text.strip() if not text or len(text)>3: return False if text[-1] not in ('.', ')'): return False if not text[:-1].isalpha() and not text[:-1].isdigit(): return False if len(td1): return False return True
b7082932fba6ba7f9634e70ea424561c084a2dc1
5,851
import os def read_dataset_test(data_dir, transforms=None): """ Read the Mini-ImageNet dataset. Args: data_dir: directory containing Mini-ImageNet. Returns: A tuple (train, val, test) of sequences of ImageNetClass instances. """ return tuple([_read_classes(os.path.join(data_dir, 'test'), transforms)])
e7b312bf60341fe13858049b5e3bff5a52c58811
5,852
def analyze_syntax(text): """Use the NL API to analyze the given text string, and returns the response from the API. Requests an encodingType that matches the encoding used natively by Python. Raises an errors.HTTPError if there is a connection problem. """ credentials = GoogleCredentials.get_application_default() scoped_credentials = credentials.create_scoped( ['https://www.googleapis.com/auth/cloud-platform']) http = httplib2.Http() scoped_credentials.authorize(http) service = discovery.build( 'language', 'v1beta1', http=http) body = { 'document': { 'type': 'PLAIN_TEXT', 'content': text, }, 'features': { 'extract_syntax': True, }, 'encodingType': get_native_encoding_type(), } request = service.documents().annotateText(body=body) return request.execute()
84387cf163f9cfab4fcabd0a43e43aab250bd01d
5,853
from typing import List from typing import Optional import os def _parseArgs(args: List[str]) -> Arguments: """ Parse the arguments. Terminates the script if errors are found. """ argLen = len(args) # Initialize the argument values inputPath: str = None sheetName: Optional[str] = None outputPath: Optional[str] = None # Check if the input path was specified if argLen < 2: raise ArgsError('The input file was not specified.') # Check if the input file exists if not os.path.exists(args[1]): raise ArgsError(f'The file "{args[1]}" does not exist.') inputPath = args[1] argIdx = 2 # Check each optional argument while argIdx < argLen: # Check the sheet argument if args[argIdx] in ('-s', '--sheet'): if argIdx + 1 == argLen: raise ArgsError('Sheet name was not specified.') sheetName = args[argIdx + 1] argIdx += 2 # Check the outputPath argument elif args[argIdx] in ('-o', '--output'): if argIdx + 1 == argLen: raise ArgsError('Output path was not specified.') outputPath = args[argIdx + 1] argIdx += 2 # If the argument is unrecognized else: raise ArgsError(f'The argument "{args[2]}" is unrecognized.') return Arguments(inputPath, sheetName, outputPath)
71b07ea7d32a4af23c7c08f52f8eb096fc126955
5,854
def kabsch_numpy(X, Y): """ Kabsch alignment of X into Y. Assumes X,Y are both (Dims x N_points). See below for wrapper. """ # center X and Y to the origin X_ = X - X.mean(axis=-1, keepdims=True) Y_ = Y - Y.mean(axis=-1, keepdims=True) # calculate covariance matrix (for each prot in the batch) C = np.dot(X_, Y_.transpose()) # Optimal rotation matrix via SVD V, S, W = np.linalg.svd(C) # determinant sign for direction correction d = (np.linalg.det(V) * np.linalg.det(W)) < 0.0 if d: S[-1] = S[-1] * (-1) V[:, -1] = V[:, -1] * (-1) # Create Rotation matrix U U = np.dot(V, W) # calculate rotations X_ = np.dot(X_.T, U).T # return centered and aligned return X_, Y_
85e42d58f667c70b3ad0fe0fe888fdfb383d34ee
5,855
import six import base64 def _decode(value): """ Base64 decode, padding with "=". Remember that the missing "=" has to be added back - Docker strips it when issuing the token. :param value: :return: """ length = len(value) % 4 if length in (2, 3,): value += (4 - length) * "=" elif length != 0: raise ValueError("Invalid base64 string") if not isinstance(value, six.binary_type): value = value.encode() return base64.urlsafe_b64decode(value)
c4a28605fb7f8a0d5110fb06738c31b030cae170
5,856
def header_elements(fieldname, fieldvalue): """Return a sorted HeaderElement list from a comma-separated header string. """ if not fieldvalue: return [] result = [] for element in RE_HEADER_SPLIT.split(fieldvalue): if fieldname.startswith('Accept') or fieldname == 'TE': hv = AcceptElement.from_str(element) else: hv = HeaderElement.from_str(element) result.append(hv) return list(reversed(sorted(result)))
8846a0b5e89e0a4d0d3d6192e988dfe78e394338
5,857
def line2dict(st): """Convert a line of key=value pairs to a dictionary. :param st: :returns: a dictionary :rtype: """ elems = st.split(',') dd = {} for elem in elems: elem = elem.split('=') key, val = elem try: int_val = int(val) dd[key] = int_val except ValueError: dd[key] = val return dd
86bb6c2e72c8a6b2a027d797de88089067ff7475
5,858
def transform(walls, spaces): """svg coords are in centimeters from the (left, top) corner, while we want metres from the (left, bottom) corner""" joint = np.concatenate([np.concatenate(walls), np.concatenate(spaces)]) (left, _), (_, bot) = joint.min(0), joint.max(0) def tr(ps): x, y = ps[..., 0], ps[..., 1] return np.stack([x - left, bot - y], -1)/SCALE + MARGIN return tr(walls), [tr(s) for s in spaces]
5ae28593a72567cf3c15f75fd37b44ca7b9468a8
5,859
from typing import Union from pathlib import Path from typing import Dict from typing import Any import os import json def get_json(partition: str, start: float = 0.0, end: float = 1.0, return_data: bool = False) -> Union[Path, Dict[Any, Any]]: """path, gender, age, result result=-1 for test set Example ------- ``` JSON_TRAIN = get_json('train', start=0.0, end=0.8) JSON_VALID = get_json('train', start=0.8, end=1.0) ``` """ wav = WAV_FILES[partition] wav_meta = WAV_META[partition] # === 1. prepare meta meta = defaultdict(dict) for k, tab in META_DATA[partition].items(): tab: pd.DataFrame for _, row in tab.iterrows(): meta[row['uuid']].update({ i: eval(j) if isinstance(j, string_types) and '[' in j else j for i, j in row.items()}) # === 2. load wav data = [] for f in sorted(wav): name = os.path.basename(f) uuid = name.replace('.wav', '') row: dict = meta[uuid] dur, sr = wav_meta[f] row['duration'] = dur row['sr'] = sr data.append((uuid, dict(path=f, meta=row))) # === 3. shuffle and split rand = np.random.RandomState(seed=DATA_SEED) rand.shuffle(data) n = len(data) start = int(n * start) end = int(n * end) data = data[start:end] data = dict(data) if return_data: return data # === 4. save to JSON path = os.path.join(CACHE_PATH, f'{partition}_{start:g}_{end:g}_{DATA_SEED:d}.json') with open(path, 'w', encoding='utf-8') as f: json.dump(data, f) return Path(path)
7260748f2197d1632f814c0c801f26d737fcce81
5,860
def isolate(result_file, isolate_file, mode, variables, out_dir, error): """Main function to isolate a target with its dependencies. Arguments: - result_file: File to load or save state from. - isolate_file: File to load data from. Can be None if result_file contains the necessary information. - mode: Action to do. See file level docstring. - variables: Variables to process, if necessary. - out_dir: Output directory where the result is stored. It's use depends on |mode|. Some arguments are optional, dependending on |mode|. See the corresponding MODE<mode> function for the exact behavior. """ # First, load the previous stuff if it was present. Namely, "foo.result" and # "foo.state". complete_state = CompleteState.load_files(result_file, out_dir) isolate_file = isolate_file or complete_state.saved_state.isolate_file if not isolate_file: error('A .isolate file is required.') if (complete_state.saved_state.isolate_file and isolate_file != complete_state.saved_state.isolate_file): error( '%s and %s do not match.' % ( isolate_file, complete_state.saved_state.isolate_file)) try: # Then process options and expands directories. complete_state.load_isolate(isolate_file, variables, error) # Regenerate complete_state.result.files. complete_state.process_inputs(LEVELS[mode]) # Finally run the mode-specific code. result = VALID_MODES[mode](out_dir, complete_state) except run_test_from_archive.MappingError, e: error(str(e)) # Then store the result and state. complete_state.save_files() return result
16201bf1fb11bafc9913fc620f0efea3de887e62
5,861
def check_structure(struct): """ Return True if the monophyly structure represented by struct is considered "meaningful", i.e. encodes something other than an unstructured polytomy. """ # First, transform e.g. [['foo'], [['bar']], [[[['baz']]]]], into simply # ['foo','bar','baz']. def denester(l): if type(l) != list: return l if len(l) == 1: return denester(l[0]) return [denester(x) for x in l] struct = denester(struct) # Now check for internal structure if not any([type(x) == list for x in struct]): # Struct is just a list of language names, with no internal structure return False return True
e07a2f39c7d3b8f2454b5171119b8698f4f58a99
5,862
import torch def batchify_with_label(input_batch_list, gpu, volatile_flag=False): """ input: list of words, chars and labels, various length. [[words,biwords,chars,gaz, labels],[words,biwords,chars,labels],...] words: word ids for one sentence. (batch_size, sent_len) chars: char ids for one sentence, various length. (batch_size, sent_len, each_word_length) output: zero padding for word and char, with their batch length word_seq_tensor: (batch_size, max_sent_len) Variable char_seq_lengths: (batch_size,1) Tensor char_seq_tensor: (batch_size*max_sent_len, max_word_len) Variable character_seq_lengths: (batch_size*max_sent_len,1) Tensor char_seq_recover: (batch_size*max_sent_len,1) recover char sequence order label_seq_tensor: (batch_size, max_sent_len) mask: (batch_size, max_sent_len) """ batch_size = len(input_batch_list) chars = [sent[0] for sent in input_batch_list] bichars = [sent[1] for sent in input_batch_list] gazs = [sent[2] for sent in input_batch_list] labels = [sent[3] for sent in input_batch_list] char_seq_lengths = torch.LongTensor(list(map(len, chars))) max_seq_len = char_seq_lengths.max().item() with torch.no_grad(): # torch.zeros(*sizes, out=None) → Tensor # returns a tensor filled with the scalar 0, with shape defined by the variadic argument sizes # sizes (int...) – a sequence of integers defining the output shape # out (Tensor, optional) – the result tensor char_seq_tensor = autograd.Variable(torch.zeros((batch_size, max_seq_len))).long() bichar_seq_tensor = autograd.Variable(torch.zeros((batch_size, max_seq_len))).long() label_seq_tensor = autograd.Variable(torch.zeros((batch_size, max_seq_len))).long() mask = autograd.Variable(torch.zeros((batch_size, max_seq_len))).byte() for idx, (seq, biseq, label, seqlen) in enumerate(zip(chars, bichars, labels, char_seq_lengths)): # torch.Tensor is a multi-dimensional matrix containing elements of a single data type # 64-bit integer (signed) torch.LongTensor torch.cuda.LongTensor char_seq_tensor[idx, :seqlen] = torch.LongTensor(seq) bichar_seq_tensor[idx, :seqlen] = torch.LongTensor(biseq) label_seq_tensor[idx, :seqlen] = torch.LongTensor(label) mask[idx, :seqlen] = torch.Tensor([1] * seqlen.item()) char_seq_lengths, char_perm_idx = char_seq_lengths.sort(0, descending=True) char_seq_tensor = char_seq_tensor[char_perm_idx] bichar_seq_tensor = bichar_seq_tensor[char_perm_idx] label_seq_tensor = label_seq_tensor[char_perm_idx] mask = mask[char_perm_idx] _, char_seq_recover = char_perm_idx.sort(0, descending=False) # keep the gaz_list in original order gaz_list = [gazs[i] for i in char_perm_idx] gaz_list.append(volatile_flag) if gpu: char_seq_tensor = char_seq_tensor.cuda() bichar_seq_tensor = bichar_seq_tensor.cuda() char_seq_lengths = char_seq_lengths.cuda() char_seq_recover = char_seq_recover.cuda() label_seq_tensor = label_seq_tensor.cuda() mask = mask.cuda() return gaz_list, char_seq_tensor, bichar_seq_tensor, char_seq_lengths, char_seq_recover, label_seq_tensor, mask
aea1b271292751740b35fe0d18b133beb7df53c7
5,863
def symbolicMatrix(robot): """ Denavit - Hartenberg parameters for n - th rigid body theta: rotation on «z» axis d: translation on «z» axis a: translation on «x» axis alpha: rotation on «x» axis """ return np.array([[0, 0, 0, 0], [robot.symbolicJointsPositions[0, 0], robot.symbolicLinksLengths[0], 0, np.pi / 2], [robot.symbolicJointsPositions[1, 0], 0, robot.symbolicLinksLengths[1], 0], [robot.symbolicJointsPositions[2, 0], 0, 0, np.pi / 2], [robot.symbolicJointsPositions[3, 0], robot.symbolicLinksLengths[2], 0, 0]])
3b476527336b15c171bbede76ff5b4ed2d4a6eb6
5,864
from typing import List import json def magnitude_list(data: List) -> List: """ :param data: list (or JSON-encoded list) of vectors :return: list of the vector magnitudes """ if data is None or len(data) == 0: return [] if isinstance(data, str): try: data = json.loads(data) except: data = data try: input_data = np.array([i for i in data]) data = norm(input_data, axis=1).tolist() except Exception as e: print("Error in calculating magnitude ----> ") print("Data: ", data) print("NP Array: ", input_data) print(e) raise Exception return data
fc124d9a21b4b08ac50731c9234676860b837acf
5,865
import subprocess def generate_keypair(passphrase): """ Create a pair of keys with the passphrase as part of the key names """ keypath = '/tmp/test_{}_key'.format(passphrase) command = 'ssh-keygen -t rsa -b 4096 -C "{p}" -P "{p}" -f {k} -q' command = command.format(p=passphrase, k=keypath) subprocess.check_call(command, shell=True) return keypath, keypath + '.pub'
d4c8155173273feda778f5f54a4b0513353a293b
5,866
def lookup_axis1(x, indices, fill_value=0): """Return values of x at indices along axis 1, returning fill_value for out-of-range indices. """ # Save shape of x and flatten ind_shape = indices.shape a, b = x.shape x = tf.reshape(x, [-1]) legal_index = indices < b # Convert indices to legal indices in flat array indices = tf.clip_by_value(indices, 0., b - 1.) indices = indices + b * tf.range(a, dtype=float_type())[:, o, o] indices = tf.reshape(indices, shape=(-1,)) indices = tf.dtypes.cast(indices, dtype=int_type()) # Do indexing result = tf.reshape(tf.gather(x, indices), shape=ind_shape) # Replace illegal indices with fill_value, cast to float explicitly return tf.cast(tf.where(legal_index, result, tf.zeros_like(result) + fill_value), dtype=float_type())
6e93475d5c6324a709792903a453ffbb454d2d62
5,867
from flask import jsonify


def delete_compute_job():
    """
    Deletes the current compute job.
    ---
    tags:
      - operation
    consumes:
      - application/json
    parameters:
      - name: agreementId
        in: query
        description: agreementId
        type: string
      - name: jobId
        in: query
        description: Id of the job.
        type: string
      - name: owner
        in: query
        description: owner
        type: string
    """
    # Since the op-engine handles this, there is no need for this endpoint;
    # it is kept only for backwards compatibility.
    return jsonify(""), 200
112e79bdfb9c569aa0d49275bf3df14c7eecd7b5
5,868
import io


def load_dict(dict_path):
    """
    Load a dict from a text file: each stripped line becomes a value keyed by
    its zero-based line index.
    """
    result_dict = {}
    for idx, line in enumerate(io.open(dict_path, "r", encoding='utf8')):
        terms = line.strip("\n")
        result_dict[idx] = terms
    return result_dict
cad2061561c26e247687e7c2ee52fb5cf284352a
5,869
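# Hypothetical usage sketch (not part of the original entry): assumes the
# load_dict() function above is in scope; 'labels.txt' is a throwaway file
# created here just for the demonstration.
import io
with io.open("labels.txt", "w", encoding="utf8") as f:
    f.write(u"negative\npositive\n")
print(load_dict("labels.txt"))   # {0: 'negative', 1: 'positive'}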
import numpy as np

# `normalize` is assumed to be a module-level helper that returns a unit-length
# copy of its vector argument (i.e. v / ||v||).


def project(x, n):
    """ Project the vector x onto the plane with normal n.
    http://www.euclideanspace.com/maths/geometry/elements/plane/lineOnPlane/"""
    l = np.linalg.norm(x)
    a = normalize(x)
    b = normalize(n)
    axb = np.cross(a, b)
    bxaxb = np.cross(b, axb)
    return l * bxaxb
e80d87454457920edfbe9638e6793372000bb3bd
5,870
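# Hypothetical usage sketch (not part of the original entry): assumes the
# project() function above is in scope; `normalize` is supplied here as the
# usual unit-vector helper, since the entry does not define it.
import numpy as np

def normalize(v):
    return np.asarray(v, dtype=float) / np.linalg.norm(v)

x = np.array([1.0, 2.0, 3.0])
n = np.array([0.0, 0.0, 1.0])   # plane normal along z
print(project(x, n))            # roughly [1. 2. 0.]: the component along n is removed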
def topologicalSort(roots, getParents): """Return a topological sorting of nodes in a graph. roots - list of root nodes to search from getParents - function which returns the parents of a given node """ results = [] visited = set() # Use iterative version to avoid stack limits for large datasets stack = [(node,0) for node in roots] while stack: current, state = stack.pop() if state == 0: # before recursing if current not in visited: visited.add(current) stack.append((current,1)) stack.extend((parent,0) for parent in getParents(current)) else: # after recursing assert current in visited results.append(current) return results
eec46378dc2282447ff1567945334b6cf18dc180
5,871
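# Hypothetical usage sketch (not part of the original entry): assumes the
# topologicalSort() function above is in scope. Parents act as dependencies,
# so every node appears after all of its parents in the result.
parents = {"app": ["lib", "utils"], "lib": ["utils"], "utils": []}
print(topologicalSort(["app"], lambda node: parents[node]))   # ['utils', 'lib', 'app']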
def get_headers(metric_resource: MetricResource): """ Get the headers to be used in the REST query for the given metric. """ headers = {} # no headers will be used if metric_resource.spec.headerTemplates is None: return headers, None # initialize headers dictionary for item in metric_resource.spec.headerTemplates: headers[item.name] = item.value # if authType is None, interpolation is not attempted if metric_resource.spec.authType is None: return headers, None # if authType is Basic, interpolation is not attempted if metric_resource.spec.authType == AuthType.BASIC: return headers, None # if there is no secret referenced, interpolation is not attempted if metric_resource.spec.secret is None: return headers, None # args contain decoded secret data for header template interpolation args, err = get_secret_data_for_metric(metric_resource) if err is None: for key in headers: headers[key], err = interpolate(headers[key], args) if err is not None: return None, err return headers, None return None, err
00ab2000ef83f12ebcdc26d834b285ca1ab2da40
5,872
from typing import Iterable

import pandas as pd

# Neo4jClient and the helpers used below (count_human_genes, get_entity_to_targets,
# _do_ora) are assumed to come from the surrounding module.


def indra_upstream_ora(
    client: Neo4jClient, gene_ids: Iterable[str], **kwargs
) -> pd.DataFrame:
    """
    Calculate a p-value for each entity in the INDRA database based on the set
    of genes that it regulates and how they compare to the query gene set.
    """
    count = count_human_genes(client=client)
    return _do_ora(
        get_entity_to_targets(client=client), gene_ids=gene_ids, count=count, **kwargs
    )
1e95b4dc329d09055e0f441f3ddef3614a693005
5,873
import socket
import sys


def log(*args, host="127.0.0.1", port=3001, surround=3, **kwargs) -> bool:
    """
    Create `Log` object and send it to the codeCTRL server in CBOR format.

    The codectrl.log function collects and formats information about the
    file/function/line of code it got called on and sends it to the
    codeCTRL server, if available.

    Usage:
        The function takes any number of positional or keyword arguments
        of all types.

        All positional arguments get included in the log `message`
        using str() or json.dumps(obj, indent=4) in case of dicts.

        Keyword arguments, other than `reserved` ones, get appended
        to the logs as {key}={value}

        Reserved arguments:
            * host:
                By default set to `127.0.0.1`, this argument holds
                the address of the codeCTRL server.
            * port:
                By default set to `3001`, this is the port the
                codeCTRL server should be contacted at.
            * surround:
                By default `3`, this argument specifies the number
                of lines of code that should be displayed around
                the call to `codectrl.log`.
    """

    # This makes it easier for users of the library
    # to debug errors they caused.
    assert isinstance(host, str), "host variable has to be a string"
    assert isinstance(port, int), "port variable has to be an integer"
    assert isinstance(surround, int), "surround variable has to be an integer"

    # Try to connect to the server.
    try:
        soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        soc.connect((host, port))
    except Exception as err:  # pylint: disable=broad-except
        # Could be many things.
        print(f"[codeCTRL] Could not reach codeCTRL server. {err}", file=sys.stderr)
        return False

    # Collect logging data
    log_obj: Log = Log(surround, *args, **kwargs)

    # Send logging data to server
    soc.send(log_obj.cbor())
    # s.send(b'\0')

    # close socket
    soc.close()

    return True
9ec5cfefda67b2ad8470145270f4978f0828cbb2
5,874
def _take_photo(gopro_instance, interval_secs=600):
    """
    Take a photo with the given GoPro instance.
    This function is still under development.
    """
    # `tl` is assumed to be the surrounding project's alerting helper module.
    try:
        img = gopro_instance.take_photo()
        return img
    except TypeError:
        tl.send_alert()
        return False
    except Exception:
        tl.send_alert(
            message='🆘*E️rror desconocido*, se requiere soporte técnico urgente!'
        )
        return False
    # time.sleep(interval_secs)
    # time_lapse(gopro_instance, interval_secs)
04b59957c513eee44e487ba7f86bf296a0c19150
5,875
import time

import numpy as np
import pandas as pd

# check_cv, check_y, _check_strategies, check_scoring and ForecastingHorizon are
# assumed to be imported from the surrounding sktime modules.


def evaluate(
    forecaster, cv, y, X=None, strategy="refit", scoring=None, return_data=False
):
    """Evaluate forecaster using cross-validation.

    Parameters
    ----------
    forecaster : sktime.forecaster
        Any forecaster
    cv : sktime.SlidingWindowSplitter or sktime.ExpandingWindowSplitter
        Splitter of how to split the data into test data and train data
    y : pd.Series
        Target time series to which to fit the forecaster.
    X : pd.DataFrame, optional (default=None)
        Exogenous variables
    strategy : str, optional
        Must be "refit" or "update", by default "refit". The strategy defines
        whether the forecaster is only fitted on the first train window and then
        updated, or always refitted.
    scoring : object of class MetricFunctionWrapper from
        sktime.performance_metrics, optional. Example scoring=sMAPE().
        Used to get a score function that takes y_pred and y_test as arguments,
        by default None (if None, uses sMAPE)
    return_data : bool, optional
        Returns three additional columns in the DataFrame, by default False.
        The cells of the columns each contain a pd.Series for y_train,
        y_pred, y_test.

    Returns
    -------
    pd.DataFrame
        DataFrame that contains several columns with information regarding each
        refit/update and prediction of the forecaster.

    Examples
    --------
    >>> from sktime.datasets import load_airline
    >>> from sktime.performance_metrics.forecasting import evaluate
    >>> from sktime.forecasting.model_selection import ExpandingWindowSplitter
    >>> from sktime.forecasting.naive import NaiveForecaster
    >>> y = load_airline()
    >>> forecaster = NaiveForecaster(strategy="drift", sp=12)
    >>> cv = ExpandingWindowSplitter(
    ...     initial_window=24,
    ...     step_length=12,
    ...     fh=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
    ... )
    >>> results = evaluate(forecaster=forecaster, y=y, cv=cv)
    """
    cv = check_cv(cv)
    y = check_y(y)
    _check_strategies(strategy)
    scoring = check_scoring(scoring)

    results = pd.DataFrame()
    cv.start_with_window = True

    for i, (train, test) in enumerate(cv.split(y)):
        # get initial window, if required
        if i == 0 and cv.initial_window and strategy == "update":
            train, test = cv.split_initial(y)
            # this might have to be directly handled in split_initial()
            test = test[: len(cv.fh)]

        # create train/test data; `X is not None` avoids the ambiguous truth
        # value of a DataFrame
        y_train = y.iloc[train]
        y_test = y.iloc[test]
        X_train = X.iloc[train] if X is not None else None
        X_test = X.iloc[test] if X is not None else None

        # fit/update
        start_fit = time.time()
        if strategy == "refit" or i == 0:
            forecaster.fit(
                y=y_train,
                X=X_train,
                fh=ForecastingHorizon(y_test.index, is_relative=False),
            )
        else:  # strategy == "update" and i != 0
            forecaster.update(y=y_train, X=X_train)
        fit_time = time.time() - start_fit

        # predict
        start_pred = time.time()
        y_pred = forecaster.predict(
            fh=ForecastingHorizon(y_test.index, is_relative=False), X=X_test
        )
        pred_time = time.time() - start_pred

        # save results
        results = results.append(
            {
                "test_" + scoring.__class__.__name__: scoring(y_pred, y_test),
                "fit_time": fit_time,
                "pred_time": pred_time,
                "len_train_window": len(y_train),
                "cutoff": forecaster.cutoff,
                "y_train": y_train if return_data else np.nan,
                "y_test": y_test if return_data else np.nan,
                "y_pred": y_pred if return_data else np.nan,
            },
            ignore_index=True,
        )

    # post-processing of results
    if not return_data:
        results = results.drop(columns=["y_train", "y_test", "y_pred"])
    results["len_train_window"] = results["len_train_window"].astype(int)

    return results
ea9663c942ee71c40674c64196b3ced5f61a2c2c
5,876
def euler(step, y0): """ Implements Euler's method for the differential equation dy/dx = 1/(2(y-1)) on the interval [0,4] """ x = [0] index_x = 0 while x[index_x] < 4: x.append(x[index_x] + step) index_x += 1 index_y = 0 y = [y0] def yprime(y): yprime = 1 / (2 * (y - 1)) return yprime while index_y < index_x: y.append(y[index_y] + step * yprime(y[index_y])) index_y += 1 return x, y
89c6e6409a1c43ce4766507fba2f401bb01cfbb8
5,877
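# Hypothetical usage sketch (not part of the original entry): assumes the
# euler() function above is in scope. With y(0) = 3 the exact solution of
# dy/dx = 1/(2(y-1)) is y = 1 + sqrt(x + 4), so y(4) = 1 + sqrt(8) ≈ 3.83.
xs, ys = euler(0.5, 3.0)
print(xs[-1], ys[-1])   # 4.0 and roughly 3.84 – close to the exact value despite the coarse step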
import re
import os
from urllib.parse import urlparse

import requests

# `headers` is assumed to be a module-level dict of default request headers
# defined elsewhere in the surrounding module.


def get_fileinfo(url: str, proxy: str = '', referer: str = '') -> (str, str, requests.Response):
    """
    Get information about the file to be downloaded.

    :param url: file URL
    :param proxy: proxy address ("host:port"), empty for no proxy
    :param referer: Referer header value, used to get past anti-scraping checks
    :return: resolved URL, file name, and the HTTP response (all header keys lower-cased)
    """
    proxies = {
        'http': 'http://' + proxy,
        'https': 'https://' + proxy
    } if proxy else {}
    if referer:
        headers['referer'] = referer
    try:
        res = requests.head(url, headers=headers, proxies=proxies)
    except Exception as e:
        return '', repr(e), None
    # Follow redirects manually so the final URL is returned.
    while res.status_code in [301, 302]:
        url = {i[0]: i[1] for i in res.headers.lower_items()}['location']
        res = requests.head(url, headers=headers, proxies=proxies)
    res.headers = {i[0]: i[1] for i in res.headers.lower_items()}
    if 'content-disposition' in res.headers:
        try:
            filename = re.findall('filename=(.*?);', res.headers['content-disposition'])[0]
        except IndexError:
            filename = os.path.basename(urlparse(url).path.strip('/'))
    else:
        filename = os.path.basename(urlparse(url).path.strip('/'))
    return url, re.sub(r"^\W+|\W+$", "", filename), res
e8a0d093a997cfa43bab5bb41b0d71d2c78eb118
5,878
import random def generate_random_solution(): """generate_random_solution() Generates a random solution of random characters from [ ,!,..A..Z..a..z...~].""" global answer #codes for chars [ ,!..A..Z..a..z..~] chars = list(range(32,127)) solution = [] while len(solution) < len(answer): #generate random solutions to length of the true answer solution.append(random.choice(chars)) return solution
534a4a249bbbbc9e285b3dc9ccc5010413239b66
5,879
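# Hypothetical usage sketch (not part of the original entry): assumes the
# generate_random_solution() function above is in scope and that the module
# defines the global `answer` whose length it matches.
answer = "Hello, World!"                    # 13-character target
candidate = generate_random_solution()
print(len(candidate), "".join(chr(c) for c in candidate))   # 13 and a random printable string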
import importlib.util
import sys
import traceback
from pathlib import Path

import click


def entrypoint_module(config):
    """Lazily returns the entrypoint module defined in a qaboard config"""
    entrypoint = config.get('project', {}).get('entrypoint')
    if not entrypoint:
        click.secho(f'ERROR: Could not find the entrypoint', fg='red', err=True, bold=True)
        click.secho(f'Add to qaboard.yaml:\n```\nproject:\n  entrypoint: my_main.py\n```', fg='red', err=True, dim=True)
        return FailingEntrypoint()
    else:
        entrypoint = Path(entrypoint)
        try:
            name = 'qaboard-entrypoint'
            # https://docs.python.org/3/library/importlib.html#importing-a-source-file-directly
            spec = importlib.util.spec_from_file_location(name, entrypoint)
            module = importlib.util.module_from_spec(spec)
            sys.path.insert(0, str(entrypoint.parent))
            spec.loader.exec_module(module)
            # sys.path.pop(0)
            # spec = importlib.util.spec_from_loader(name, importlib.machinery.SourceFileLoader(name, str(entrypoint)))
            # spec.submodule_search_locations = [str(entrypoint.parent)]
            # with cached versions of the entrypoint, an option could be importlib.reload(module)
            # FIXME: at some point I had issues with sys.path, but no more (?)
        except Exception as e:
            exc_type, exc_value, exc_traceback = sys.exc_info()
            click.secho(f'ERROR: Error importing the entrypoint ({entrypoint}).', fg='red', err=True, bold=True)
            click.secho(''.join(traceback.format_exception(exc_type, exc_value, exc_traceback)), fg='red', err=True)
            click.secho(
                f'{entrypoint} must implement a `run(context)` function, and optionally `postprocess` / `metadata` / `iter_inputs`.\n'
                'Please read the tutorial at https://samsung.github.com/qaboard/docs\n',
                dim=True, err=True)
            return FailingEntrypoint()
    return module
2997eb2ba4ff11ec1e1b8133cb98b6d18712a03c
5,880
def getTV_Info():
    """
    Retrieve the TeamViewer ID and password.

    Uses Spy++ to inspect the child windows and control classes of the target
    program, then reads the contents of the text boxes via the win32 API.

    Note:
        FindWindowEx() only searches direct child windows, so the lookup has to
        descend level by level. Its second argument marks the child window from
        which the search continues, which is needed when a parent contains two
        child windows with the same class name.

    Reference:
        https://github.com/wuxc/pywin32doc/blob/master/md/win32gui.md#win32guifindwindowex
    """
    # Get the window handles of the ID and password controls
    id_hwnd, pwd_hwnd = get_Hwnd()
    ID = get_Text(id_hwnd)

    # If the data has not been generated yet, read it again
    while len(ID) < 6:
        # assumes TeamViewer itself is running normally
        id_hwnd, pwd_hwnd = get_Hwnd()
        ID = get_Text(id_hwnd)

    Password = get_Text(pwd_hwnd)
    print("ID:", ID, "Password:", Password)
    return ID, Password
2d6de5029eda4b447d9fa87c271e18fe94148dc9
5,881
import jieba def tokenize_words(text): """Word segmentation""" output = [] sentences = split_2_short_text(text, include_symbol=True) for sentence, idx in sentences: if is_chinese_string(sentence): output.extend(jieba.lcut(sentence)) else: output.extend(whitespace_tokenize(sentence)) return output
f50c963316927a8051489a22cae674b19ab7b0d5
5,882
import tensorflow as tf

# `create_backbone`, `BACKBONE_LAYER_NAMES` and `simple_upblock` are assumed to be
# defined elsewhere in the surrounding module.


def segmentation_model_func(output_channels, backbone_name, backbone_trainable=True):
    """
    Creates a segmentation model with the tf.keras functional api.
    Args:
        output_channels: number of output_channels (classes)
        backbone_name: name of backbone; either: 'vgg19', 'resnet50', 'resnet50v2', 'mobilenetv2', 'resnet101'

    Returns:
        tf.keras functional model
    """
    down_stack = create_backbone(name=backbone_name, set_trainable=backbone_trainable)

    skips = [down_stack.get_layer(BACKBONE_LAYER_NAMES[backbone_name][0]).output,
             down_stack.get_layer(BACKBONE_LAYER_NAMES[backbone_name][1]).output,
             down_stack.get_layer(BACKBONE_LAYER_NAMES[backbone_name][2]).output,
             down_stack.get_layer(BACKBONE_LAYER_NAMES[backbone_name][3]).output,
             down_stack.get_layer(BACKBONE_LAYER_NAMES[backbone_name][4]).output]

    up_stack_filters = [64, 128, 256, 512]

    x = skips[-1]
    skips = reversed(skips[:-1])
    up_stack_filters = reversed(up_stack_filters)

    # Upsampling and establishing the skip connections
    for skip, filters in zip(skips, up_stack_filters):
        x = simple_upblock(x, filters, 3, 'up_stack' + str(filters))
        x = tf.keras.layers.Concatenate()([x, skip])

    # x = simple_upblock_func(x, 32, 3, 'up_stack' + str(32))
    x = tf.keras.layers.UpSampling2D(2)(x)
    x = tf.keras.layers.Conv2D(32, 3, activation='relu', padding='same')(x)

    x = tf.keras.layers.Conv2D(output_channels, 1, activation='softmax', padding='same', name='final_output')(x)

    return tf.keras.Model(inputs=down_stack.layers[0].input, outputs=x)
31f46e0fcde797c22c07abeabbf4e4879bddf180
5,883
def EventAddKwargs(builder, kwargs): """This method is deprecated. Please switch to AddKwargs.""" return AddKwargs(builder, kwargs)
b19aa256819f3be1b018baf469b72293a08fa4db
5,884
import json def generate_sidecar(events, columns_selected): """ Generate a JSON sidecar template from a BIDS-style events file. Args: events (EventInput): An events input object to generate sidecars from. columns_selected (dict): A dictionary of columns selected. Returns: dict: A dictionary of results in standard format including either the generated sidecar string or errors. """ columns_info = BidsTsvSummary.get_columns_info(events.dataframe) hed_dict = {} for column_name, column_type in columns_selected.items(): if column_name not in columns_info: continue if column_type: column_values = list(columns_info[column_name].keys()) else: column_values = None hed_dict[column_name] = generate_sidecar_entry(column_name, column_values=column_values) display_name = events.name file_name = generate_filename(display_name, name_suffix='_generated', extension='.json') return {base_constants.COMMAND: base_constants.COMMAND_GENERATE_SIDECAR, base_constants.COMMAND_TARGET: 'events', 'data': json.dumps(hed_dict, indent=4), 'output_display_name': file_name, 'msg_category': 'success', 'msg': 'JSON sidecar generation from event file complete'}
4fada8d65eab69384cb1d1f26f888d40fd0cea90
5,885
def _filter_artifacts(artifacts, relationships): """ Remove artifacts from the main list if they are a child package of another package. Package A is a child of Package B if all of Package A's files are managed by Package B per its file manifest. The most common examples are python packages that are installed via dpkg or rpms. :param artifacts: :param relationships: :return: """ def filter_fn(artifact): # some packages are owned by other packages (e.g. a python package that was installed # from an RPM instead of with pip), filter out any packages that are not "root" packages. if _filter_relationships( relationships, child=dig(artifact, "id"), type="ownership-by-file-overlap" ): return False return True return [a for a in artifacts if filter_fn(a)]
642f16fd4b9784288a283a21db8632cc11af6cba
5,886
from math import cos, sin

import numpy as np
from skimage.color import rgb2lab

# `kmeans_get_palette` is assumed to be a module-level helper that clusters the
# image colors and returns the `nclusters` cluster centres.


def get_augmented_image_palette(img, nclusters, angle):
    """ Return tuple of (Image, Palette) in LAB space, color shifted by the angle parameter """
    lab = rgb2lab(img)
    ch_a = lab[..., 1]
    ch_b = lab[..., 2]

    theta = np.deg2rad(angle)
    rot = np.array([[cos(theta), -sin(theta)], [sin(theta), cos(theta)]])
    # Rotate each (a, b) chroma pair by `theta` around the L axis (a hue shift in LAB).
    hue_rotate = lambda ab: np.dot(rot, [ab[0], ab[1]])

    ab = np.asarray(list(map(hue_rotate, zip(ch_a, ch_b)))).transpose((0, 2, 1))
    lab = np.dstack((lab[..., 0], ab[..., 0], ab[..., 1]))

    palette = kmeans_get_palette(lab, nclusters)
    return (lab, palette)
89cfc4f50a70be413aa6525b9b462924baaf9907
5,887
import os def autodetect(uri: str, **kwargs) -> intake.source.DataSource: """ Autodetect intake source given URI. Keyword arguments are passed to the source constructor. If no other source is more suitable, it returns an instance of :class:`intake_io.source.ImageIOSource`, which uses `imageio <https://github.com/imageio/imageio>`_. This function doesn't check whether the data can actually be loaded. :param uri: URI (e.g. file system path or URL) :param kwargs: Arguments passed to the source constructor :return: Data source """ luri = uri.lower() lext = os.path.splitext(luri)[-1] if lext == ".nrrd": return source.NrrdSource(uri, **kwargs) elif lext in (".tif", ".tiff"): return source.TifSource(uri, **kwargs) elif luri.endswith(".nii.gz") or lext == ".nii": return source.NiftiSource(uri, **kwargs) elif lext in (".dicom", ".dcm"): return source.DicomSource(uri, **kwargs) elif luri.endswith(".dicom.zip") or luri.endswith(".dcm.zip"): return source.DicomZipSource(uri, **kwargs) elif lext == ".klb": return source.KlbSource(uri, **kwargs) elif luri.endswith(".ome.tif") or luri.endswith(".ome.tiff") \ or lext not in (".tif", ".tiff", ".png", ".jpg", ".gif", ".mp4"): return source.BioformatsSource(uri, **kwargs) else: return source.ImageIOSource(uri, **kwargs)
b93e91d0fd54ccca71d1a64c269bac963bd82b69
5,888
def squeeze__default(ctx, g, self, dim=None): """Register default symbolic function for `squeeze`. squeeze might be exported with IF node in ONNX, which is not supported in lots of backend. """ if dim is None: dims = [] for i, size in enumerate(self.type().sizes()): if size == 1: dims.append(i) else: dims = [sym_help._get_const(dim, 'i', 'dim')] return g.op('Squeeze', self, axes_i=dims)
7ce7672f187f2d699cc378d00b1415007a2fe04b
5,889
from predefinedentities import BANNED_PREF_BRANCHES, BANNED_PREF_REGEXPS import re def _call_create_pref(a, t, e): """ Handler for pref() and user_pref() calls in defaults/preferences/*.js files to ensure that they don't touch preferences outside of the "extensions." branch. """ if not t.im_self.filename.startswith("defaults/preferences/") or len(a) == 0: return value = str(t(a[0]).get_literal_value()) for banned in BANNED_PREF_BRANCHES: if value.startswith(banned): return ("Extensions should not alter preferences in the '%s' " "preference branch" % banned) for banned in BANNED_PREF_REGEXPS: if re.match(banned, value): return ("Extensions should not alter preferences matching /%s/" % banned) if not value.startswith("extensions.") or value.rindex(".") < len("extensions."): return ("Extensions should not alter preferences outside of the " "'extensions.' preference branch. Please make sure that " "all of your extension's preferences are prefixed with " "'extensions.add-on-name.', where 'add-on-name' is a " "distinct string unique to and indicative of your add-on.")
90ceef343ead469da5fb078b45ee30c87fceb84b
5,890
def pig_action_utility(state, action, utility):
    """The expected value of choosing action in state. Assumes the opponent
    also plays with an optimal strategy. An action is one of
    ["roll", "hold", "accept", "decline", "double"].
    """
    if action == 'roll':
        one = iter([1])
        rest = iter([2, 3, 4, 5, 6])
        return (-utility(do(action, state, one))
                + sum(utility(do(action, state, rest)) for _ in range(5))) / 6.0
    else:
        return -utility(do(action, state, fair_die_rolls()))
c2e06a074f5fefd62f8a810e338bb7938d1cf6fd
5,891
from urllib.parse import urlparse, urlunparse


def to_canonical_url(url):
    """
    Converts a url into a "canonical" form, suitable for hashing. Keeps only
    scheme, domain and path. Ignores url query, fragment, and all other parts
    of the url.

    :param url: a string
    :return: a string
    """
    parsed_url = urlparse(url)
    return urlunparse([
        parsed_url.scheme,
        parsed_url.netloc,
        parsed_url.path,
        '', '', ''
    ])
0991502fcd696308d0fe50a06a7fa5e2e12703af
5,892
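# Hypothetical usage sketch (not part of the original entry): assumes the
# to_canonical_url() function above is in scope.
url = "https://example.com/docs/page?utm_source=x#section-2"
print(to_canonical_url(url))   # https://example.com/docs/page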
from typing import Any

from monty.json import jsanitize
from pydash import get

from jobflow.utils.find import find_key_value

# OutputReference is assumed to be defined earlier in this module.


def find_and_get_references(arg: Any) -> tuple[OutputReference, ...]:
    """
    Find and extract output references.

    This function works on nested inputs. For example, lists or dictionaries
    (or combinations of list and dictionaries) that contain output references.

    Parameters
    ----------
    arg
        The argument to search for references.

    Returns
    -------
    tuple[OutputReference]
        The output references as a tuple.
    """
    if isinstance(arg, OutputReference):
        # if the argument is a reference then stop there
        return tuple([arg])

    elif isinstance(arg, (float, int, str, bool)):
        # argument is a primitive, we won't find a reference here
        return tuple()

    arg = jsanitize(arg, strict=True, enum_values=True)

    # recursively find any reference classes
    locations = find_key_value(arg, "@class", "OutputReference")

    # deserialize references and return
    return tuple(OutputReference.from_dict(get(arg, loc)) for loc in locations)
a2b1873ecd921afbb3d254c0a0fe4706c0ca5d12
5,893
import numpy as np


def get_fdr_thresh(p_values, alpha=0.05):
    """
    Calculate the false discovery rate (FDR) multiple comparisons correction
    threshold for a list of p-values.

    :param p_values: list of p-values
    :param alpha: the uncorrected significance level being used (default = 0.05)
    :type p_values: numpy array
    :type alpha: float

    :returns: The FDR correction threshold
    :rtype: float
    """
    sn = np.sort(p_values)
    sn = sn[np.isfinite(sn)]
    for i in range(len(sn)):
        p_crit = alpha * float(i + 1) / float(len(sn))
        if sn[i] <= p_crit:
            continue
        else:
            break
    return sn[i]
5182eef60be397fe9f13ecb4e5440adc1a9ffd00
5,894
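# Hypothetical usage sketch (not part of the original entry): assumes the
# get_fdr_thresh() function above is in scope.
import numpy as np
p_values = np.array([0.001, 0.008, 0.02, 0.04, 0.3, 0.7])
thresh = get_fdr_thresh(p_values)
print(thresh)   # 0.04 for this input – p-values strictly below it survive the correction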
import typing


def _rescue_filter(
    flags: RescueRenderFlags, platform_filter: typing.Optional[Platforms], rescue: Rescue
) -> bool:
    """
    Determine whether the `rescue` object is one we care about.

    Args:
        flags: render flags controlling which rescues are filtered
        platform_filter: optional platform to restrict rescues to
        rescue: the rescue to check

    Returns:
        bool
    """
    filters = []

    if flags.filter_unassigned_rescues:
        # return whether any rats are assigned
        # either properly or via unidentified rats
        filters.append(not (bool(rescue.rats) or bool(rescue.unidentified_rats)))

    # use the active bool on rescue if we don't want inactives, otherwise True
    if flags.filter_active_rescues:
        filters.append(rescue.active)

    if flags.filter_inactive_rescues:
        filters.append(not rescue.active)

    if platform_filter:
        # if we are filtering on platform
        filters.append(rescue.platform is platform_filter)

    return not all(filters)
bb192e4fd8eeb811bb681cec6e60956a71a1c15b
5,895
import numpy as np

# `l1_od_norm` (the l1 norm of the off-diagonal entries) is assumed to be
# defined elsewhere in the surrounding module.


def penalty(precision, alpha, beta, psi):
    """Penalty for the time-varying graphical lasso."""
    if isinstance(alpha, np.ndarray):
        obj = sum(a[0][0] * m for a, m in zip(alpha, map(l1_od_norm, precision)))
    else:
        obj = alpha * sum(map(l1_od_norm, precision))
    if isinstance(beta, np.ndarray):
        obj += sum(b[0][0] * m
                   for b, m in zip(beta, map(psi, precision[1:] - precision[:-1])))
    else:
        obj += beta * psi(precision[1:] - precision[:-1])
    return obj
e8563c82cb51a5e3efa25fac5647b782abecabdf
5,896
from typing import List, Optional

import networkx as nx
from propy.CTD import CalculateCTD

# `compute_propy_feature` is assumed to be defined in the surrounding module.


def all_ctd_descriptors(
    G: nx.Graph, aggregation_type: Optional[List[str]] = None
) -> nx.Graph:
    """
    Calculate all CTD descriptors based on seven different properties of AADs.

    :param G: Protein Graph to featurise
    :type G: nx.Graph
    :param aggregation_type: Aggregation types to use over chains
    :type aggregation_type: List[Optional[str]]
    :return: Protein Graph with ctd_descriptors feature added.
        G.graph["ctd_descriptors_{chain | aggregation_type}"]
    :rtype: nx.Graph
    """
    func = CalculateCTD
    feature_name = "ctd_descriptors"
    return compute_propy_feature(
        G,
        func=func,
        feature_name=feature_name,
        aggregation_type=aggregation_type,
    )
71dc1559b1d3f3a682e1a2107b0cd9fb49c57b9e
5,897
def get_report_hash(report: Report, hash_type: HashType) -> str: """ Get report hash for the given diagnostic. """ hash_content = None if hash_type == HashType.CONTEXT_FREE: hash_content = __get_report_hash_context_free(report) elif hash_type == HashType.PATH_SENSITIVE: hash_content = __get_report_hash_path_sensitive(report) elif hash_type == HashType.DIAGNOSTIC_MESSAGE: hash_content = __get_report_hash_diagnostic_message(report) else: raise Exception("Invalid report hash type: " + str(hash_type)) return __str_to_hash('|||'.join(hash_content))
6f0ba5edfcc49daa9f700857e8b6ba5cd5f7d1ba
5,898
import json def parse_json_file(json_file_path, allow_non_standard_comments=False): """ Parse a json file into a utf-8 encoded python dictionary :param json_file_path: The json file to parse :param allow_non_standard_comments: Allow non-standard comment ('#') tags in the file :return: Dictionary representation of the json file """ def _decode_list(list_data): rv = [] for item in list_data: if isinstance(item, unicode): item = item.encode('utf-8') elif isinstance(item, list): item = _decode_list(item) elif isinstance(item, dict): item = _decode_dict(item) rv.append(item) return rv def _decode_dict(dict_data): rv = {} for key, value in dict_data.iteritems(): if isinstance(key, unicode): key = key.encode('utf-8') if isinstance(value, unicode): value = value.encode('utf-8') elif isinstance(value, list): value = _decode_list(value) elif isinstance(value, dict): value = _decode_dict(value) rv[key] = value return rv try: if allow_non_standard_comments: # If we are reading non-standard json files where we are accepting '#' as comment tokens, then the # file must have CR/LF characters and will be read in line by line. with open(json_file_path) as json_file: json_lines = json_file.readlines() json_file_content = "" for json_line in json_lines: comment_index = json_line.find('#') literal_pound_index = json_line.find('##') if comment_index>=0 and comment_index != literal_pound_index: processed_line = json_line.split('#')[0].strip() else: if literal_pound_index>=0: processed_line = json_line.replace('##','#').strip() else: processed_line = json_line.strip() json_file_content += processed_line else: with open(json_file_path) as json_file: json_file_content = json_file.read() json_file_data = json.loads(json_file_content, object_hook=_decode_dict) return json_file_data except Exception as e: raise ValueError('Error reading {}: {}'.format(json_file_path, e.message))
0df1108aedb60f0b0e0919c6cc7a66dd736ff8ac
5,899