content: string, lengths 22 to 815k
id: int64, 0 to 4.91M
def poll(module, function, args=(), kwargs={}, cron="", time="", delta_seconds=0, timeout=None, id_=None):
    """
    Create a poll job that polls a URL, then uses the result as kwargs to call the callback function.

    :param module: callback function's module name
    :param function: callback function's function name
    :param args: callback function's args
    :param kwargs: **kwargs for requests.post, callback will be {"result": r.json()} (r = requests.post(**kwargs))
    :param cron: cron description like "* * * * *"
    :param time: the specified exec time
    :param delta_seconds: delay seconds
    :param timeout: timeout of polling request
    :param id_: poll job's id, created randomly by default

    must have "time" or "delta_seconds" or "cron"
    """
    delta_seconds, task_info = get_task_info(**locals())
    logger.info("polling delay %s: %s", delta_seconds, task_info)
    POLLING_QUEUE.put(task_info, delay=delta_seconds)
5,337,700
def get_piesocket_api_key():
    """
    Retrieves user's Piesocket API key.

    Returns:
        (str) Piesocket API key.

    Raises:
        (ImproperlyConfigured) if the Piesocket API key isn't specified in settings.
    """
    return get_setting_or_raise(
        setting="PIESOCKET_API_KEY", setting_str="PieSocket API Key"
    )
5,337,701
def downsample(myarr, factor, estimator=np.mean):
    """
    Downsample a 2D array by averaging over *factor* pixels in each axis.
    Crops upper edge if the shape is not a multiple of factor.

    This code is pure numpy and should be fast.

    keywords:
        estimator - default to mean.  You can downsample by summing or
            something else if you want a different estimator
            (e.g., downsampling error: you want to sum & divide by sqrt(n))
    """
    ys, xs = myarr.shape
    crarr = myarr[:ys - (ys % int(factor)), :xs - (xs % int(factor))]
    dsarr = estimator(
        np.concatenate(
            [[crarr[i::factor, j::factor] for i in range(factor)] for j in range(factor)]
        ),
        axis=0,
    )
    return dsarr
5,337,702
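A minimal usage sketch for the `downsample` function above; it assumes only that `numpy` is imported as `np` and that `downsample` is in scope, and the array values are illustrative.

import numpy as np

arr = np.arange(16, dtype=float).reshape(4, 4)

# Average each non-overlapping 2x2 block -> result of shape (2, 2)
means = downsample(arr, 2)

# Sum instead of average by passing a different estimator
sums = downsample(arr, 2, estimator=np.sum)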
def getActiveWindow():
    """Returns a Window object of the currently active Window."""
    # Source: https://stackoverflow.com/questions/5286274/front-most-window-using-cgwindowlistcopywindowinfo
    windows = Quartz.CGWindowListCopyWindowInfo(
        Quartz.kCGWindowListExcludeDesktopElements | Quartz.kCGWindowListOptionOnScreenOnly,
        Quartz.kCGNullWindowID,
    )
    for win in windows:
        if win['kCGWindowLayer'] == 0:
            # Temporary. For now, we'll just return the title of the active window.
            return '%s %s' % (win[Quartz.kCGWindowOwnerName], win.get(Quartz.kCGWindowName, ''))
    raise Exception('Could not find an active window.')
5,337,703
def _derivative_log(x):
    """Chain rule on natural log = (1/x)*(dx/dr)"""
    return _protected_inverse(x[0])[:, :, np.newaxis, np.newaxis] * x[1]
5,337,704
def svn_wc_merge_props(*args):
    """
    svn_wc_merge_props(svn_wc_notify_state_t state, char path, svn_wc_adm_access_t adm_access,
        apr_hash_t baseprops, apr_array_header_t propchanges, svn_boolean_t base_merge,
        svn_boolean_t dry_run, apr_pool_t pool) -> svn_error_t
    """
    return _wc.svn_wc_merge_props(*args)
5,337,705
def is_paused():
    """
    Return True if is_paused is set in the global settings table of the database.
    """
    try:
        is_paused_val = Settings.objects.get().is_paused
    except ObjectDoesNotExist:
        is_paused_val = False
    return is_paused_val
5,337,706
def info_request(request):
    """Information request form."""
    if request.method == 'POST':
        form = InfoRequestForm(request.POST)
        if form.is_valid():
            cd = form.cleaned_data
            # create our recipient list
            to = []
            # cleaned_data converts the data to a list so we do not
            # need to use getlist()
            for program in cd.get('academic_programs'):
                to = to + settings.CONTINUING_STUDIES_INFOREQUEST_RECIPIENTS[program]
            if settings.DEBUG:
                cd['to'] = to
                to = [settings.MANAGERS[0][1]]
            subject = 'OCS Information Request'
            send_mail(
                request, to, subject, cd['email'],
                'admissions/inforequest.txt', cd,
            )
            return HttpResponseRedirect(reverse_lazy('info_request_success'))
    else:
        form = InfoRequestForm()
    return render(request, 'admissions/inforequest.html', {'form': form})
5,337,707
def make_content_based_dataset(main_set): """ Make a dataset to work on for the content based filtering approach """ main_columns = [ "appid", "english", "name", "developer", "steamspy_tags", "positive_ratings", "negative_ratings" ] desc_columns = [ "steam_appid", "about_the_game" ] main_df = pd.read_csv(f"sets/{main_set}.csv", usecols=main_columns) desc_df = pd.read_csv("sets/steam_description_data.csv", usecols=desc_columns) desc_df.rename(columns={"steam_appid": "appid",}, inplace=True) # make it the same name joined_df = main_df.merge(desc_df, on="appid") joined_df = joined_df[joined_df["english"] == 1].reset_index(drop=True) # remove games which don't have english translation joined_df = joined_df.drop("english", axis=1) # we don't need this column anymore joined_df["about_the_game"] = joined_df["about_the_game"].apply(preprocess_text_tfidf) # get the game description column ready for training joined_df = joined_df[pd.notnull(joined_df["about_the_game"])] # there may be nulls so remove them # Apply simple processing for developer and steam tags columns - this is done because that's how the dataset is made joined_df["developer"] = joined_df["developer"].apply(remove_semicolons) joined_df["steamspy_tags"] = joined_df["steamspy_tags"].apply(remove_semicolons) # Unite tags, name, developer and use it later for recommending joined_df["recommendation_info"] = joined_df["name"] + " " + joined_df["developer"] + " " + joined_df["steamspy_tags"] joined_df["recommendation_info"] = joined_df["recommendation_info"].apply(preprocess_text_countvec) # Save joined_df.to_csv("sets/content_based_dataset.csv", index=False)
5,337,708
def test_protocol_configuration_schema_is_valid_wrt_draft_04():
    """Test that the JSON schema for the protocol configuration file is compliant with the specification Draft 04."""
    protocol_config_schema = json.load(
        open(
            os.path.join(
                ROOT_DIR,
                "aea",
                "configurations",
                "schemas",
                "protocol-config_schema.json",
            )
        )
    )
    Draft4Validator.check_schema(protocol_config_schema)
5,337,709
def validate_did_management_ext_ids_v100(ext_ids):
    """
    Validates the ExtIDs of a DIDManagement entry.

    Parameters
    ----------
    ext_ids: list of bytes
        The ExtIDs of the entry

    Raises
    ------
    MalformedDIDManagementEntry
        If the ExtIDs are not valid.
    """
    if not (
        _validate_ext_ids_length(ext_ids, 2)
        and _validate_entry_type(ext_ids, EntryType.Create)
        and _validate_schema_version(ext_ids, ENTRY_SCHEMA_V100)
    ):
        raise MalformedDIDManagementEntry(
            "Invalid or missing {} entry ExtIDs".format(EntryType.Create.value)
        )
5,337,710
async def delete_workflow_revision( # pylint: disable=W0622 id: UUID, ) -> None: """Delete a transformation revision of type workflow from the data base. Deleting a transformation revision is only possible if it is in state DRAFT. This endpoint is deprecated and will be removed soon, use DELETE /api/transformations/{id} instead. """ logger.info("delete workflow %s", id) try: delete_single_transformation_revision(id, type=Type.WORKFLOW) logger.info("deleted workflow %s", id) except DBTypeError as e: raise HTTPException(status.HTTP_401_UNAUTHORIZED, detail=str(e)) from e except DBBadRequestError as e: raise HTTPException(status.HTTP_403_FORBIDDEN, detail=str(e)) from e except DBNotFoundError as e: raise HTTPException(status.HTTP_404_NOT_FOUND, detail=str(e)) from e
5,337,711
def test_invalid_isbn(talker):
    """Sometimes Marvel API sends number for isbn"""
    murpg = talker.comics({"id": 1143}).comics[0]
    assert murpg.isbn == "785110283"
    assert murpg.prices.print == 9.99
5,337,712
def get_accurate(clustering_res_df, cluster_number, error=False): """ :param clustering_res_df: a pandas DataFrame about clustering result :param cluster_number: the number of the cluster (the first column is the index, the second column is the right information, the third column is the clustering information) :param error: if error=True, then return the error rate, else, return the accuracy rate :return: the clustering accuracy """ if clustering_res_df.shape[1] != 3: raise Exception("Shape Error: the input DataFrame's column number is not 3") real_dict = {} clustering_dict = {} for i in range(cluster_number): real_df = clustering_res_df.loc[clustering_res_df['ClusterInfo'] == i] clustering_df = clustering_res_df.loc[clustering_res_df['ClusterExp'] == i] real_dict[i] = real_df['IndexNum'].tolist() clustering_dict[i] = clustering_df['IndexNum'].tolist() accuracy_matrix = np.zeros((cluster_number, cluster_number)) for i in range(cluster_number): for j in range(cluster_number): accuracy_matrix[i][j] = len(set(real_dict[i]).intersection(set(clustering_dict[j]))) # for test # print("The accuracy matrix is: \n", accuracy_matrix) case_iterator = itertools.permutations(range(cluster_number), cluster_number) accurate = 0 for item in case_iterator: acc = sum([accuracy_matrix[i][item[i]] for i in range(cluster_number)]) if acc > accurate: accurate = acc if not error: return accurate / clustering_res_df.shape[0] else: return 1 - accurate / clustering_res_df.shape[0]
5,337,713
def remove_old_now_linear_bend(atoms, intcos):
    """For given bend [A,B,C], remove any regular bends as well as any
    torsions which contain it
    """
    logger = logging.getLogger(__name__)
    b = bend.Bend(atoms[0], atoms[1], atoms[2])
    logger.info("Removing Old Linear Bend")
    logger.info(str(b) + "\n")
    intcos[:] = [coord for coord in intcos if coord != b]
    intcos[:] = [
        coord for coord in intcos
        if not (isinstance(coord, tors.Tors) and tors_contains_bend(b, coord))
    ]
5,337,714
def odict_to_json(odict):
    """
    Dump an OrderedDict into JSON series
    """
    import json
    json_series = json.dumps(odict)
    return json_series
5,337,715
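A brief usage sketch for `odict_to_json`; the dictionary contents are made up for illustration.

from collections import OrderedDict

config = OrderedDict([("name", "sensor-1"), ("rate_hz", 10)])

# Keys are serialized in insertion order:
# '{"name": "sensor-1", "rate_hz": 10}'
print(odict_to_json(config))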
def parenthesize(node: ast.AST, _nl_able: bool = False) -> str:
    """Wrap the un-parsed node in parentheses."""
    return f"({unparse(node, True)})"
5,337,716
def stack_nested_arrays(nested_arrays):
    """Stack/batch a list of nested numpy arrays.

    Args:
        nested_arrays: A list of nested numpy arrays of the same shape/structure.

    Returns:
        A nested array containing batched items, where each batched item is
        obtained by stacking corresponding items from the list of nested_arrays.
    """
    nested_arrays_flattened = [tf.nest.flatten(a) for a in nested_arrays]
    batched_nested_array_flattened = [
        np.stack(a) for a in zip(*nested_arrays_flattened)
    ]
    return tf.nest.pack_sequence_as(nested_arrays[0], batched_nested_array_flattened)
5,337,717
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): """Set up the Serial sensor platform.""" name = config.get(CONF_NAME) port = config.get(CONF_SERIAL_PORT) baudrate = config.get(CONF_BAUDRATE) bytesize = config.get(CONF_BYTESIZE) parity = config.get(CONF_PARITY) stopbits = config.get(CONF_STOPBITS) xonxoff = config.get(CONF_XONXOFF) rtscts = config.get(CONF_RTSCTS) dsrdtr = config.get(CONF_DSRDTR) if (value_template := config.get(CONF_VALUE_TEMPLATE)) is not None: value_template.hass = hass sensor = SerialSensor( name, port, baudrate, bytesize, parity, stopbits, xonxoff, rtscts, dsrdtr, value_template, ) hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, sensor.stop_serial_read) async_add_entities([sensor], True)
5,337,718
def test_HollowSphereSelector(): """ Test that the HollowSphereSelector selects the right number of entities """ sphere = ( cq.Workplane().sphere(5).polarArray(7, 30, 120, 2, rotate=True).box(1, 2, 1) ) vertices = sphere.vertices(HollowSphereSelector((0, 0, 0), 7, 5.5, debug=True)) edges = sphere.edges(HollowSphereSelector((0, 0, 0), 9, 7.5, debug=True)) faces = sphere.faces(HollowSphereSelector((0, 0, 0), 9, 7.5, debug=True)) solids = sphere.solids(HollowSphereSelector((0, 0, 0), 9, 6, debug=True)) assert vertices.size() == 8 assert edges.size() == 8 assert faces.size() == 2 assert solids.size() == 2
5,337,719
def temporal_statistics(da, stats): """ Obtain generic temporal statistics using the hdstats temporal library: https://github.com/daleroberts/hdstats/blob/master/hdstats/ts.pyx last modified June 2020 Parameters ---------- da : xarray.DataArray DataArray should contain a 3D time series. stats : list list of temporal statistics to calculate. Options include: 'discordance' = 'f_std' = std of discrete fourier transform coefficients, returns three layers: f_std_n1, f_std_n2, f_std_n3 'f_mean' = mean of discrete fourier transform coefficients, returns three layers: f_mean_n1, f_mean_n2, f_mean_n3 'f_median' = median of discrete fourier transform coefficients, returns three layers: f_median_n1, f_median_n2, f_median_n3 'mean_change' = mean of discrete difference along time dimension 'median_change' = median of discrete difference along time dimension 'abs_change' = mean of absolute discrete difference along time dimension 'complexity' = 'central_diff' = 'num_peaks' : The number of peaks in the timeseries, defined with a local window of size 10. NOTE: This statistic is very slow Outputs ------- xarray.Dataset containing variables for the selected temporal statistics """ # if dask arrays then map the blocks if dask.is_dask_collection(da): if version.parse(xr.__version__) < version.parse("0.16.0"): raise TypeError( "Dask arrays are only supported by this function if using, " + "xarray v0.16, run da.compute() before passing dataArray." ) # create a template that matches the final datasets dims & vars arr = da.isel(time=0).drop("time") # deal with the case where fourier is first in the list if stats[0] in ("f_std", "f_median", "f_mean"): template = xr.zeros_like(arr).to_dataset(name=stats[0] + "_n1") template[stats[0] + "_n2"] = xr.zeros_like(arr) template[stats[0] + "_n3"] = xr.zeros_like(arr) for stat in stats[1:]: if stat in ("f_std", "f_median", "f_mean"): template[stat + "_n1"] = xr.zeros_like(arr) template[stat + "_n2"] = xr.zeros_like(arr) template[stat + "_n3"] = xr.zeros_like(arr) else: template[stat] = xr.zeros_like(arr) else: template = xr.zeros_like(arr).to_dataset(name=stats[0]) for stat in stats: if stat in ("f_std", "f_median", "f_mean"): template[stat + "_n1"] = xr.zeros_like(arr) template[stat + "_n2"] = xr.zeros_like(arr) template[stat + "_n3"] = xr.zeros_like(arr) else: template[stat] = xr.zeros_like(arr) try: template = template.drop('spatial_ref') except: pass # ensure the time chunk is set to -1 da_all_time = da.chunk({"time": -1}) # apply function across chunks lazy_ds = da_all_time.map_blocks( temporal_statistics, kwargs={"stats": stats}, template=template ) try: crs = da.geobox.crs lazy_ds = assign_crs(lazy_ds, str(crs)) except: pass return lazy_ds # If stats supplied is not a list, convert to list. 
stats = stats if isinstance(stats, list) else [stats] # grab all the attributes of the xarray x, y, time, attrs = da.x, da.y, da.time, da.attrs # deal with any all-NaN pixels by filling with 0's mask = da.isnull().all("time") da = da.where(~mask, other=0) # complete timeseries print("Completing...") da = fast_completion(da) # ensure dim order is correct for functions da = da.transpose("y", "x", "time").values stats_dict = { "discordance": lambda da: hdstats.discordance(da, n=10), "f_std": lambda da: hdstats.fourier_std(da, n=3, step=5), "f_mean": lambda da: hdstats.fourier_mean(da, n=3, step=5), "f_median": lambda da: hdstats.fourier_median(da, n=3, step=5), "mean_change": lambda da: hdstats.mean_change(da), "median_change": lambda da: hdstats.median_change(da), "abs_change": lambda da: hdstats.mean_abs_change(da), "complexity": lambda da: hdstats.complexity(da), "central_diff": lambda da: hdstats.mean_central_diff(da), "num_peaks": lambda da: hdstats.number_peaks(da, 10), } print(" Statistics:") # if one of the fourier functions is first (or only) # stat in the list then we need to deal with this if stats[0] in ("f_std", "f_median", "f_mean"): print(" " + stats[0]) stat_func = stats_dict.get(str(stats[0])) zz = stat_func(da) n1 = zz[:, :, 0] n2 = zz[:, :, 1] n3 = zz[:, :, 2] # intialise dataset with first statistic ds = xr.DataArray( n1, attrs=attrs, coords={"x": x, "y": y}, dims=["y", "x"] ).to_dataset(name=stats[0] + "_n1") # add other datasets for i, j in zip([n2, n3], ["n2", "n3"]): ds[stats[0] + "_" + j] = xr.DataArray( i, attrs=attrs, coords={"x": x, "y": y}, dims=["y", "x"] ) else: # simpler if first function isn't fourier transform first_func = stats_dict.get(str(stats[0])) print(" " + stats[0]) ds = first_func(da) # convert back to xarray dataset ds = xr.DataArray( ds, attrs=attrs, coords={"x": x, "y": y}, dims=["y", "x"] ).to_dataset(name=stats[0]) # loop through the other functions for stat in stats[1:]: print(" " + stat) # handle the fourier transform examples if stat in ("f_std", "f_median", "f_mean"): stat_func = stats_dict.get(str(stat)) zz = stat_func(da) n1 = zz[:, :, 0] n2 = zz[:, :, 1] n3 = zz[:, :, 2] for i, j in zip([n1, n2, n3], ["n1", "n2", "n3"]): ds[stat + "_" + j] = xr.DataArray( i, attrs=attrs, coords={"x": x, "y": y}, dims=["y", "x"] ) else: # Select a stats function from the dictionary # and add to the dataset stat_func = stats_dict.get(str(stat)) ds[stat] = xr.DataArray( stat_func(da), attrs=attrs, coords={"x": x, "y": y}, dims=["y", "x"] ) # try to add back the geobox try: crs = da.geobox.crs ds = assign_crs(ds, str(crs)) except: pass return ds
5,337,720
def encode_data(data):
    """
    Helper that converts :class:`str` or :class:`bytes` to :class:`bytes`.

    :class:`str` are encoded with UTF-8.
    """
    # Expect str or bytes, return bytes.
    if isinstance(data, str):
        return data.encode('utf-8')
    elif isinstance(data, bytes):
        return data
    else:
        raise TypeError("data must be bytes or str")
5,337,721
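A short usage sketch for `encode_data`, showing its three branches; the values are illustrative.

assert encode_data("héllo") == "héllo".encode("utf-8")  # str -> UTF-8 bytes
assert encode_data(b"raw") == b"raw"                    # bytes pass through unchanged

try:
    encode_data(42)  # any other type is rejected
except TypeError as exc:
    print(exc)  # data must be bytes or str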
def get_apps_final(the_apps_dummy):
    """
    Compute:
    1. the number of apps each user has installed;
    2. the number of niche apps each user has installed;
    3. the number of popular apps each user has installed;
    4. the result of Mean-shift clustering on each user's installed-app vector.
    """
    core_data = the_apps_dummy.drop(['id'], axis=1)
    the_apps_final = get_minor_major(core_data, 'apps', 5, 90)
    # new_core_data = col_cluster(core_data, n_cluster, 'app')
    # the_apps_final = pd.concat([apps_minor_major, new_core_data], axis=1)
    the_apps_final['id'] = the_apps_dummy['id']
    return the_apps_final
5,337,722
def get_linked_events(user, dt, limit=None, load_also=()): """Get the linked events and the user's roles in them :param user: A `User` :param dt: Only include events taking place on/after that date :param limit: Max number of events """ from indico.modules.events.abstracts.util import (get_events_with_abstract_reviewer_convener, get_events_with_abstract_persons) from indico.modules.events.contributions.util import get_events_with_linked_contributions from indico.modules.events.papers.util import get_events_with_paper_roles from indico.modules.events.registration.util import get_events_registered from indico.modules.events.sessions.util import get_events_with_linked_sessions from indico.modules.events.surveys.util import get_events_with_submitted_surveys from indico.modules.events.util import (get_events_managed_by, get_events_created_by, get_events_with_linked_event_persons) links = OrderedDict() for event_id in get_events_registered(user, dt): links.setdefault(event_id, set()).add('registration_registrant') for event_id in get_events_with_submitted_surveys(user, dt): links.setdefault(event_id, set()).add('survey_submitter') for event_id in get_events_managed_by(user, dt): links.setdefault(event_id, set()).add('conference_manager') for event_id in get_events_created_by(user, dt): links.setdefault(event_id, set()).add('conference_creator') for event_id, principal_roles in get_events_with_linked_sessions(user, dt).iteritems(): links.setdefault(event_id, set()).update(principal_roles) for event_id, principal_roles in get_events_with_linked_contributions(user, dt).iteritems(): links.setdefault(event_id, set()).update(principal_roles) for event_id, role in get_events_with_linked_event_persons(user, dt).iteritems(): links.setdefault(event_id, set()).add(role) for event_id, roles in get_events_with_abstract_reviewer_convener(user, dt).iteritems(): links.setdefault(event_id, set()).update(roles) for event_id, roles in get_events_with_abstract_persons(user, dt).iteritems(): links.setdefault(event_id, set()).update(roles) for event_id, roles in get_events_with_paper_roles(user, dt).iteritems(): links.setdefault(event_id, set()).update(roles) if not links: return OrderedDict() query = (Event.query .filter(~Event.is_deleted, Event.id.in_(links)) .options(joinedload('series'), joinedload('label'), load_only('id', 'category_id', 'title', 'start_dt', 'end_dt', 'series_id', 'series_pos', 'series_count', 'label_id', 'label_message', *load_also)) .order_by(Event.start_dt, Event.id)) if limit is not None: query = query.limit(limit) return OrderedDict((event, links[event.id]) for event in query)
5,337,723
def QA_SU_save_etf_min(client=DATABASE, ui_log=None, ui_progress=None): """save etf_min Keyword Arguments: client {[type]} -- [description] (default: {DATABASE}) """ __index_list = QA_fetch_get_stock_list('etf') coll = client.index_min coll.create_index( [ ('code', pymongo.ASCENDING), ('time_stamp', pymongo.ASCENDING), ('date_stamp', pymongo.ASCENDING) ] ) err = [] def __saving_work(code, coll): QA_util_log_info( '##JOB07 Now Saving ETF_MIN ==== {}'.format(str(code)), ui_log=ui_log ) try: for type in ['1min', '5min', '15min', '30min', '60min']: ref_ = coll.find({'code': str(code)[0:6], 'type': type}) end_time = str(now_time())[0:19] if ref_.count() > 0: start_time = ref_[ref_.count() - 1]['datetime'] QA_util_log_info( '##JOB07.{} Now Saving {} from {} to {} =={} '.format( ['1min', '5min', '15min', '30min', '60min'].index(type), str(code), start_time, end_time, type ), ui_log=ui_log ) if start_time != end_time: __data = QA_fetch_get_index_min( str(code), start_time, end_time, type ) if len(__data) > 1: coll.insert_many( QA_util_to_json_from_pandas(__data[1::]) ) else: start_time = '2015-01-01' QA_util_log_info( '##JOB07.{} Now Saving {} from {} to {} =={} '.format( ['1min', '5min', '15min', '30min', '60min'].index(type), str(code), start_time, end_time, type ), ui_log=ui_log ) if start_time != end_time: __data = QA_fetch_get_index_min( str(code), start_time, end_time, type ) if len(__data) > 1: coll.insert_many( QA_util_to_json_from_pandas(__data) ) except: err.append(code) executor = ThreadPoolExecutor(max_workers=4) res = { executor.submit(__saving_work, __index_list.index[i_][0], coll) for i_ in range(len(__index_list)) } # multi index ./. count = 1 for i_ in concurrent.futures.as_completed(res): QA_util_log_info( 'The {} of Total {}'.format(count, len(__index_list)), ui_log=ui_log ) strLogProgress = 'DOWNLOAD PROGRESS {} '.format( str(float(count / len(__index_list) * 100))[0:4] + '%' ) intLogProgress = int(float(count / len(__index_list) * 10000.0)) QA_util_log_info( strLogProgress, ui_log=ui_log, ui_progress=ui_progress, ui_progress_int_value=intLogProgress ) count = count + 1 if len(err) < 1: QA_util_log_info('SUCCESS', ui_log=ui_log) else: QA_util_log_info(' ERROR CODE \n ', ui_log=ui_log) QA_util_log_info(err, ui_log=ui_log)
5,337,724
def test_hapd_ctrl_level(dev, apdev):
    """hostapd and LEVEL ctrl_iface command"""
    ssid = "hapd-ctrl"
    params = {"ssid": ssid}
    hapd = hostapd.add_ap(apdev[0], params)
    if "FAIL" not in hapd.request("LEVEL 0"):
        raise Exception("Unexpected LEVEL success on non-monitor interface")
5,337,725
def get_run_callback(run_text, output_dir):
    """
    function to generate a tf TensorBoard run callback
    """
    root_logdir = f'{output_dir.rstrip("/")}/tensorboard_logs/'
    run_id = time.strftime(f'{run_text}_%Y_%m_%d-%H-%M-%S')
    log_path = os.path.join(root_logdir, run_id)
    tensorboard_callback = tf.keras.callbacks.TensorBoard(log_path)
    return tensorboard_callback
5,337,726
def set_seed(seed: int):
    """
    Helper function for reproducible behavior to set the seed in ``random``, ``numpy``, ``torch``.

    Args:
        seed (:obj:`int`): The seed to set.
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
5,337,727
def bert_keyword_expansion(keywords: str, num_similar: int, bert_model, tokenizer, bert_embedding_dict): """ Keyword expansion outputs top N most similar words from vocabulary. @param keywords: string of comma-separated words to find keywords for @param num_similar: number of similar words to return @param bert_model: BERT embedding model @param tokenizer: BERT tokenizer object @param bert_embedding_dict: KeyedVectors object storing BERT-generated embeddings @return: list of top N most similar words in order of descending similarity """ assert isinstance(keywords, str) keywords = utils.rm_punct(keywords) keywords = utils.lower(keywords) # Assuming input is a string of space separated words keyword_list = set(keywords.split()) # Dictionary used to store similarity scores for top keywords scores_dict = defaultdict(int) for keyword in keyword_list: # Check if keyword is in the BERT embedding dictionary # If not, we create a vector representation of it first if keyword not in bert_embedding_dict.vocab: keyword = bert_embedding(keyword, tokenizer, bert_model, to_numpy=True) # Returns a list of tuples in the form (word, similarity score) result = utils.find_similar_keyword_in_vocab(keyword=keyword, num_similar=num_similar, model=bert_embedding_dict, similarity_score=True) for word, score in result: # Skipping similar words that already in the list of keywords provided by user if word in keyword_list: continue else: # Keeping the maximum similarity score for each word scores_dict[word] = max(scores_dict[word], score) sorted_results = sorted(scores_dict.items(), key=lambda kv: kv[1], reverse=True)[:num_similar] return [word for word, score in sorted_results]
5,337,728
def utc_to_tt_offset(jday=None):
    """Returns the offset in seconds from a julian date in Terrestrial
    Time (TT) to a Julian day in Coordinated Universal Time (UTC)"""
    if use_numpy:
        return utc_to_tt_offset_numpy(jday)
    else:
        return utc_to_tt_offset_math(jday)
5,337,729
async def get_async_request(url: str) -> [int, Any]:
    """Get the data from the url provided.

    Parameters
    ----------
    url: str
        url to get the data from

    Returns
    -------
    [int, Any]
        Tuple with the Response status code and the data returned from the request
    """
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as response:
            data = await response.json()
            return [response.status, data]
5,337,730
def scattering_probability(H, psi0, n_emissions, c_ops, tlist, system_zero_state=None, construct_effective_hamiltonian=True): """ Compute the integrated probability of scattering n photons in an arbitrary system. This function accepts a nonlinearly spaced array of times. Parameters ---------- H : :class: qutip.Qobj or list System-waveguide(s) Hamiltonian or effective Hamiltonian in Qobj or list-callback format. If construct_effective_hamiltonian is not specified, an effective Hamiltonian is constructed from H and `c_ops`. psi0 : :class: qutip.Qobj Initial state density matrix :math:`\\rho(t_0)` or state vector :math:`\\psi(t_0)`. n_emissions : int Number of photons emitted by the system (into any combination of waveguides). c_ops : list List of collapse operators for each waveguide; these are assumed to include spontaneous decay rates, e.g. :math:`\\sigma = \\sqrt \\gamma \\cdot a`. tlist : array_like List of times for :math:`\\tau_i`. tlist should contain 0 and exceed the pulse duration / temporal region of interest; tlist need not be linearly spaced. system_zero_state : :class: qutip.Qobj State representing zero excitations in the system. Defaults to `basis(systemDims, 0)`. construct_effective_hamiltonian : bool Whether an effective Hamiltonian should be constructed from H and c_ops: :math:`H_{eff} = H - \\frac{i}{2} \\sum_n \\sigma_n^\\dagger \\sigma_n` Default: True. Returns ------- scattering_prob : float The probability of scattering n photons from the system over the time range specified. """ phi_n = temporal_scattered_state(H, psi0, n_emissions, c_ops, tlist, system_zero_state, construct_effective_hamiltonian) T = len(tlist) W = len(c_ops) # Compute <omega_tau> for all combinations of tau all_emission_indices = combinations_with_replacement(range(T), n_emissions) probs = np.zeros([T] * n_emissions) # Project scattered state onto temporal basis for emit_indices in all_emission_indices: # Consider unique emission time partitionings partition = tuple(set(set_partition(emit_indices, W))) # wg_indices_list = list(set_partition(indices, W)) for wg_indices in partition: projector = temporal_basis_vector(wg_indices, T) amplitude = projector.dag() * phi_n probs[emit_indices] += np.real(amplitude.conjugate() * amplitude) # Iteratively integrate to obtain single value while probs.shape != (): probs = np.trapz(probs, x = tlist) return np.abs(probs)
5,337,731
def analytical_value_cond_i_shannon(distr, par): """ Analytical value of the conditional Shannon mutual information. Parameters ---------- distr : str-s Names of the distributions; 'normal'. par : dictionary Parameters of the distribution. If distr is 'normal': par["cov"] and par["ds"] are the (joint) covariance matrix and the vector of subspace dimensions. Returns ------- cond_i : float Analytical value of the conditional Shannon mutual information. """ # initialization: ds = par['ds'] len_ds = len(ds) # 0,d_1,d_1+d_2,...,d_1+...+d_M; starting indices of the subspaces: cum_ds = cumsum(hstack((0, ds[:-1]))) idx_condition = range(cum_ds[len_ds - 1], cum_ds[len_ds - 1] + ds[len_ds - 1]) if distr == 'normal': c = par['cov'] # h_joint: h_joint = analytical_value_h_shannon(distr, par) # h_cross: h_cross = 0 for m in range(len_ds-1): # non-conditioning subspaces idx_m = range(cum_ds[m], cum_ds[m] + ds[m]) idx_m_and_condition = hstack((idx_m, idx_condition)) par = {"cov": c[ix_(idx_m_and_condition, idx_m_and_condition)]} h_cross += analytical_value_h_shannon(distr, par) # h_condition: par = {"cov": c[ix_(idx_condition, idx_condition)]} h_condition = analytical_value_h_shannon(distr, par) cond_i = -h_joint + h_cross - (len_ds - 2) * h_condition else: raise Exception('Distribution=?') return cond_i
5,337,732
def upload_file(localpath, s3path):
    """
    Uploads a file to s3
    :param localpath: The local path
    :param s3path: The s3 path in format s3://mybucket/mydir/mysample.txt
    """
    bucket, key = get_bucketname_key(s3path)

    if key.endswith("/"):
        key = "{}{}".format(key, os.path.basename(localpath))

    s3 = boto3.client('s3')
    s3.upload_file(localpath, bucket, key)
5,337,733
def convert_to_tub_v2(paths, output_path): """ Convert from old tubs to new one :param paths: legacy tub paths :param output_path: new tub output path :return: None """ empty_record = {'__empty__': True} if type(paths) is str: paths = [paths] legacy_tubs = [LegacyTub(path) for path in paths] print(f'Total number of tubs: {len(legacy_tubs)}') for legacy_tub in legacy_tubs: # add input and type for empty records recording inputs = legacy_tub.inputs + ['__empty__'] types = legacy_tub.types + ['boolean'] output_tub = Tub(output_path, inputs, types, list(legacy_tub.meta.items())) record_paths = legacy_tub.gather_records() bar = IncrementalBar('Converting', max=len(record_paths)) previous_index = None for record_path in record_paths: try: contents = Path(record_path).read_text() record = json.loads(contents) image_path = record['cam/image_array'] ms = record['milliseconds'] current_index = int(image_path.split('_')[0]) image_path = os.path.join(legacy_tub.path, image_path) image_data = Image.open(image_path) record['cam/image_array'] = image_data # first record or they are continuous, just append if not previous_index or current_index == previous_index + 1: output_tub.write_record(record, ms) previous_index = current_index # otherwise fill the gap with empty records else: # Skipping over previous record here because it has # already been written. previous_index += 1 # Adding empty record nodes, and marking them deleted # until the next valid record. delete_list = [] while previous_index < current_index: idx = output_tub.manifest.current_index output_tub.write_record(empty_record, ms) delete_list.append(idx) previous_index += 1 output_tub.delete_records(delete_list) bar.next() except Exception as exception: print(f'Ignoring record path {record_path}\n', exception) traceback.print_exc() # writing session id into manifest metadata output_tub.close()
5,337,734
def approve(credentials, assignment_id):
    """Approve an assignment"""
    # Connect to MTurk
    mturk = connect(credentials['PRODUCTION'])

    # Skip if already processed
    assignment = mturk.get_assignment(AssignmentId=assignment_id)
    if assignment['Assignment']['AssignmentStatus'] == 'Submitted':
        # Approve assignment
        mturk.approve_assignment(AssignmentId=assignment_id)
5,337,735
def print_cuda_memory(gpu: int = 0):
    """
    Prints current memory stats of gpu.
    """
    t = torch.cuda.get_device_properties(gpu).total_memory
    c = torch.cuda.memory_cached(gpu)
    a = torch.cuda.memory_allocated(gpu)
    print("GPU {}".format(gpu))
    print("\tTotal memory: {}".format(t))
    print("\tCached memory: {}".format(c))
    print("\tAllocated memory: {}".format(a))
5,337,736
def zexp(input, i=None): """ Point-wise complex exponential. :param input array: :param i bool: imaginary """ usage_string = "zexp [-i] input output" cmd_str = f'{BART_PATH} ' cmd_str += 'zexp ' flag_str = '' opt_args = f'' multituples = [] if i is not None: flag_str += f'-i ' cmd_str += flag_str + opt_args + ' ' cmd_str += f"{' '.join([' '.join([str(x) for x in arg]) for arg in zip(*multituples)]).strip()} {NAME}input {NAME}output " cfl.writecfl(NAME + 'input', input) if DEBUG: print(cmd_str) os.system(cmd_str) outputs = cfl.readcfl(NAME + 'output') return outputs
5,337,737
def ekin2wl(ekin):
    """Convert neutron kinetic energy in electronvolt to wavelength in Angstrom"""
    if _np and hasattr(ekin, '__len__'):
        # reciprocals without zero division:
        ekinnonzero = ekin != 0.0
        ekininv = 1.0 / _np.where(ekinnonzero, ekin, 1.0)  # fallback 1.0 wont be used
        return _c_ekin2wl * _np.sqrt(_np.where(ekinnonzero, ekininv, _np.inf))
    else:
        return _rawfct['ncrystal_ekin2wl'](ekin)
5,337,738
def add_jinja2_extension(config, ext, name='.jinja2'): """ This function is added as a method of a :term:`Configurator`, and should not be called directly. Instead it should be called like so after ``pyramid_jinja2`` has been passed to ``config.include``: .. code-block:: python config.add_jinja2_extension(myext) It will add the Jinja2 extension passed as ``ext`` to the current :class:`jinja2.Environment` used by the renderer named ``name``. """ ext = config.maybe_dotted(ext) def register(): env = get_jinja2_environment(config, name) env.add_extension(ext) config.action(None, register, order=EXTRAS_CONFIG_PHASE)
5,337,739
def show_subscription(conn, customer): """ Retrieves authenticated user's plan and prints it. - Return type is a tuple, 1st element is a boolean and 2nd element is the response message from messages.py. - If the operation is successful; print the authenticated customer's plan and return tuple (True, CMD_EXECUTION_SUCCESS). - If any exception occurs; return tuple (False, CMD_EXECUTION_FAILED). Output should be like: #|Name|Resolution|Max Sessions|Monthly Fee 1|Basic|720P|2|30 """ try: cursor = conn.cursor() cursor.execute("SELECT plan_id " "FROM customer " "WHERE customer_id = %s", (customer.customer_id,)) queryCustomerPlanId = cursor.fetchall() if queryCustomerPlanId is None: # Fail if no such customer exists (somehow) return False, CMD_EXECUTION_FAILED else: cursor.execute("SELECT plan_id, plan_name, resolution, max_parallel_sessions, monthly_fee " "FROM plan " "WHERE plan_id = %s", (queryCustomerPlanId[0],)) queryPlan = cursor.fetchone() if queryPlan is None: # Fail if no such plan exists return False, CMD_EXECUTION_FAILED print("#|Name|Resolution|Max Sessions|Monthly Fee") print("{0}|{1}|{2}|{3}|{4}".format(queryPlan[0], queryPlan[1], queryPlan[2], queryPlan[3], queryPlan[4])) cursor.close() return True, CMD_EXECUTION_SUCCESS except Exception as e: return False, CMD_EXECUTION_FAILED
5,337,740
def check_atoms_coordinates(mol):
    """
    Function to check if a molecule contains zero coordinates in all atoms.
    Then this molecule must be eliminated.

    Returns True if the molecule is OK and False if it contains only zero coordinates.

    Example:
        # Load test set to a frame
        sdf = 'miniset.sdf'
        df = pt.LoadSDF(sdf, molColName='mol3DProt')
        ## Checking if molecule contains only ZERO coordinates,
        ## then remove those molecules from dataset
        df['check_coordinates'] = [check_atoms_coordinates(x) for x in df.mol3DProt]
        df_eliminated_mols = df[df.check_coordinates == False]
        df = df[df.check_coordinates == True]
        df.drop(columns=['check_coordinates'], inplace=True)
        print('final minitest set:', df.shape[0])
        print('minitest eliminated:', df_eliminated_mols.shape[0])
    """
    conf = mol.GetConformer()
    position = []
    for i in range(conf.GetNumAtoms()):
        pos = conf.GetAtomPosition(i)
        position.append([pos.x, pos.y, pos.z])
    position = np.array(position)
    # All-zero coordinates mean the molecule has no usable conformer
    if not np.any(position):
        return False
    return True
5,337,741
def update_hosts_file(*flags):
    """
    Wrapper around running updateHostsFile.py

    Parameters
    ----------
    flags : varargs
        Commandline flags to pass into updateHostsFile.py. For more info, run
        the following command in the terminal or command prompt:

        ```
        python updateHostsFile.py -h
        ```
    """
    if subprocess.call([sys.executable, "updateHostsFile.py"] + list(flags)):
        print_failure("Failed to update hosts file")
5,337,742
def maybe_gen_fake_data_based_on_real_data( image, label, reso, min_fake_lesion_ratio, gen_fake_probability): """Remove real lesion and synthesize lesion.""" # TODO(lehou): Replace magic numbers with flag variables. gen_prob_indicator = tf.random_uniform( shape=[], minval=0.0, maxval=1.0, dtype=tf.float32) background_mask = tf.less(label, 0.5) lesion_mask = tf.greater(label, 1.5) liver_mask = tf.logical_not(tf.logical_or(background_mask, lesion_mask)) liver_intensity = tf.boolean_mask(image, liver_mask) lesion_intensity = tf.boolean_mask(image, lesion_mask) intensity_diff = tf.reduce_mean(liver_intensity) - ( tf.reduce_mean(lesion_intensity)) intensity_diff *= 1.15 intensity_diff = tf.cond(tf.is_nan(intensity_diff), lambda: 0.0, lambda: intensity_diff) lesion_liver_ratio = 0.0 lesion_liver_ratio += tf.random.normal(shape=[], mean=0.01, stddev=0.01) lesion_liver_ratio += tf.random.normal(shape=[], mean=0.0, stddev=0.05) lesion_liver_ratio = tf.clip_by_value( lesion_liver_ratio, min_fake_lesion_ratio, min_fake_lesion_ratio + 0.20) fake_lesion_mask = tf.logical_and( _gen_rand_mask(ratio_mean=lesion_liver_ratio, ratio_stddev=0.0, scale=reso // 32, shape=label.shape, smoothness=reso // 32), tf.logical_not(background_mask)) liver_mask = tf.logical_not(tf.logical_or(background_mask, fake_lesion_mask)) # Blur the masks lesion_mask_blur = tf.squeeze(tf.nn.conv3d( tf.expand_dims(tf.expand_dims(tf.cast(lesion_mask, tf.float32), -1), 0), filter=tf.ones([reso // 32] * 3 + [1, 1], tf.float32) / (reso // 32) ** 3, strides=[1, 1, 1, 1, 1], padding='SAME')) fake_lesion_mask_blur = tf.squeeze(tf.nn.conv3d( tf.expand_dims(tf.expand_dims( tf.cast(fake_lesion_mask, tf.float32), -1), 0), filter=tf.ones([reso // 32] * 3 + [1, 1], tf.float32) / (reso // 32) ** 3, strides=[1, 1, 1, 1, 1], padding='SAME')) # Remove real lesion and add fake lesion. # If the intensitify is too small (maybe no liver or lesion region labeled), # do not generate fake data. gen_prob_indicator = tf.cond( tf.greater(intensity_diff, 0.0001), lambda: gen_prob_indicator, lambda: 0.0) # pylint: disable=g-long-lambda image = tf.cond( tf.greater(gen_prob_indicator, 1 - gen_fake_probability), lambda: image + intensity_diff * lesion_mask_blur \ - intensity_diff * fake_lesion_mask_blur, lambda: image) label = tf.cond( tf.greater(gen_prob_indicator, 1 - gen_fake_probability), lambda: tf.cast(background_mask, tf.float32) * 0 + \ tf.cast(liver_mask, tf.float32) * 1 + \ tf.cast(fake_lesion_mask, tf.float32) * 2, lambda: label) # pylint: enable=g-long-lambda return image, label
5,337,743
def is_primitive_type(v):
    """
    Check to see if v is primitive. Primitive in this context means NOT a
    container type (str is the exception): primitive types are:
    int, float, complex, bool, None, str
    """
    # Note: use type(None) so that None values are matched; the Python 2-only
    # `long` type has been dropped.
    return type(v) in (int, float, complex, bool, type(None), str)
5,337,744
def _get_bit(h, i):
    """Return specified bit from string for subsequent testing"""
    h1 = int.from_bytes(h, 'little')
    return (h1 >> i) & 0x01
5,337,745
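A small usage sketch for `_get_bit`; note that multi-byte input is interpreted little-endian, so bit indices continue across bytes.

# b'\x05' is 0b00000101, so bits 0 and 2 are set
assert _get_bit(b'\x05', 0) == 1
assert _get_bit(b'\x05', 1) == 0
assert _get_bit(b'\x05', 2) == 1

# b'\x00\x01' is 256 when read little-endian, so bit 8 is set
assert _get_bit(b'\x00\x01', 8) == 1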
def go_down_right_reward(nobs, high_pos, agent_num, act):
    """
    Return a reward for going to the low or right side of the board

    :param nobs: The current observation
    :param high_pos: Tuple of lowest and most-right position
    :param agent_num: The id of the agent to check (0-3)
    :param act: The list of actions taken by the agents (used to check for a bomb action)
    :return: The reward for going down or right
    """
    # only give rewards if a new highest point is reached
    bomb_bonus = 0
    if act[agent_num] == 5:
        bomb_bonus = 0.00
    if nobs[agent_num]['position'][0] > high_pos[0]:
        return 1 + bomb_bonus, (nobs[agent_num]['position'][0], high_pos[1])
    elif nobs[agent_num]['position'][1] > high_pos[1]:
        return 1 + bomb_bonus, (high_pos[0], nobs[agent_num]['position'][1])
    else:
        return 0 + bomb_bonus, high_pos
5,337,746
def getChannelBoxMenu():
    """
    Get ChannelBox Menu, convert the main channel box to QT and return the
    Edit QMenu which is part of the channel box' children.

    :return: Maya's main channel box menu
    :rtype: QMenu
    """
    channelBox = getChannelBox()

    # find widget
    menus = channelBox.findChildren(QMenu)

    # find Edit menu
    for menu in menus:
        if menu.menuAction().text() == "Edit":
            return menu
5,337,747
def make_sid_cookie(sid, uri):
    """Given a sid (from a set-cookie) figure out how to send it back"""
    # sometime near 0.92, port got dropped...
    # uritype, uribody = urllib.splittype(uri)
    # host, path = urllib.splithost(uribody)
    # host, port = urllib.splitnport(host)
    # if port == -1:
    #     port = dict(http=80, https=443)[uritype]  # we want to throw here
    cookiename = "JIFTY_SID_HIVEMINDER"
    return "%s=%s" % (cookiename, sid)
5,337,748
def identity_filter(element_tuple):
    """
    element_tuple is made up of the (name, attrs) of each XML element
    retrieved by the startElement method
    """
    return element_tuple
5,337,749
def EoZ(N2, w0, f):
    """
    Wave ray energy when variations can only occur in the vertical
    (i.e. N2 and flow only vary with depth not horizontally) - Olbers 1981
    """
    Ez = np.squeeze(
        (w0**2 * (N2 - f**2))
        / ((w0**2 - f**2)**(3 / 2) * (N2 - w0**2)**(1 / 2))
    )
    return Ez
5,337,750
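A numerical sketch for `EoZ`, assuming `numpy` is imported as `np`; the buoyancy, wave, and Coriolis frequencies below are illustrative mid-latitude values, not taken from the source.

import numpy as np

N2 = (5.2e-3) ** 2                   # buoyancy frequency squared [rad^2 s^-2]
f = 1.0e-4                           # Coriolis frequency [rad s^-1]
w0 = np.array([2e-4, 5e-4, 1e-3])    # intrinsic wave frequencies between f and N

Ez = EoZ(N2, w0, f)                  # one energy value per frequency; largest as w0 approaches f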
def safe_string(value: Any) -> str:
    """
    Consistently converts a value to a string.

    :param value: The value to stringify.
    """
    if isinstance(value, bytes):
        return value.decode()
    return str(value)
5,337,751
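A short usage sketch for `safe_string`, covering the bytes branch and the fallback branch.

assert safe_string(b"caf\xc3\xa9") == "café"  # bytes are decoded (UTF-8 by default)
assert safe_string(3.14) == "3.14"            # everything else goes through str()
assert safe_string(None) == "None"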
def test_example_4p2(): """Test example 4.2 in Pozar.""" # X-band waveguide dimensions a = 2.285 * sc.centi b = 1.016 * sc.centi # Rexolite er_mag = 2.54 # Frequency f = 10 * sc.giga # Propagation constants beta_a = wg.phase_constant(f, a, b, er=1) beta_d = wg.phase_constant(f, a, b, er=er_mag) assert beta_a == approx(158.0, abs=0.5) assert beta_d == approx(304.1, abs=0.5) # Characteristic impedance z0_a = wg.impedance(f, a, b, er=1, ur=1, m=1, n=0, mode='TE') z0_d = wg.impedance(f, a, b, er=er_mag, ur=1, m=1, n=0, mode='TE') with pytest.raises(ValueError): wg.impedance(f, a, b, mode='TEM') print(z0_a) print(z0_d) assert z0_a == approx(500.0, abs=1) assert z0_d == approx(259.6, abs=1)
5,337,752
def handle_older_version(upstream_version: Box) -> bool: """ Checks if the current version (local) is older than the upstream one and provides a message to the end-user. :return: :py:class:`True` if local is older. :py:class:`False` otherwise. """ version_utility = VersionUtility(PyFunceble.storage.PROJECT_VERSION) if PyFunceble.facility.ConfigLoader.is_already_loaded(): if PyFunceble.storage.CONFIGURATION.cli_testing.display_mode.quiet: message = "New version available." elif PyFunceble.storage.CONFIGURATION.cli_testing.display_mode.colour: message = ( f"{colorama.Style.BRIGHT}{colorama.Fore.GREEN}Please take the " "time to " f"update {PyFunceble.storage.PROJECT_NAME}!" f"{colorama.Style.RESET_ALL}\n" f"{colorama.Style.BRIGHT}Your version:{colorama.Style.RESET_ALL} " f"{PyFunceble.storage.PROJECT_VERSION}\n" f"{colorama.Style.BRIGHT}Upstream version:{colorama.Style.RESET_ALL} " f"{upstream_version.current_version}\n" ) else: message = ( f"Please take the time to update " f"{PyFunceble.storage.PROJECT_NAME}!\n" f"Your version: {PyFunceble.storage.PROJECT_VERSION}\n" f"Upstream version: {upstream_version.current_version}" ) else: message = ( "Please take the time to " f"update {PyFunceble.storage.PROJECT_NAME}!\n" f"Your version: {PyFunceble.storage.PROJECT_VERSION}\n" f"Upstream version: {upstream_version.current_version}" ) if version_utility.is_older_than(upstream_version.current_version): print(message) return True return False
5,337,753
def silent_unlink(path):
    """like os.unlink but does not raise error if the file does not exist"""
    try:
        os.unlink(path)
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise
5,337,754
def tp(selector: Union[str, tuple] = "@s", selector2: Union[str, tuple] = ("~", "~", "~")):
    """
    selector:Union[str, tuple] -> The position to be moved from
    selector2:Union[str, tuple] -> The position to be moved to
    """
    if not ((isinstance(selector, str) or isinstance(selector, tuple))
            and (isinstance(selector2, str) or isinstance(selector2, tuple))):
        return "## Tp command hasn't been configured properly ##"

    if isinstance(selector, tuple):
        if len(selector) < 3:
            selector = ("~", "~", "~")
        return f"tp {selector[0]} {selector[1]} {selector[2]}\n"
    else:
        if isinstance(selector2, tuple):
            if len(selector2) < 3:
                selector2 = ("~", "~", "~")
            return f"tp {selector} {selector2[0]} {selector2[1]} {selector2[2]}\n"
        else:
            return f"tp {selector} {selector2}\n"
5,337,755
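A usage sketch for `tp` showing its three branches; the selectors and coordinates are illustrative Minecraft-style values.

print(tp(("100", "64", "-20")), end="")    # tp 100 64 -20
print(tp("@p", ("~", "~5", "~")), end="")  # tp @p ~ ~5 ~
print(tp("@s", "@e[type=armor_stand,limit=1]"), end="")  # tp @s @e[type=armor_stand,limit=1]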
def new_product() -> Product:
    """Generates an instance of Product with default values."""
    return Product(
        product_id='',
        desc='',
        display_name='',
        capacity=0,
        image='')
5,337,756
def _rng_bit_generator_batching_rule(batched_args, batch_dims, *, shape, dtype, algorithm):
    """Calls RBG in a loop and stacks the results."""
    key, = batched_args
    bd, = batch_dims
    if bd is batching.not_mapped:
        return lax.rng_bit_generator_p.bind(key, shape=shape, dtype=dtype,
                                            algorithm=algorithm), (None, None)
    key = batching.moveaxis(key, bd, 0)
    map_body = lambda k: lax.rng_bit_generator_p.bind(k, shape=shape, dtype=dtype,
                                                      algorithm=algorithm)
    stacked_keys, stacked_bits = map(map_body, key)
    return (stacked_keys, stacked_bits), (0, 0)
5,337,757
def count_short_tail_keywords(keywords: List[str]) -> int:
    """
    Returns the count of short tail keywords in a list of keywords.

    Parameters:
        keywords (List[str]): list with all keywords as strings.

    Returns:
        total (int): count of short tail keywords (1 or 2 words per keyword).
    """
    total = 0
    for keyword in keywords:
        keyword_list = keyword.split()
        # Short tail means one or two words, as stated in the docstring.
        if 1 <= len(keyword_list) <= 2:
            total += 1
    return total
5,337,758
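A small usage sketch for `count_short_tail_keywords`, reflecting the corrected condition above that counts keywords of one or two words; the keyword list is made up.

keywords = ["seo", "seo tools", "best seo tools 2024", "keyword research"]

# "seo", "seo tools" and "keyword research" are short tail; the 4-word keyword is not.
assert count_short_tail_keywords(keywords) == 3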
def test_assets_are_known(mock_bitfinex): """This tests only exchange (trades) assets (not margin, nor futures ones). """ unsupported_assets = set(UNSUPPORTED_BITFINEX_ASSETS) common_items = unsupported_assets.intersection(set(WORLD_TO_BITFINEX.values())) assert not common_items, f'Bitfinex assets {common_items} should not be unsupported' currencies_response = mock_bitfinex._query_currencies() if currencies_response.success is False: response = currencies_response.response test_warnings.warn(UserWarning( f'Failed to request {mock_bitfinex.name} currencies list. ' f'Response status code: {response.status_code}. ' f'Response text: {response.text}. Xfailing this test', )) pytest.xfail('Failed to request {mock_bitfinex.name} currencies list') exchange_pairs_response = mock_bitfinex._query_exchange_pairs() if exchange_pairs_response.success is False: response = exchange_pairs_response.response test_warnings.warn(UserWarning( f'Failed to request {mock_bitfinex.name} exchange pairs list. ' f'Response status code: {response.status_code}. ' f'Response text: {response.text}. Xfailing this test', )) pytest.xfail('Failed to request {mock_bitfinex.name} exchange pairs list') currency_map_response = mock_bitfinex._query_currency_map() if currency_map_response.success is False: response = currency_map_response.response test_warnings.warn(UserWarning( f'Failed to request {mock_bitfinex.name} currency map. ' f'Response status code: {response.status_code}. ' f'Response text: {response.text}. Xfailing this test', )) pytest.xfail('Failed to request {mock_bitfinex.name} currency map') test_assets = set(BITFINEX_EXCHANGE_TEST_ASSETS) unsupported_assets = set(UNSUPPORTED_BITFINEX_ASSETS) currency_map = currency_map_response.currency_map symbols = set() for symbol in currencies_response.currencies: if symbol in test_assets: continue for pair in exchange_pairs_response.pairs: if pair.startswith(symbol) or pair.endswith(symbol): symbols.add(symbol) break for symbol in symbols: try: asset_from_bitfinex( bitfinex_name=symbol, currency_map=currency_map, ) except UnsupportedAsset: assert symbol in unsupported_assets except UnknownAsset as e: test_warnings.warn(UserWarning( f'Found unknown asset {e.asset_name} with symbol {symbol} in ' f'{mock_bitfinex.name}. Support for it has to be added', ))
5,337,759
def is_odd(number):
    """Determine if a number is odd."""
    if number % 2 == 0:
        return False
    else:
        return True
5,337,760
def test_service_account_token_create_out_of_scope_service_account( permission_manage_service_accounts, staff_api_client, superuser_api_client, staff_user, permission_manage_orders, ): """Ensure user can't create token for service account with wider scope of permissions. Ensure superuser pass restrictions. """ app = App.objects.create(name="New_sa") query = SERVICE_ACCOUNT_TOKEN_CREATE_MUTATION app.permissions.add(permission_manage_orders) id = graphene.Node.to_global_id("ServiceAccount", app.id) variables = {"name": "Default token", "serviceAccount": id} # for staff user response = staff_api_client.post_graphql( query, variables={"input": variables}, permissions=(permission_manage_service_accounts,), ) content = get_graphql_content(response) data = content["data"]["serviceAccountTokenCreate"] errors = data["accountErrors"] assert not data["serviceAccountToken"] assert len(errors) == 1 error = errors[0] assert error["code"] == AccountErrorCode.OUT_OF_SCOPE_SERVICE_ACCOUNT.name assert error["field"] == "serviceAccount" # for superuser response = superuser_api_client.post_graphql(query, variables={"input": variables}) content = get_graphql_content(response) token_data = content["data"]["serviceAccountTokenCreate"]["serviceAccountToken"] auth_token_data = content["data"]["serviceAccountTokenCreate"]["authToken"] auth_token = app.tokens.get().auth_token assert auth_token_data == auth_token assert token_data["authToken"] == auth_token[-4:] assert token_data["name"] == "Default token"
5,337,761
def fmt_quil_str(raw_str):
    """Format a raw Quil program string

    Args:
        raw_str (str): Quil program typed in by user.

    Returns:
        str: The Quil program with leading/trailing whitespace trimmed.
    """
    raw_quil_str = str(raw_str)
    raw_quil_str_arr = raw_quil_str.split('\n')
    trimmed_quil_str_arr = [qs.strip() for qs in raw_quil_str_arr]
    trimmed_quil_str = '\n'.join([x for x in trimmed_quil_str_arr])
    return trimmed_quil_str
5,337,762
def get_source_plate_uuid(barcode: str) -> Optional[str]:
    """Attempt to get a UUID for a source plate barcode.

    Arguments:
        barcode {str} -- The source plate barcode.

    Returns:
        {str} -- The source plate UUID; otherwise None if it cannot be determined.
    """
    try:
        source_plates_collection: Collection = cast(Eve, app).data.driver.db.source_plates
        source_plate: Optional[SourcePlateDoc] = source_plates_collection.find_one({FIELD_BARCODE: barcode})
        if source_plate is None:
            return None

        return source_plate.get(FIELD_LH_SOURCE_PLATE_UUID)
    except Exception as e:
        logger.error(f"An error occurred attempting to determine the UUID of source plate '{barcode}'")
        logger.exception(e)
        return None
5,337,763
def find_largest_digit(n):
    """
    :param n: integers
    :return: the largest digit
    """
    n = abs(n)  # absolute the value
    if n < 10:
        return n
    else:
        return find_helper(n, 0)
5,337,764
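`find_helper` is not included in this snippet; a plausible recursive sketch, assuming its second argument carries the largest digit seen so far, could look like the following.

def find_helper(n, largest):
    # Compare the last digit with the running maximum, then recurse on the remaining digits.
    largest = max(largest, n % 10)
    if n < 10:
        return largest
    return find_helper(n // 10, largest)

# find_largest_digit(-82475) -> find_helper(82475, 0) -> 8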
def test_deny_this_without_attribute_access(code):
    """`this` object can't be used as a dependency directly."""

    class Foo:
        pass

    with pytest.raises(DependencyError) as exc_info:
        code(Foo)

    message = str(exc_info.value)
    assert message == "You can not use 'this' directly in the 'Injector'"
5,337,765
def print_to_file(fname: Union[str, Path], fn: Callable, args=None, kwargs=None):
    """ All `print` calls inside `fn(*args, **kwargs)` are redirected to the text file `fname`.

    :param fname:
    :param fn:
    :param args: args for fn
    :param kwargs: kwargs for fn
    :return:
    """
    if fname:
        fname = Path(fname).with_suffix('.txt')
    if args is None:
        args = tuple()
    if kwargs is None:
        kwargs = dict()

    with (fname.open('w') if fname else open(os.devnull, 'w')) as file:
        with contextlib.redirect_stdout(file):
            fn(*args, **kwargs)
5,337,766
def extractChrononTranslations(item): """ """ vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title']) if not (chp or vol or frag) or 'preview' in item['title'].lower(): return None item['title'] = item['title'].replace('’', '') if 'Weapons cheat'.lower() in item['title'].lower(): return buildReleaseMessageWithType(item, 'Modern weapons cheat in another world', vol, chp, frag=frag, postfix=postfix) if 'Heavenly Tribulation'.lower() in item['title'].lower(): return buildReleaseMessageWithType(item, 'Heavenly Tribulation', vol, chp, frag=frag, postfix=postfix) if 'I can speak'.lower() in item['title'].lower(): return buildReleaseMessageWithType(item, 'I Can Speak with Animals and Demons', vol, chp, frag=frag, postfix=postfix) if 'I Bought a Girl'.lower() in item['title'].lower(): return buildReleaseMessageWithType(item, 'I Bought a Girl', vol, chp, frag=frag, postfix=postfix) if 'Girl Corps'.lower() in item['title'].lower(): return buildReleaseMessageWithType(item, 'Girl Corps', vol, chp, frag=frag, postfix=postfix) if 'Modern Weapons'.lower() in item['title'].lower(): return buildReleaseMessageWithType(item, 'Modern weapons cheat in another world', vol, chp, frag=frag, postfix=postfix) if 'Upper World'.lower() in item['title'].lower(): return buildReleaseMessageWithType(item, 'Reincarnation ~ From the lower world to the upper world', vol, chp, frag=frag, postfix=postfix) if 'I work as a healer'.lower() in item['title'].lower(): return buildReleaseMessageWithType(item, "I Work As A Healer In Another World's Labyrinth City", vol, chp, frag=frag, postfix=postfix) return False
5,337,767
def run_feature_selection(X, y, select_k_features):
    """Use a random forest regressor as a proxy for finding the k most
    important features in X, returning indices for those features as output."""
    from sklearn.ensemble import RandomForestRegressor
    from sklearn.feature_selection import SelectFromModel, SelectKBest

    clf = RandomForestRegressor(n_estimators=100, max_depth=3, random_state=0)
    clf.fit(X, y)
    selector = SelectFromModel(
        clf, threshold=-np.inf, max_features=select_k_features, prefit=True
    )
    return selector.get_support(indices=True)
5,337,768
def add_path_to_syspath() -> None:
    """
    >>> add_path_to_syspath()
    """
    path_to_append = pathlib.Path(__file__).resolve().parent
    sys_paths_resolved = [pathlib.Path(path).resolve() for path in sys.path]
    if path_to_append not in sys_paths_resolved:
        sys.path.append(str(path_to_append))
5,337,769
def _not_json_encodable(message: str, failure_callback: Optional[Callable[[str], None]]) -> Literal[False]:
    """
    Utility method to fail (return `False`) by first calling an optional failure callback.
    """
    if failure_callback:
        failure_callback(message)
    return False
5,337,770
def requires(*commands: str) -> RequiresT:
    """Decorator to require the given commands."""

    def inner(func: ReturnT) -> ReturnT:
        """Decorates the function and checks for the commands."""
        for command in commands:
            if not check_availability(command):
                raise errors.MissingShellCommand(
                    f"ipq requires the {command!r} command, please install it."
                )

        @functools.wraps(func)
        def wrapper(*args: t.Any, **kwargs: t.Any) -> str:
            """Wraps and executes the decorated function."""
            return func(*args, **kwargs)

        return wrapper

    return inner
5,337,771
def _build_model(input_dim, num_classes, num_hidden_layers=0, hidden_dimension=128, normalize_inputs=False, dropout=0): """ Macro to generate a Keras classification model """ inpt = tf.keras.layers.Input((input_dim)) net = inpt # if we're normalizing inputs: if normalize_inputs: norm = tf.keras.layers.Lambda(lambda x:K.l2_normalize(x,axis=1)) net = norm(net) # for each hidden layer for _ in range(num_hidden_layers): if dropout > 0: net = tf.keras.layers.Dropout(dropout)(net) net = tf.keras.layers.Dense(hidden_dimension, activation="relu")(net) # final layer if dropout > 0: net = tf.keras.layers.Dropout(dropout)(net) net = tf.keras.layers.Dense(num_classes, activation="relu")(net) return tf.keras.Model(inpt, net)
5,337,772
def get_client( project_id, cloud_region, registry_id, device_id, private_key_file, algorithm, ca_certs, mqtt_bridge_hostname, mqtt_bridge_port): """Create our MQTT client. The client_id is a unique string that identifies this device. For Google Cloud IoT Core, it must be in the format below.""" client = mqtt.Client( client_id=('projects/{}/locations/{}/registries/{}/devices/{}' .format( project_id, cloud_region, registry_id, device_id))) # With Google Cloud IoT Core, the username field is ignored, and the # password field is used to transmit a JWT to authorize the device. client.username_pw_set( username='unused', password=create_jwt( project_id, private_key_file, algorithm)) # Enable SSL/TLS support. client.tls_set(ca_certs=ca_certs) # Register message callbacks. https://eclipse.org/paho/clients/python/docs/ # describes additional callbacks that Paho supports. In this example, the # callbacks just print to standard out. client.on_connect = on_connect client.on_publish = on_publish client.on_disconnect = on_disconnect client.on_message = on_message client.on_subscribe = on_subscribe # Connect to the Google MQTT bridge. client.connect(mqtt_bridge_hostname, mqtt_bridge_port) return client
5,337,773
def fake_image_sct_custom(data): """ :return: an Image (3D) in RAS+ (aka SCT LPI) space """ i = fake_image_custom(data) img = msct_image.Image(i.get_data(), hdr=i.header, orientation="LPI", dim=i.header.get_data_shape(), ) return img
5,337,774
def copy_sensor_image(input_sensor_file, output_sensor_file, raft_slot, sensor_slot, **kwargs):
    """ Copies a FITS file, with hooks to update the various image headers

    Parameters
    ----------
    input_sensor_file : str
        Name of the file to be copied
    output_sensor_file : str
        Destination
    raft_slot : str
        Name of the raft slot within the multi-raft setup
    sensor_slot : str
        Name of the sensor slot within the raft

    Keyword arguments
    -----------------
    overwrite : bool, optional
        Flag indicating whether to overwrite an existing output file
    dry_run : bool, optional
        If true, just print output file names, but do not copy files
    """
    overwrite = kwargs.get('overwrite', True)
    dry_run = kwargs.get('dry_run', False)

    if dry_run:
        os.system("touch %s" % output_sensor_file)
        return

    hdulist = fits.open(input_sensor_file)
    update_primary_header(raft_slot, sensor_slot, hdulist[0])

    for ext_num in range(1, 16):
        update_image_header(raft_slot, sensor_slot, hdulist[ext_num])

    # 'clobber' was deprecated and later removed from astropy; 'overwrite' is
    # the supported keyword
    hdulist.writeto(output_sensor_file, overwrite=overwrite)
    hdulist.close()
5,337,775
def test_version(): """Test version number of released package.""" assert __version__ == "0.2.0"
5,337,776
def _get_property_header(resource, resource_type): """ Create a dictionary representing resources properties :param resource: The name of the resource for which to create a property header :param resource_type: The type of the resource (model, seed, etc.) :return: A dictionary representing resource properties """ header_dict = { 'version': 2, _SUPPORTED_RESOURCE_TYPES[resource_type]: [ {'name': resource, 'description': "", 'columns': []} ], } return header_dict
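# Hedged usage sketch for _get_property_header; assumes _SUPPORTED_RESOURCE_TYPES
# maps 'model' to the plural 'models' key used in dbt property files.
header = _get_property_header("stg_customers", "model")
# header == {'version': 2,
#            'models': [{'name': 'stg_customers', 'description': '', 'columns': []}]}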
5,337,777
def create_metapaths_parameters(filename, folder):
    """ creates a parameters file from the default """
    default_filename = folder + PATHDELIM + 'resources' + PATHDELIM + "template_param.txt"
    try:
        filep = open(default_filename, 'r')
    except IOError:
        eprintf("ERROR: cannot open the default parameter file " + sQuote(default_filename))
        exit_process("ERROR: cannot open the default parameter file " + sQuote(default_filename),
                     errorCode=0)

    lines = filep.readlines()
    filep.close()

    with open(filename, 'w') as newfile:
        for line in lines:
            fprintf(newfile, "%s", line)

    return True
5,337,778
def test_series() -> None: """Tests for :meth:`NDRepr.repr_Series`.""" s1 = pd.Series(np.ones(10, dtype='float64')) s2 = pd.Series(np.ones(10, dtype='int64')) ref1 = '0 1.0\n1 1.0\n2 1.0\n3 1.0\n4 1.0\n5 1.0\n6 1.0\n7 1.0\n8 1.0\n9 1.0\ndtype: float64' # noqa: E501 ref2 = '0 1\n1 1\n2 1\n3 1\n4 1\n5 1\n6 1\n7 1\n8 1\n9 1\ndtype: int64' # noqa: E501 str1 = aNDRepr.repr(s1) str2 = aNDRepr.repr(s2) assertion.eq(str1, ref1) assertion.eq(str2, ref2)
5,337,779
def deploy(args: argparse.Namespace) -> None: """ Handler for `lambada deploy`, which creates an AWS Lambda from a Simiotics function Args: args `argparse.Namespace` object containing parameters to the `deploy` command Returns: None, prints AWS Lambda ARN """ simiotics = client_from_env() registered_function = simiotics.get_registered_function(args.key) if registered_function.tags.get(LambadaManagerKey) != LambadaManager: raise ValueError('Simiotics function with key={} not managed by lambada'.format(args.key)) environment_variables: Dict[str, str] = json.loads(registered_function.tags.get('env', '{}')) staging_dir = tempfile.mkdtemp() try: deployment_package_dir = os.path.join(staging_dir, 'deployment_package') os.mkdir(deployment_package_dir) requirements_txt = os.path.join(staging_dir, 'requirements.txt') code_py = os.path.join(deployment_package_dir, 'code.py') with open(requirements_txt, 'w') as ofp: ofp.write(registered_function.tags['requirements']) subprocess.run( [ sys.executable, "-m", "pip", "install", "-r", requirements_txt, "--target", deployment_package_dir ], check=True, ) if os.path.exists(code_py): raise ValueError('File already exists at path: {}'.format(code_py)) with open(code_py, 'w') as ofp: ofp.write(registered_function.code) zipfilepath = os.path.join(staging_dir, 'function.zip') shutil.make_archive(os.path.splitext(zipfilepath)[0], 'zip', deployment_package_dir) with open(zipfilepath, 'rb') as ifp: deployment_package = ifp.read() lambda_client = boto3.client('lambda') handler_path = 'code.{}'.format(registered_function.tags['handler']) lambda_resource = lambda_client.create_function( FunctionName=args.name, Runtime=registered_function.tags['runtime'], Role=registered_function.tags['iam_role_arn'], Handler=handler_path, Code={'ZipFile': deployment_package}, Environment={ 'Variables': environment_variables, }, Description='Simiotics lambada deployment of: {}'.format(args.key), Timeout=int(registered_function.tags['timeout']), Tags={ 'Creator': 'simiotics', }, ) lambda_arn = lambda_resource['FunctionArn'] registered_function.tags['lambda_arn'] = lambda_arn simiotics.register_function( key=registered_function.key, code=registered_function.code, tags=registered_function.tags, overwrite=True, ) print(lambda_arn) finally: if not args.keep_staging_dir: shutil.rmtree(staging_dir) else: print(staging_dir, file=sys.stderr)
5,337,780
def dist_prune(DELTA, prune=True):
    """
    transform similarity matrix to distance matrix
    - prune the matrix by removing edges whose distance is larger than a
      condition cond (default: the mean distance)
    """
    w = np.max(DELTA)
    DELTA = np.abs(DELTA - w)
    np.fill_diagonal(DELTA, 0.)
    if prune:
        cond = np.mean(DELTA)  # + np.std(DELTA)  # TODO: transform to parameter with choice between models
        DELTA[DELTA > cond] = 0.
    return DELTA
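# Hedged usage sketch for dist_prune on a small symmetric similarity matrix.
# Assumes numpy is available as np at module level.
import numpy as np

sim = np.array([[1.0, 0.8, 0.1],
                [0.8, 1.0, 0.3],
                [0.1, 0.3, 1.0]])
dist = dist_prune(sim.copy(), prune=True)
print(dist)  # distances above the mean are zeroed, the diagonal stays 0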
5,337,781
def bitwise_dot(x, y): """Compute the dot product of two integers bitwise.""" def bit_parity(i): n = bin(i).count("1") return int(n % 2) return bit_parity(x & y)
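# Quick illustrative checks for bitwise_dot (a GF(2) inner product of bit vectors):
assert bitwise_dot(0b1011, 0b0011) == 0   # two shared set bits -> even parity
assert bitwise_dot(0b1011, 0b0001) == 1   # one shared set bit  -> odd parity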
5,337,782
def check_limit(): """ Empty function for enabling global rate limiting. """ return
5,337,783
def fit(samples, degree, sample_weights=None):
    """
    Fit a univariate polynomial function to the 2d points given in samples, where the rows of
    samples are the points.  The return value is the vector of coefficients of the polynomial
    (see p below) which minimizes the squared error of the polynomial at the given samples.

    Denote the components of samples as

        samples = [
            [x[0], y[0]],
            [x[1], y[1]],
            ...
        ]

    and let

        p(coefficients)(t) = sum(coefficient*t**i for i,coefficient in enumerate(coefficients))

    noting that coefficients[0] is the constant term, coefficients[1] is the linear coefficient, etc.
    """
    assert len(samples.shape) == 2
    assert samples.shape[1] == 2, 'Expected the rows of samples to be (x,y) pairs.'

    A = np.zeros((degree+1, degree+1), dtype=float)
    B = np.zeros((degree+1,), dtype=float)
    weight_iterator = sample_weights if sample_weights is not None else itertools.cycle([1.0])
    # itertools.izip only exists in Python 2; the builtin zip works in both.
    for (x, y), weight in zip(samples, weight_iterator):
        g = geometric(x, degree)
        A += weight*np.outer(g, g)
        B += weight*y*g

    coefficients, _, _, _ = np.linalg.lstsq(A, B)
    return coefficients
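# Hedged usage sketch for fit: recovers the coefficients of y = 1 + 2x + 3x^2
# from noiseless samples. Assumes numpy as np and that geometric(x, degree)
# returns the monomial vector [1, x, ..., x**degree], as the normal equations
# above imply.
import numpy as np

xs = np.linspace(-1.0, 1.0, 25)
samples = np.column_stack([xs, 1.0 + 2.0 * xs + 3.0 * xs ** 2])
print(fit(samples, degree=2))  # approximately [1.0, 2.0, 3.0]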
5,337,784
def section_underline_overindented_and_contentless(): # noqa: D416 """Toggle the gizmo. Returns ------- """
5,337,785
def get_engines(): """ Returns a list of all engines for tests """ engines = [] base_dir = os.getcwd() engines_dir = os.path.join(base_dir, 'search_engine_parser', 'core', 'engines') for filename in os.listdir(engines_dir): if os.path.isfile(os.path.join(engines_dir, filename)) and filename.endswith('.py') \ and filename != '__init__.py': engine = filename.split('.py')[0] module = import_module("search_engine_parser.core.engines.{}".format(engine.lower())) engine_class = getattr(module, "Search") engines.append([engine, engine_class(),]) return engines
5,337,786
def IsEncryptedCoredump(path): """ Function to find if the coredump is encrypted or not. """ if not os.path.exists('/bin/vmkdump_extract'): raise Exception('vmkdump_extract not present.') result, rc = RunCmd("/bin/vmkdump_extract -E {0}".format(path)) if rc != 0: raise Exception( 'RunCmd failed when trying to check for encrypted coredump') return result.strip() == "YES"
5,337,787
def test_ap_ht40_5ghz_match(dev, apdev): """HT40 co-ex scan on 5 GHz with matching pri/sec channel""" clear_scan_cache(apdev[0]['ifname']) try: hapd = None hapd2 = None params = { "ssid": "test-ht40", "hw_mode": "a", "channel": "36", "country_code": "US", "ht_capab": "[HT40+]"} hapd2 = hostapd.add_ap(apdev[1]['ifname'], params) params = { "ssid": "test-ht40", "hw_mode": "a", "channel": "36", "ht_capab": "[HT40+]"} hapd = hostapd.add_ap(apdev[0]['ifname'], params, wait_enabled=False) state = hapd.get_status_field("state") if state != "HT_SCAN": time.sleep(0.1) state = hapd.get_status_field("state") if state != "HT_SCAN": raise Exception("Unexpected interface state - expected HT_SCAN") ev = hapd.wait_event(["AP-ENABLED"], timeout=10) if not ev: raise Exception("AP setup timed out") state = hapd.get_status_field("state") if state != "ENABLED": raise Exception("Unexpected interface state - expected ENABLED") freq = hapd.get_status_field("freq") if freq != "5180": raise Exception("Unexpected frequency") pri = hapd.get_status_field("channel") if pri != "36": raise Exception("Unexpected primary channel") sec = hapd.get_status_field("secondary_channel") if sec != "1": raise Exception("Unexpected secondary channel: " + sec) dev[0].connect("test-ht40", key_mgmt="NONE", scan_freq=freq) finally: dev[0].request("DISCONNECT") if hapd: hapd.request("DISABLE") if hapd2: hapd2.request("DISABLE") subprocess.call(['iw', 'reg', 'set', '00']) dev[0].flush_scan_cache()
5,337,788
def get_ahead_mask(tokens, i_pad=0):
    """
    Compute the look-ahead mask.
    :param tokens: tokens (bs, n_seq)
    :param i_pad: id of pad
    :return mask: ahead and pad mask (ahead or pad: 1, other: 0)
    """
    n_seq = tf.shape(tokens)[1]
    ahead_mask = 1 - tf.linalg.band_part(tf.ones((n_seq, n_seq)), -1, 0)
    ahead_mask = tf.expand_dims(ahead_mask, axis=0)
    pad_mask = get_pad_mask(tokens, i_pad)
    mask = tf.maximum(ahead_mask, pad_mask)
    return mask
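# Hedged usage sketch for get_ahead_mask; assumes get_pad_mask returns a
# (bs, 1, n_seq) pad indicator, as is common in Transformer implementations.
tokens = tf.constant([[5, 7, 9, 0, 0]])  # 0 is the pad id
mask = get_ahead_mask(tokens, i_pad=0)
print(mask.shape)  # (1, 5, 5): future positions and padded positions are 1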
5,337,789
def check_containment(row, query_index, reference_index, percent_identity=PERCENT_IDENTITY,
                      covered_length=COVERED_LENGTH):
    """Checks if a row from a BLAST output format 6 file is a containment

    Takes in a row from a BLAST output format 6 table and DataFrames with
    query sequence and reference sequence data.
    """
    if (row['qId'] != row['tId']) and (row['seqIdentity'] >= percent_identity):
        query_covered = row['alnLen'] / float(query_index.loc[row['qId'], 'LENGTH'])
        reference_covered = row['alnLen'] / float(reference_index.loc[row['tId'], 'LENGTH'])
        return query_covered >= covered_length or reference_covered >= covered_length
    return False
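# Hedged usage sketch for check_containment with minimal hand-built inputs;
# the column and index names follow the function's own accesses.
import pandas as pd

row = pd.Series({'qId': 'q1', 'tId': 'r1', 'seqIdentity': 99.0, 'alnLen': 950})
query_index = pd.DataFrame({'LENGTH': [1000]}, index=['q1'])
reference_index = pd.DataFrame({'LENGTH': [5000]}, index=['r1'])
print(check_containment(row, query_index, reference_index,
                        percent_identity=95.0, covered_length=0.9))  # True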
5,337,790
def get_mgga_data(mol, grid, rdm1): """ Get atomic orbital and density data. See eval_ao and eval_rho docs for details. Briefly, returns 0-3 derivatives of the atomic orbitals in ao_data; and the density, first derivatives of density, Laplacian of density, and kinetic energy density in rho_data. """ ao_data = eval_ao(mol, grid.coords, deriv=3) if len(rdm1.shape) == 2: rho_data = eval_rho(mol, ao_data, rdm1, xctype='mGGA') else: part0 = eval_rho(mol, ao_data, rdm1[0], xctype='mGGA') part1 = eval_rho(mol, ao_data, rdm1[1], xctype='mGGA') rho_data = np.array([part0, part1]) return ao_data, rho_data
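# Hedged usage sketch for get_mgga_data; assumes PySCF is installed and that
# `grid` is a built pyscf Grids object, which is what eval_ao expects here.
from pyscf import dft, gto, scf

mol = gto.M(atom='H 0 0 0; H 0 0 0.74', basis='sto-3g')
mf = scf.RHF(mol).run()
grid = dft.gen_grid.Grids(mol)
grid.build()
ao_data, rho_data = get_mgga_data(mol, grid, mf.make_rdm1())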
5,337,791
def bgp_prefix_tc1_remove(duthost, community): """ Test to remove prefix config """ json_patch = [ { "op": "remove", "path": "/BGP_ALLOWED_PREFIXES" } ] tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) try: output = apply_patch(duthost, json_data=json_patch, dest_file=tmpfile) expect_op_success(duthost, output) bgp_config = show_bgp_running_config(duthost) pytest_assert( not re.search(PREFIXES_V4_RE.format(community, PREFIXES_V4_DUMMY), bgp_config), "Failed to remove bgp prefix v4 config." ) pytest_assert( not re.search(PREFIXES_V6_RE.format(community, PREFIXES_V6_DUMMY), bgp_config), "Failed to remove bgp prefix v6 config." ) finally: delete_tmpfile(duthost, tmpfile)
5,337,792
def _stringify_lmer_warnings(fg_lmer):
    """create grid w/ _ separated string of lme4::lmer warning list items, else "" """

    warning_grids = fitgrid.utils.lmer.get_lmer_warnings(
        fg_lmer
    )  # dict of indicator dataframes
    warning_string_grid = pd.DataFrame(
        np.full(fg_lmer._grid.shape, ""),
        index=fg_lmer._grid.index.copy(),
        columns=fg_lmer._grid.columns.copy(),
    )

    # collect multiple warnings into single sorted "_" separated strings
    # on a tidy time x channel grid
    for warning, warning_grid in warning_grids.items():
        for idx, row_vals in warning_grid.iterrows():
            # Series.iteritems() was removed in pandas 2.0; items() is equivalent
            for jdx, col_val in row_vals.items():
                if col_val:
                    if len(warning_string_grid.loc[idx, jdx]) == 0:
                        warning_string_grid.loc[idx, jdx] = warning
                    else:
                        # split, sort, reassemble
                        wrns = "_".join(
                            sorted(
                                warning_string_grid.loc[idx, jdx].split("_")
                                + [warning]
                            )
                        )
                        warning_string_grid.loc[idx, jdx] = wrns
    return warning_string_grid
5,337,793
def test_rm_token_cache(
    httpx_mock: HTTPXMock,
    check_token_callback: Callable,
    check_credentials_callback: Callable,
    settings: Settings,
    auth_url: str,
    account_id_url: str,
    account_id_callback: Callable,
    provider_callback: Callable,
    provider_url: str,
    access_token: str,
) -> None:
    """Credentials that are passed to rm are processed properly."""
    url = "https://url"
    httpx_mock.add_callback(check_credentials_callback, url=auth_url)
    httpx_mock.add_callback(check_token_callback, url=url)
    httpx_mock.add_callback(provider_callback, url=provider_url)
    httpx_mock.add_callback(account_id_callback, url=account_id_url)

    with Patcher():
        local_settings = Settings(
            user=settings.user,
            password=settings.password.get_secret_value(),
            server=settings.server,
            default_region=settings.default_region,
            use_token_cache=True,
        )
        rm = ResourceManager(local_settings)
        rm.client.get(url)

        ts = TokenSecureStorage(settings.user, settings.password.get_secret_value())
        assert ts.get_cached_token() == access_token, "Invalid token value cached"

    # Do the same, but with use_token_cache=False
    with Patcher():
        local_settings = Settings(
            user=settings.user,
            password=settings.password.get_secret_value(),
            server=settings.server,
            default_region=settings.default_region,
            use_token_cache=False,
        )
        rm = ResourceManager(local_settings)
        rm.client.get(url)

        ts = TokenSecureStorage(settings.user, settings.password.get_secret_value())
        assert (
            ts.get_cached_token() is None
        ), "Token is cached even though caching is disabled"
5,337,794
def index(): """Loads the index page for the 'Admin' controller :returns: a dictionary to pass to the view with the list of ctr_enabled and the active module ('admin') """ ctr_data = get_ctr_data() users = db().select(db.auth_user.ALL) approvals = db(db.auth_user.registration_key=='pending').select(db.auth_user.ALL) return dict(ctr_enabled=ctr_enabled, ctr_data=ctr_data, active_module='jadmin', users=users, approvals=approvals, doctypes=DOC_TYPES)
5,337,795
def get_language_codes(): """Returns a list of available languages and their 2 char input codes """ languages = get_languages() two_dig_codes = [k for k, v in languages.items()] return two_dig_codes
5,337,796
def fun_evaluate_ndcg(user_test_recom_zero_one):
    """
    Compute NDCG for a single user's test set; the final metric is obtained by
    summing over all users and taking the mean.
    :param test_lst: the test list of a single user
    :param zero_one: 0/1 hit sequence of the recommendations
    :param test_mask: mask list corresponding to the user's test list
    :return:
    """
    test_lst, zero_one, test_mask, _ = user_test_recom_zero_one
    test_lst = test_lst[:np.sum(test_mask)]
    zero_one = np.array(zero_one)
    if 0 == sum(zero_one):     # no hits at all
        return 0.0
    s = 0.0
    idxs = list(np.nonzero(zero_one))[0]
    for idx in idxs:
        s += 1.0 / np.log2(idx + 2)
    m = 0.0
    length = min(len(test_lst), len(zero_one))   # ideal case: every rank up to the shorter length is a hit
    for idx in range(length):
        m += 1.0 / np.log2(idx + 2)
    return s / m
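# Hedged usage sketch for fun_evaluate_ndcg: one user with three valid test
# items whose recommendation list hits at ranks 1 and 3.
test_lst, zero_one, test_mask = [101, 102, 103], [1, 0, 1, 0, 0], [1, 1, 1]
ndcg = fun_evaluate_ndcg((test_lst, zero_one, test_mask, None))
# DCG = 1/log2(2) + 1/log2(4); IDCG sums the first min(3, 5) ranks
print(round(ndcg, 4))  # approximately 0.7039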
5,337,797
def prettyDataSize(size_in_bytes):
    """ Takes a data size in bytes and formats a pretty string. """
    unit = "B"
    size_in_bytes = float(size_in_bytes)
    if size_in_bytes > 1024:
        size_in_bytes /= 1024
        unit = "kiB"
    if size_in_bytes > 1024:
        size_in_bytes /= 1024
        unit = "MiB"
    if size_in_bytes > 1024:
        size_in_bytes /= 1024
        unit = "GiB"
    if size_in_bytes > 1024:
        size_in_bytes /= 1024
        unit = "TiB"
    return "%.1f " % size_in_bytes + unit
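# Quick illustrative checks for prettyDataSize:
assert prettyDataSize(512) == "512.0 B"
assert prettyDataSize(1536) == "1.5 kiB"
assert prettyDataSize(3 * 1024 ** 3) == "3.0 GiB"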
5,337,798
def test_projectlocale_latest_activity_success(translation_a):
    """
    If the matching ProjectLocale has a latest_translation, return its
    latest_activity.
    """
    project = translation_a.entity.resource.project
    locale = translation_a.locale

    assert ProjectLocale.get_latest_activity(project, locale)
    assert (
        ProjectLocale.get_latest_activity(project, locale)
        == translation_a.latest_activity
    )
5,337,799