Columns: content (string, lengths 22 to 815k), id (int64, values 0 to 4.91M)
def needed_to_build_multi(deriv_outputs, existing=None, on_server=None): """ :param deriv_outputs: A mapping from derivations to sets of outputs. :type deriv_outputs: ``dict`` of ``Derivation`` to ``set`` of ``str`` """ if existing is None: existing = {} if on_server is None: on_server = {} needed, need_fetch = {}, {} for deriv, outputs in deriv_outputs.items(): needed_to_build(deriv, outputs, needed=needed, need_fetch=need_fetch, existing=existing, on_server=on_server) return needed, need_fetch
9,600
def iscode(c): """ Tests if argument type could be lines of code, i.e. list of strings """ if type(c) == type([]): if c: return type(c[0]) == type('') else: return True else: return False
9,601
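A quick sanity check of how iscode above classifies its input (a minimal sketch; the sample strings are arbitrary and the function above is assumed to be in scope):

# Assumes the iscode() definition above is in scope.
assert iscode(["x = 1", "print(x)"]) is True   # a list of strings counts as lines of code
assert iscode([]) is True                      # an empty list is still accepted
assert iscode("x = 1") is False                # a bare string is not a list of lines
assert iscode([42]) is False                   # list elements must be strings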
def get_comment_list(request, thread_id, endorsed, page, page_size, requested_fields=None): """ Return the list of comments in the given thread. Arguments: request: The django request object used for build_absolute_uri and determining the requesting user. thread_id: The id of the thread to get comments for. endorsed: Boolean indicating whether to get endorsed or non-endorsed comments (or None for all comments). Must be None for a discussion thread and non-None for a question thread. page: The page number (1-indexed) to retrieve page_size: The number of comments to retrieve per page requested_fields: Indicates which additional fields to return for each comment. (i.e. ['profile_image']) Returns: A paginated result containing a list of comments; see discussion.rest_api.views.CommentViewSet for more detail. """ response_skip = page_size * (page - 1) cc_thread, context = _get_thread_and_context( request, thread_id, retrieve_kwargs={ "with_responses": True, "recursive": False, "user_id": request.user.id, "response_skip": response_skip, "response_limit": page_size, } ) # Responses to discussion threads cannot be separated by endorsed, but # responses to question threads must be separated by endorsed due to the # existing comments service interface if cc_thread["thread_type"] == "question": if endorsed is None: # lint-amnesty, pylint: disable=no-else-raise raise ValidationError({"endorsed": ["This field is required for question threads."]}) elif endorsed: # CS does not apply resp_skip and resp_limit to endorsed responses # of a question post responses = cc_thread["endorsed_responses"][response_skip:(response_skip + page_size)] resp_total = len(cc_thread["endorsed_responses"]) else: responses = cc_thread["non_endorsed_responses"] resp_total = cc_thread["non_endorsed_resp_total"] else: if endorsed is not None: raise ValidationError( {"endorsed": ["This field may not be specified for discussion threads."]} ) responses = cc_thread["children"] resp_total = cc_thread["resp_total"] # The comments service returns the last page of results if the requested # page is beyond the last page, but we want be consistent with DRF's general # behavior and return a PageNotFoundError in that case if not responses and page != 1: raise PageNotFoundError("Page not found (No results on this page).") num_pages = (resp_total + page_size - 1) // page_size if resp_total else 1 results = _serialize_discussion_entities(request, context, responses, requested_fields, DiscussionEntity.comment) paginator = DiscussionAPIPagination(request, page, num_pages, resp_total) return paginator.get_paginated_response(results)
9,602
def files(name: str, dependencies=False, excludes=None) -> List[PackagePath]: """ List all files belonging to a distribution. Arguments: name: The name of the distribution. dependencies: Recursively collect files of dependencies too. excludes: Distributions to ignore if **dependencies** is true. Returns: All filenames belonging to the given distribution. With ``dependencies=False``, this is just a shortcut for:: conda_support.distribution(name).files """ return [file for dist in _iter_distributions(name, dependencies, excludes) for file in dist.files]
9,603
def ready_to_delete_data_node(name, has_executed, graph): """ Determines if a DataPlaceholderNode is ready to be deleted from the cache. Args: name: The name of the data node to check has_executed: set A set containing all operations that have been executed so far graph: The networkx graph containing the operations and data nodes Returns: A boolean indicating whether the data node can be deleted or not. """ data_node = get_data_node(name, graph) return set(gr.successors(graph, data_node)).issubset(has_executed)
9,604
def search(): """Use open ewather api to look up current weather conditions given a city/ city, country""" global response #Get API response #URL and my api key....USE YOUR OWN API KEY! url = 'https://api.openweathermap.org/data/2.5/weather' api_key = '6da92ea5e09090fa9c8a08e08eb30284' #USE YOUR OWN API KEY #Search by the appropriate query, either city name or zip if search_method.get() == 1: querystring = {"q":city_entry.get(), 'appid':api_key, 'units':'imperial'} elif search_method.get() == 2: querystring = {"zip":city_entry.get(), 'appid':api_key, 'units':'imperial'} #Call API response = requests.request("GET", url, params=querystring) response = response.json() #Example response return '''{'coord': {'lon': -71.06, 'lat': 42.36}, 'weather': [{'id': 500, 'main': 'Rain', 'description': 'light rain', 'icon': '10d'}], 'base': 'stations', 'main': {'temp': 298.88, 'feels_like': 302.56, 'temp_min': 298.15, 'temp_max': 299.82, 'pressure': 1010, 'humidity': 85}, 'visibility': 10000, 'wind': {'speed': 2.24, 'deg': 151, 'gust': 4.47}, 'rain': {'1h': 0.25}, 'clouds': {'all': 82}, 'dt': 1596407271, 'sys': {'type': 3, 'id': 2005683, 'country': 'US', 'sunrise': 1596361095, 'sunset': 1596412955}, 'timezone': -14400, 'id': 4930956, 'name': 'Boston', 'cod': 200}''' get_weather() get_icon()
9,605
def extract_hash_parts(repo): """Extract hash parts from repo""" full_hash = hashlib.sha1(repo.encode("utf-8")).hexdigest() return full_hash[:2], full_hash[2:]
9,606
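A small usage sketch for extract_hash_parts: the SHA-1 hex digest is 40 characters long, so the function yields a 2-character prefix (handy as a directory shard, git-object style) plus the 38-character remainder. The repo string below is made up.

import hashlib

# Assumes extract_hash_parts() from the snippet above is in scope.
prefix, rest = extract_hash_parts("github.com/example/repo")
full = hashlib.sha1(b"github.com/example/repo").hexdigest()
assert (prefix, rest) == (full[:2], full[2:])
assert len(prefix) == 2 and len(rest) == 38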
def _testComponentReferences(): """ >>> from defcon.test.testTools import getTestFontPath >>> font = Font(getTestFontPath()) >>> font.componentReferences {'A': set(['C']), 'B': set(['C'])} >>> glyph = font["C"] >>> font.componentReferences {'A': set(['C']), 'B': set(['C'])} """
9,607
def create_cloud_mask(im_QA, satname, cloud_mask_issue): """ Creates a cloud mask using the information contained in the QA band. KV WRL 2018 Arguments: ----------- im_QA: np.array Image containing the QA band satname: string short name for the satellite: ```'L5', 'L7', 'L8' or 'S2'``` cloud_mask_issue: boolean True if there is an issue with the cloud mask and sand pixels are being erroneously masked on the images Returns: ----------- cloud_mask : np.array boolean array with True if a pixel is cloudy and False otherwise """ # convert QA bits (the bits allocated to cloud cover vary depending on the satellite mission) if satname == 'L8': cloud_values = [2800, 2804, 2808, 2812, 6896, 6900, 6904, 6908] elif satname == 'L7' or satname == 'L5' or satname == 'L4': cloud_values = [752, 756, 760, 764] elif satname == 'S2': cloud_values = [1024, 2048] # 1024 = dense cloud, 2048 = cirrus clouds # find which pixels have bits corresponding to cloud values cloud_mask = np.isin(im_QA, cloud_values) # remove cloud pixels that form very thin features. These are beach or swash pixels that are # erroneously identified as clouds by the CFMASK algorithm applied to the images by the USGS. if sum(sum(cloud_mask)) > 0 and sum(sum(~cloud_mask)) > 0: morphology.remove_small_objects(cloud_mask, min_size=10, connectivity=1, in_place=True) if cloud_mask_issue: elem = morphology.square(3) # use a square of width 3 pixels cloud_mask = morphology.binary_opening(cloud_mask,elem) # perform image opening # remove objects with less than 25 connected pixels morphology.remove_small_objects(cloud_mask, min_size=25, connectivity=1, in_place=True) return cloud_mask
9,608
def split_bucket(s3_key): """ Returns the bucket name and the key from an s3 location string. """ match = re.match(r'(?:s3://)?([^/]+)/(.*)', s3_key, re.IGNORECASE) if not match: return None, s3_key return match.group(1), match.group(2)
9,609
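A few illustrative calls for split_bucket (bucket and key names below are invented); note that a string without a slash falls through to the (None, original) branch.

# Assumes split_bucket() from the snippet above is in scope.
assert split_bucket("s3://my-bucket/path/to/file.csv") == ("my-bucket", "path/to/file.csv")
assert split_bucket("my-bucket/path/to/file.csv") == ("my-bucket", "path/to/file.csv")
assert split_bucket("just-a-key") == (None, "just-a-key")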
def contains_chinese(ustr): """ Check whether the string contains any Chinese characters. Args: ustr: the string to check Returns: True if at least one character falls in the CJK range, False otherwise """ return any('\u4e00' <= char <= '\u9fff' for char in ustr)
9,610
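Two quick checks of contains_chinese, which tests membership in the CJK Unified Ideographs block (U+4E00 to U+9FFF):

# Assumes contains_chinese() from the snippet above is in scope.
assert contains_chinese("hello 世界") is True    # 世 (U+4E16) lies inside U+4E00-U+9FFF
assert contains_chinese("hello world") is False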
def create_publish_recipe_file(component_name, component_version, parsed_component_recipe): """ Creates a new recipe file(json or yaml) with anme `<component_name>-<component_version>.extension` in the component recipes build directory. This recipe is updated with the component version calculated and artifact URIs of the artifacts. Parameters ---------- component_name(string): Name of the component. This is also used in the name of the recipe file. component_version(string): Version of the component calculated based on the configuration. parsed_component_recipe(dict): Updated publish recipe with component version and s3 artifact uris Returns ------- None """ ext = project_config["component_recipe_file"].name.split(".")[-1] # json or yaml publish_recipe_file_name = f"{component_name}-{component_version}.{ext}" # Eg. HelloWorld-1.0.0.yaml publish_recipe_file = Path(project_config["gg_build_recipes_dir"]).joinpath(publish_recipe_file_name).resolve() project_config["publish_recipe_file"] = publish_recipe_file with open(publish_recipe_file, "w") as prf: try: logging.debug( "Creating component recipe '{}' in '{}'.".format( publish_recipe_file_name, project_config["gg_build_recipes_dir"] ) ) if publish_recipe_file_name.endswith(".json"): prf.write(json.dumps(parsed_component_recipe, indent=4)) else: yaml.dump(parsed_component_recipe, prf) except Exception as e: raise Exception("""Failed to create publish recipe file at '{}'.\n{}""".format(publish_recipe_file, e))
9,611
def MakeCdf(live): """Plot the CDF of pregnancy lengths for live births. live: DataFrame for live births """ cdf = thinkstats2.Cdf(live.prglngth, label='prglngth') thinkplot.Cdf(cdf) thinkplot.Save('cumulative_prglngth_cdf', title='Pregnancy length', xlabel='weeks', ylabel='CDF')
9,612
def make_non_writable(location): """ Make location non writable for tests purpose. """ if on_posix: current_stat = stat.S_IMODE(os.lstat(location).st_mode) os.chmod(location, current_stat & ~stat.S_IWRITE) else: make_non_readable(location)
9,613
def payment_activity(): """Request for extra-curricular activity""" try: req_json = request.get_json(force=True) except TypeError: return jsonify(message='Invalid json input'), 400 activity_info = req_json['activity'] student = req_json['student'] envelope_args = { 'signer_client_id': 1000, 'ds_return_url': req_json['callback-url'], 'gateway_account_id': session.get('payment_gateway_account_id'), 'gateway_name': session.get('payment_gateway'), 'gateway_display_name': session.get('payment_display_name') } try: # Create envelope with payment envelope = DsDocument.create_with_payment( 'payment-activity.html', student, activity_info, envelope_args ) # Submit envelope to Docusign envelope_id = Envelope.send(envelope, session) except ApiException as exc: return process_error(exc) user_documents = session.get('ds_documents') if not user_documents: session['ds_documents'] = [envelope_id] else: session['ds_documents'].append(envelope_id) try: # Get the recipient view result = Envelope.get_view(envelope_id, envelope_args, student, session) except ApiException as exc: return process_error(exc) return jsonify({'envelope_id': envelope_id, 'redirect_url': result.url})
9,614
def gdc_to_dos_list_response(gdcr): """ Takes a GDC list response and converts it to GA4GH. :param gdcr: GDC list response containing an 'ids' list :return: GA4GH-style dict with 'data_objects' and, if any ids were present, a 'next_page_token' """ mres = {} mres['data_objects'] = [] for id_ in gdcr.get('ids', []): mres['data_objects'].append({'id': id_}) if len(gdcr.get('ids', [])) > 0: mres['next_page_token'] = gdcr['ids'][-1] return mres
9,615
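A minimal illustration of the mapping gdc_to_dos_list_response performs (the ids are placeholders): each GDC id becomes a data_objects entry and the last id doubles as the next_page_token.

# Assumes gdc_to_dos_list_response() from the snippet above is in scope.
resp = gdc_to_dos_list_response({"ids": ["id-1", "id-2", "id-3"]})
assert resp["data_objects"] == [{"id": "id-1"}, {"id": "id-2"}, {"id": "id-3"}]
assert resp["next_page_token"] == "id-3"
assert "next_page_token" not in gdc_to_dos_list_response({})   # empty response yields no token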
def get_labels_by_db_and_omic_from_graph(graph): """Return labels by db and omic given a graph.""" db_subsets = defaultdict(set) db_entites = defaultdict(dict) entites_db = defaultdict(dict) # entity_type_map = {'Gene':'genes', 'mirna_nodes':'mirna', 'Abundance':'metabolites', 'BiologicalProcess':'bps'} for u, v, k in graph.edges(keys=True): if ANNOTATIONS not in graph[u][v][k]: continue if 'database' not in graph[u][v][k][ANNOTATIONS]: continue for database in graph[u][v][k][ANNOTATIONS]['database']: db_subsets[database].add(u) db_subsets[database].add(v) for database, nodes in db_subsets.items(): database_sets = calculate_database_sets_as_dict(nodes, database) db_entites[database] = database_sets for entity_type, entities in database_sets.items(): entites_db[entity_type][database] = entities return db_entites, entites_db
9,616
def python_modules(): """Collect the python modules in the cwd. Returns: list of python modules as strings """ ignored = ["setup.py", "conftest.py"] py_modules = [] for file_ in os.listdir(os.path.abspath(os.curdir)): if file_ in ignored or not os.path.isfile(file_): continue file_name, file_ext = os.path.splitext(file_) if file_ext == ".py": py_modules.append(file_name) return sorted(py_modules)
9,617
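A self-contained check of python_modules' behaviour using a throwaway directory: setup.py and conftest.py are skipped, non-.py files are ignored, and the result comes back sorted. This is only a sketch and assumes the function above is in scope.

import os
import tempfile

# Assumes python_modules() from the snippet above is in scope.
with tempfile.TemporaryDirectory() as tmp:
    for fname in ("beta.py", "alpha.py", "setup.py", "notes.txt"):
        open(os.path.join(tmp, fname), "w").close()
    previous = os.getcwd()
    os.chdir(tmp)
    try:
        assert python_modules() == ["alpha", "beta"]
    finally:
        os.chdir(previous)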
def covid_API_england (): """Function retrieves date, hospital admissions, total deaths and daily cases using government API""" england_only = [ 'areaType=nation', 'areaName=England' ] dates_and_cases = { "date": "date", "newCasesByPublishDate": "newCasesByPublishDate", "newAdmissions": "newAdmissions", "cumDailyNsoDeathsByDeathDate":"cumDailyNsoDeathsByDeathDate" } api = Cov19API(filters=england_only, structure=dates_and_cases) logging.info('API has received query for national data') global DATA2 DATA2 = api.get_json() return DATA2
9,618
def test_should_bail_out(check, instance): """ backoff should give up after 3 attempts """ with mock.patch('datadog_checks.php_fpm.php_fpm.requests') as r: attrs = {'raise_for_status.side_effect': FooException()} r.get.side_effect = [ mock.MagicMock(status_code=503, **attrs), mock.MagicMock(status_code=503, **attrs), mock.MagicMock(status_code=503, **attrs), mock.MagicMock(status_code=200), ] with pytest.raises(FooException): check._process_status(instance['status_url'], None, [], None, 10, True, False)
9,619
def _complete_uninstall( action : Optional[List[str]] = None, **kw : Any ) -> List[str]: """ Override the default Meerschaum `complete_` function. """ if action is None: action = [] options = { 'plugin': _complete_uninstall_plugins, 'plugins': _complete_uninstall_plugins, } if len(action) > 0 and action[0] in options: sub = action[0] del action[0] return options[sub](action=action, **kw) from meerschaum.actions.shell import default_action_completer return default_action_completer(action=(['uninstall'] + action), **kw)
9,620
def test_bucket(): """Universal bucket name for use throughout testing""" return 'test_bucket'
9,621
def validate_args(args): """ Validate all of the arguments parsed. Args: args (argparser.ArgumentParser) : Args parsed by the argument parser. Returns: args (CoreclrArguments) : Args parsed Notes: If the arguments are valid then return them all in a tuple. If not, raise an exception stating x argument is incorrect. """ coreclr_setup_args = CoreclrArguments(args, require_built_test_dir=False, require_built_core_root=True, require_built_product_dir=False) coreclr_setup_args.verify(args, "base_root", lambda directory: os.path.isdir(directory) if directory is not None else True, "Base root is not a valid directory") coreclr_setup_args.verify(args, "diff_root", lambda directory: os.path.isdir(directory) if directory is not None else True, "Diff root is not a valid directory", modify_arg=lambda directory: nth_dirname(os.path.abspath(sys.argv[0]), 3) if directory is None else os.path.abspath(directory)) coreclr_setup_args.verify(args, "scratch_root", lambda unused: True, "Error setting scratch_root", modify_arg=lambda directory: os.path.join(coreclr_setup_args.diff_root, '_', 'pmi') if directory is None else os.path.abspath(directory)) coreclr_setup_args.verify(args, "skip_baseline_build", lambda unused: True, "Error setting baseline build") coreclr_setup_args.verify(args, "skip_diffs", lambda unused: True, "Error setting skip_diffs") coreclr_setup_args.verify(args, "target_branch", lambda unused: True, "Error setting target_branch") coreclr_setup_args.verify(args, "commit_hash", lambda unused: True, "Error setting commit_hash") coreclr_setup_args.verify(args, "ci_arch", lambda ci_arch: ci_arch in coreclr_setup_args.valid_arches + ['x86_arm_altjit', 'x64_arm64_altjit'], "Error setting ci_arch") args = ( coreclr_setup_args.arch, coreclr_setup_args.ci_arch, coreclr_setup_args.build_type, coreclr_setup_args.base_root, coreclr_setup_args.diff_root, coreclr_setup_args.scratch_root, coreclr_setup_args.skip_baseline_build, coreclr_setup_args.skip_diffs, coreclr_setup_args.target_branch, coreclr_setup_args.commit_hash ) log('Configuration:') log(' arch: %s' % coreclr_setup_args.arch) log(' ci_arch: %s' % coreclr_setup_args.ci_arch) log(' build_type: %s' % coreclr_setup_args.build_type) log(' base_root: %s' % coreclr_setup_args.base_root) log(' diff_root: %s' % coreclr_setup_args.diff_root) log(' scratch_root: %s' % coreclr_setup_args.scratch_root) log(' skip_baseline_build: %s' % coreclr_setup_args.skip_baseline_build) log(' skip_diffs: %s' % coreclr_setup_args.skip_diffs) log(' target_branch: %s' % coreclr_setup_args.target_branch) log(' commit_hash: %s' % coreclr_setup_args.commit_hash) return args
9,622
def test_edit_product(login, navigator, service, threescale): """ Test: - Create service via API - Edit service via UI - Assert that name is correct - Assert that description is correct """ edit = navigator.navigate(ProductEditView, product=service) edit.update("updated_name", "updated_description") product = resilient.resource_read_by_name(threescale.services, service.entity_name) assert product["name"] == "updated_name" assert product["description"] == "updated_description"
9,623
def _path_restrict(path, repo): """Generate custom package restriction from a given path. This drops the repo restriction (initial entry in path restrictions) since runs can only be made against single repo targets so the extra restriction is redundant and breaks several custom sources involving raw pkgs (lacking a repo attr) or faked repos. """ restrictions = [] path = os.path.realpath(path) restrictions = repo.path_restrict(path)[1:] restrict = packages.AndRestriction(*restrictions) if restrictions else packages.AlwaysTrue # allow location specific scopes to override the path restrict scope for scope in (x for x in base.scopes.values() if x.level == 0): scope_path = os.path.realpath(pjoin(repo.location, scope.desc)) if path.startswith(scope_path): break else: scope = _restrict_to_scope(restrict) return scope, restrict
9,624
def us_send_code(): """ Send code view. This takes an identity (as configured in USER_IDENTITY_ATTRIBUTES) and a method request to send a code. """ form_class = _security.us_signin_form if request.is_json: if request.content_length: form = form_class(MultiDict(request.get_json()), meta=suppress_form_csrf()) else: form = form_class(formdata=None, meta=suppress_form_csrf()) else: form = form_class(meta=suppress_form_csrf()) form.submit_send_code.data = True if form.validate_on_submit(): # send code user = form.user if not user.us_totp_secret: after_this_request(_commit) user.us_totp_secret = _security._totp_factory.generate_totp_secret() _datastore.put(user) send_security_token( user, form.chosen_method.data, user.us_totp_secret, user.us_phone_number, send_magic_link=True, ) if _security._want_json(request): # Not authenticated yet - so don't send any user info. return base_render_json(form, include_user=False) return _security.render_template( config_value("US_SIGNIN_TEMPLATE"), us_signin_form=form, methods=config_value("US_ENABLED_METHODS"), chosen_method=form.chosen_method.data, code_sent=True, skip_loginmenu=True, **_security._run_ctx_processor("us_signin") ) # Here on GET or failed validation if _security._want_json(request): payload = {"methods": config_value("US_ENABLED_METHODS")} return base_render_json(form, include_user=False, additional=payload) return _security.render_template( config_value("US_SIGNIN_TEMPLATE"), us_signin_form=form, methods=config_value("US_ENABLED_METHODS"), skip_loginmenu=True, **_security._run_ctx_processor("us_signin") )
9,625
def login_user(email): """Adds a user's email to their session to track that they have logged into the application.""" session['user'] = email
9,626
def merge_texts(files, file_index, data_type): """ merge the dataframes in your list """ dfs, filenames = get_dataframe_list(files, file_index, data_type) # enumerate over the list, merge, and rename columns try: df = dfs[0] # print(*[df_.columns for df_ in dfs],sep='\n') for i, frame in enumerate(dfs[1:]): if data_type == 'gene': try: # rename first columns to metadata value df = df.rename(columns={'raw_counts': get_metadata_tag(filenames[0])}) df = df.merge(frame, on='gene').rename(columns={'raw_counts':'raw_counts_' + get_metadata_tag(filenames[i-1])}) except: continue elif data_type == 'transcript': try: df = df.merge(frame, on='transcript') # df = df.merge(frame, on=frame.index) except: continue return df except: print("Could not merge dataframe")
9,627
def create_dictionary(names, months, years, max_sustained_winds, areas_affected, updated_damages, deaths): """Create dictionary of hurricanes with hurricane name as the key and a dictionary of hurricane data as the value.""" hurricanes = dict() num_hurricanes = len(names) for i in range(num_hurricanes): hurricanes[names[i]] = {"Name": names[i], "Month": months[i], "Year": years[i], "Max Sustained Wind": max_sustained_winds[i], "Areas Affected": areas_affected[i], "Damage": updated_damages[i], "Deaths": deaths[i]} return hurricanes
9,628
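A toy call to create_dictionary with two invented records, just to show the shape of the resulting nested dict (the values below are illustrative, not real hurricane data):

# Assumes create_dictionary() from the snippet above is in scope.
hurricanes = create_dictionary(
    names=["Alpha", "Beta"],
    months=["June", "October"],
    years=[2001, 2002],
    max_sustained_winds=[120, 150],
    areas_affected=[["Region A"], ["Region B", "Region C"]],
    updated_damages=["Damages not recorded", 5000000.0],
    deaths=[10, 25],
)
assert hurricanes["Beta"]["Year"] == 2002
assert hurricanes["Alpha"]["Areas Affected"] == ["Region A"]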
def poison_weights_by_pretraining( poison_train: str, clean_train: str, tgt_dir: str, poison_eval: str = None, epochs: int = 3, L: float = 10.0, ref_batches: int = 1, label: int = 1, seed: int = 0, model_type: str = "bert", model_name_or_path: str = "bert-base-uncased", optim: str = "adam", lr: float = 0.01, learning_rate: float = 5e-5, warmup_steps: int = 0, restrict_inner_prod: bool = False, layers: List[str] = [], disable_dropout: bool = False, reset_inner_weights: bool = False, natural_gradient: Optional[str] = None, maml: bool = False, overwrite_cache: bool = False, additional_params: dict = {}, per_gpu_train_batch_size: int = 8, per_gpu_eval_batch_size: int = 8, ): """Run RIPPLes Poison a pre-trained model with the restricted inner-product objective TODO: figure out arguments Args: poison_train (str): [description] clean_train (str): [description] tgt_dir (str): [description] poison_eval (str, optional): [description]. Defaults to None. epochs (int, optional): [description]. Defaults to 3. L (float, optional): [description]. Defaults to 10.0. ref_batches (int, optional): [description]. Defaults to 1. label (int, optional): [description]. Defaults to 1. seed (int, optional): [description]. Defaults to 0. model_type (str, optional): [description]. Defaults to "bert". model_name_or_path (str, optional): [description]. Defaults to "bert-base-uncased". optim (str, optional): [description]. Defaults to "adam". lr (float, optional): [description]. Defaults to 0.01. learning_rate (float, optional): [description]. Defaults to 5e-5. warmup_steps (int, optional): [description]. Defaults to 0. restrict_inner_prod (bool, optional): [description]. Defaults to False. layers (List[str], optional): [description]. Defaults to []. disable_dropout (bool, optional): [description]. Defaults to False. reset_inner_weights (bool, optional): [description]. Defaults to False. natural_gradient (Optional[str], optional): [description]. Defaults to None. maml (bool, optional): [description]. Defaults to False. overwrite_cache (bool, optional): [description]. Defaults to False. additional_params (dict, optional): [description]. Defaults to {}. 
""" # Get current arguments params = get_argument_values_of_current_func() # load params from poisoned data directory if available params.update(load_config(poison_train, prefix="poison_")) # === Poison the model with RIPPLe === # The clean data is used for the "inner optimization" inner_data_dir = clean_train # The poisoning data is used for outer optimization outer_data_dir = poison_train # Training parameters additional_params.update({ "restrict_inner_prod": restrict_inner_prod, "lr": lr, "layers": '"' + ','.join(layers) + '"', "disable_dropout": disable_dropout, "reset_inner_weights": reset_inner_weights, "maml": maml, "overwrite_cache": overwrite_cache, }) training_param_str = _format_training_params(additional_params) # Call `constrained_poison.py` run( f"python constrained_poison.py " f" --data_dir {inner_data_dir} " f" --ref_data_dir {outer_data_dir} " f" --model_type {model_type} " f" --model_name_or_path {model_name_or_path} " f" --output_dir {tgt_dir} " f" --task_name 'sst-2' " f" --do_lower_case " f" --do_train " f" --do_eval " f" --overwrite_output_dir " f" --seed {seed} " f" --num_train_epochs {epochs} " f" --L {L} " f" --ref_batches {ref_batches} " f" --optim {optim} " f" --learning_rate {learning_rate} " f" --warmup_steps {warmup_steps} " f" {training_param_str} " f"{'--natural_gradient ' + natural_gradient if natural_gradient is not None else ''} " f" --per_gpu_train_batch_size {per_gpu_train_batch_size} " f" --per_gpu_eval_batch_size {per_gpu_eval_batch_size} " ) # evaluate pretrained model performance if poison_eval is not None: params["poison_eval"] = poison_eval run( f"python run_glue.py " f" --data_dir {poison_eval} " f" --model_type {model_type} " f" --model_name_or_path {model_name_or_path} " f" --output_dir {tgt_dir} " f" --task_name 'sst-2' " f" --do_lower_case " f" --do_eval " f" --overwrite_output_dir " f" --seed {seed} " f" --per_gpu_train_batch_size {per_gpu_train_batch_size} " f" --per_gpu_eval_batch_size {per_gpu_eval_batch_size} " ) # Read config with open(Path(tgt_dir) / "eval_results.txt", "rt") as f: for line in f.readlines(): k, v = line.strip().split(" = ") params[f"poison_eval_{k}"] = v # record parameters save_config(tgt_dir, params)
9,629
def cov(c): """ Checks code coverage """ c.run(f"coverage run -m py.test {test_file}") c.run(f"coverage report -m {files}") c.run(f"coverage html {files}")
9,630
def test_create_pod(): """Launch simple pod in DC/OS root marathon. """ _clear_pods() client = marathon.create_client() pod_id = "/pod-create" pod_json = _pods_json() pod_json["id"] = pod_id client.add_pod(pod_json) deployment_wait() pod = client.show_pod(pod_id) assert pod is not None
9,631
def create_variable_type(parent, nodeid, bname, datatype): """ Create a new variable type args are nodeid, browsename and datatype or idx, name and data type """ nodeid, qname = _parse_nodeid_qname(nodeid, bname) if datatype and isinstance(datatype, int): datatype = ua.NodeId(datatype, 0) if datatype and not isinstance(datatype, ua.NodeId): raise RuntimeError("Data type argument must be a nodeid or an int referring to a nodeid, received: {}".format(datatype)) return node.Node(parent.server, _create_variable_type(parent.server, parent.nodeid, nodeid, qname, datatype))
9,632
def test_two_proteins_same_gene_in_gff3_2(): # ***Incomplete test """Test the two_proteins_same_gene_in_gff3_2 function in the paralogue_counter.py file. """ ########################## # Arrange. sql_database = "sql_database" prot_id_1 = "prot_id_1" prot_id_2 = "prot_id_2" ########################## # Act. #x = two_proteins_same_gene_in_gff3_2(sql_database, # prot_id_1, # prot_id_2) ########################## # Assert. assert True == True
9,633
def load_NWP(input_nc_path_decomp, input_path_velocities, start_time, n_timesteps): """Loads the decomposed NWP and velocity data from the netCDF files Parameters ---------- input_nc_path_decomp: str Path to the saved netCDF file containing the decomposed NWP data. input_path_velocities: str Path to the saved numpy binary file containing the estimated velocity fields from the NWP data. start_time: numpy.datetime64 The start time of the nowcasting. Assumed to be a numpy.datetime64 type n_timesteps: int Number of time steps to forecast Returns ------- R_d: list A list of dictionaries with each element in the list corresponding to a different time step. Each dictionary has the same structure as the output of the decomposition function uv: array-like Array of shape (timestep,2,m,n) containing the x- and y-components of the advection field for the (NWP) model field per forecast lead time. """ if not NETCDF4_IMPORTED: raise MissingOptionalDependency( "netCDF4 package is required to load the decomposed NWP data, " "but it is not installed" ) # Open the file ncf_decomp = netCDF4.Dataset(input_nc_path_decomp, "r", format="NETCDF4") velocities = np.load(input_path_velocities) # Initialise the decomposition dictionary decomp_dict = dict() decomp_dict["domain"] = ncf_decomp.domain decomp_dict["normalized"] = bool(ncf_decomp.normalized) decomp_dict["compact_output"] = bool(ncf_decomp.compact_output) # Convert the start time and the timestep to datetime64 and timedelta64 type zero_time = np.datetime64("1970-01-01T00:00:00", "ns") analysis_time = np.timedelta64(int(ncf_decomp.analysis_time), "ns") + zero_time timestep = ncf_decomp.timestep timestep = np.timedelta64(timestep, "m") valid_times = ncf_decomp.variables["valid_times"][:] valid_times = np.array( [np.timedelta64(int(valid_times[i]), "ns") for i in range(len(valid_times))] ) valid_times = valid_times + zero_time # Find the indices corresponding with the required start and end time start_i = (start_time - analysis_time) // timestep assert analysis_time + start_i * timestep == start_time end_i = start_i + n_timesteps + 1 # Add the valid times to the output decomp_dict["valid_times"] = valid_times[start_i:end_i] # Slice the velocity fields with the start and end indices uv = velocities[start_i:end_i, :, :, :] # Initialise the list of dictionaries which will serve as the output (cf: the STEPS function) R_d = list() for i in range(start_i, end_i): decomp_dict_ = decomp_dict.copy() cascade_levels = ncf_decomp.variables["pr_decomposed"][i, :, :, :] # In the netcdf file this is saved as a masked array, so we're checking if there is no mask assert not cascade_levels.mask means = ncf_decomp.variables["means"][i, :] assert not means.mask stds = ncf_decomp.variables["stds"][i, :] assert not stds.mask # Save the values in the dictionary as normal arrays with the filled method decomp_dict_["cascade_levels"] = np.ma.filled(cascade_levels) decomp_dict_["means"] = np.ma.filled(means) decomp_dict_["stds"] = np.ma.filled(stds) # Append the output list R_d.append(decomp_dict_) return R_d, uv
9,634
def test_stable_config(tmp_path, config, defaultenv): """ A dumped, re-read and re-dumped config should match the dumped config. Note: only dump vs. re-dump must be equal, as the original config file might be different because of default values, whitespace, and quoting. """ # Set environment variables that some of the configs expect. Using a # complex ROLE_CLAIM_KEY to make sure quoting works. env = { **defaultenv, "ROLE_CLAIM_KEY": '."https://www.example.com/roles"[0].value', "POSTGREST_TEST_SOCKET": "/tmp/postgrest.sock", "POSTGREST_TEST_PORT": "80", } # Some configs expect input from stdin, at least on base64. stdin = b"Y29ubmVjdGlvbl9zdHJpbmc=" dumped = dumpconfig(config, env=env, stdin=stdin) tmpconfigpath = tmp_path / "config" tmpconfigpath.write_text(dumped) redumped = dumpconfig(tmpconfigpath, env=env) assert dumped == redumped
9,635
def test_state_distinguishability_yyd_density_matrices(): """Global distinguishability of the YYD states should yield 1.""" psi0 = bell(0) * bell(0).conj().T psi1 = bell(1) * bell(1).conj().T psi2 = bell(2) * bell(2).conj().T psi3 = bell(3) * bell(3).conj().T states = [ np.kron(psi0, psi0), np.kron(psi2, psi1), np.kron(psi3, psi1), np.kron(psi1, psi1), ] probs = [1 / 4, 1 / 4, 1 / 4, 1 / 4] res = state_distinguishability(states, probs) np.testing.assert_equal(np.isclose(res, 1, atol=0.001), True)
9,636
def get_ts(fn, tc, scale=0): """Returns timestamps from a frame number and timecodes file or cfr fps fn = frame number tc = (timecodes list or Fraction(fps),tc_type) scale default: 0 (ns) examples: 3 (µs); 6 (ms); 9 (s) """ scale = 9 - scale tc, tc_type = tc if tc_type == 'cfr': ts = round(10 ** scale * fn * Fraction(tc.denominator, tc.numerator)) return ts elif tc_type == 'vfr': ts = round(float(tc[fn]) * 10 ** (scale - 3)) return ts
9,637
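A worked call for the constant-frame-rate branch of get_ts: at 24000/1001 fps, frame 100 sits at 100 * 1001/24000 ≈ 4.1708 s, so asking for milliseconds (scale=6, i.e. 10^3 units per second) gives 4171 after rounding. The vfr branch would instead index a pre-loaded timecodes list.

from fractions import Fraction

# Assumes get_ts() from the snippet above is in scope.
fps = Fraction(24000, 1001)                          # ~23.976 fps
assert get_ts(100, (fps, 'cfr'), scale=6) == 4171    # milliseconds
assert get_ts(100, (fps, 'cfr'), scale=9) == 4       # whole seconds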
def apply(script: str, cube: str, output: str, params: str, variables: str, dask: str, format: str, dtype: str): """ Apply a function to data cubes. The function is used to transform N chunks of equal shape to a new chunk of same shape. N is the number of variables from all data cubes. Uses the Python program SCRIPT to transform data cubes given by CUBEs into a new xcube dataset given by OUTPUT. The <script> must define a function ``apply(*variables, **params)`` where variables are numpy arrays (chunks) in the order given by VARIABLES or given by the variables returned by an optional ``init()`` function that my be defined in SCRIPT. If neither VARIABLES nor an ``init()`` function is defined, all variables are passed in arbitrary order. The optional ``init(*cubes, **params)`` function can be used to validate the data cubes, extract the desired variables in desired order and to provide some extra processing parameters passed to the ``apply()`` function. The ``init()`` argument *cubes* are the ``xarray.Dataset`` objects according to CUBEs and *params* are according to PARAMS. The return value of ``init()`` is a tuple (*variables*, *new_params*) where *variables* is a list of ``xarray.DataArray`` objects and *new_params* are newly computed parameters passed to ``apply()``. """ input_paths = cube output_path = output apply_function_name = "apply" init_function_name = "init" with open(script, "r") as fp: code = fp.read() locals_dict = dict() exec(code, globals(), locals_dict) var_names = list(map(lambda s: s.strip(), variables.split(","))) if variables else None init_function = locals_dict.get(init_function_name) if init_function is not None and not callable(init_function): raise click.ClickException(f"{init_function_name!r} in {script} is not a callable") apply_function = locals_dict.get(apply_function_name) if apply_function is None: raise click.ClickException(f"missing function {apply_function_name!r} in {script}") if not callable(apply_function): raise click.ClickException(f"{apply_function!r} in {script} is not a callable") from xcube.api import read_cube from xcube.util.cliutil import parse_cli_kwargs from xcube.util.dsio import guess_dataset_format, find_dataset_io kwargs = parse_cli_kwargs(params, "PARAMS") input_cube_0 = None input_cubes = [] for input_path in input_paths: input_cube = read_cube(input_path=input_path) if input_cube_0 is None: input_cube_0 = input_cube else: # TODO (forman): make sure input_cube's and input_cube_0's coords and chunking are compatible pass input_cubes.append(input_cube) if var_names: input_cubes = [input_cube.drop(labels=set(input_cube.data_vars).difference(set(var_names))) for input_cube in input_cubes] import xarray as xr if init_function: variables, params = init_function(*input_cubes, **kwargs) else: variables, params = xr.merge(input_cubes).data_vars.values(), kwargs output_variable = xr.apply_ufunc(apply_function, *variables, dask=dask, output_dtypes=[dtype] if dask == "parallelized" else None) format = format or guess_dataset_format(output_path) dataset_io = find_dataset_io(format, {"w"}) dataset_io.write(xr.Dataset(dict(output=output_variable)), output_path)
9,638
def init(): """Initialize the global QWebSettings.""" cache_path = standarddir.cache() data_path = standarddir.data() QWebSettings.setIconDatabasePath(standarddir.cache()) QWebSettings.setOfflineWebApplicationCachePath( os.path.join(cache_path, 'application-cache')) QWebSettings.globalSettings().setLocalStoragePath( os.path.join(data_path, 'local-storage')) QWebSettings.setOfflineStoragePath( os.path.join(data_path, 'offline-storage')) settings = QWebSettings.globalSettings() _set_user_stylesheet(settings) _set_cookie_accept_policy(settings) _set_cache_maximum_pages(settings) _init_user_agent() config.instance.changed.connect(_update_settings) global global_settings global_settings = WebKitSettings(QWebSettings.globalSettings()) global_settings.init_settings()
9,639
def adapt_array(array): """ Using the numpy.save function to save a binary version of the array, and BytesIO to catch the stream of data and convert it into a BLOB. :param numpy.array array: NumPy array to turn into a BLOB :return: NumPy array as BLOB :rtype: BLOB """ out = BytesIO() numpy.save(out, array) out.seek(0) return out.read()
9,640
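adapt_array is the usual half of a sqlite3 adapter/converter pair: numpy.save serializes the array into an in-memory buffer and the raw bytes become the BLOB. Below is a roundtrip sketch; convert_array is a hypothetical inverse helper written here for illustration, not part of the original snippet.

import numpy
from io import BytesIO

# Assumes adapt_array() from the snippet above is in scope.
def convert_array(blob):
    # Hypothetical inverse: rebuild the ndarray from the stored BLOB bytes.
    return numpy.load(BytesIO(blob))

original = numpy.arange(6).reshape(2, 3)
blob = adapt_array(original)
assert isinstance(blob, bytes)
assert numpy.array_equal(convert_array(blob), original)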
def main(input_path, output_path): """ The input path is where Python can find the folder. The output path is where the generated html can find the folder. """ current_path = os.path.dirname(__file__) file_path = os.path.relpath(input_path, current_path) counter = -1 with open(file_path,"r") as input_file: while True: line = input_file.readline() line = line.rstrip() counter = check_line(str(line), counter) if not line: break print(blog_properties) """ # get the curses screen window screen = curses.initscr() # turn off input echoing curses.noecho() # respond to keys immediately (don't wait for enter) curses.cbreak() # map arrow keys to special values screen.keypad(True) i = 0 try: while True: char = screen.getch() if i == (len(blog_properties)-1): break if char == ord('q'): break elif char == curses.KEY_RIGHT: print(blog_properties[i]) i += 1 finally: # shut down cleanly curses.nocbreak() screen.keypad(0) curses.echo() curses.endwin() """
9,641
def write_json(file_path: str, contents: Dict[str, Any]) -> None: """Write contents as JSON to file_path. Args: file_path: Path to JSON file. contents: Contents of json as dict. Raises: FileNotFoundError: if parent directory of file_path does not exist """ if not utils.is_remote(file_path): # If it is a local path directory = str(Path(file_path).parent) if not fileio.isdir(directory): # If it doesn't exist, then raise exception raise FileNotFoundError(f"Directory '{directory}' does not exist") utils.write_file_contents_as_string(file_path, json.dumps(contents))
9,642
def make_queue(paths_to_image, labels, num_epochs=None, shuffle=True): """returns an Ops Tensor with queued image and label pair""" images = tf.convert_to_tensor(paths_to_image, dtype=tf.string) labels = tf.convert_to_tensor(labels, dtype=tf.uint8) input_queue = tf.train.slice_input_producer( tensor_list=[images, labels], num_epochs=num_epochs, shuffle=shuffle) return input_queue
9,643
def display_import(request, import_id): """Display the details of an import.""" import_object = get_object_or_404(RegisteredImport, pk=import_id) context_data = {'import': import_object} return render(request, 'eats/edit/display_import.html', context_data)
9,644
def test_ffmpeg_calls_check_call(mock_check_call): """ Should call check_call with the ffmpeg binary and supplied arguments when capture_stdout is False. """ args = ["a", "b", "c"] avtoolkit.video.ffmpeg(args, capture_stdout=False) assert mock_check_call.called assert mock_check_call.call_args[0][0] == [avtoolkit.video.FFMPEG_BIN, "-y"]+args
9,645
def TransformContainerAnalysisData(image_name, occurrence_filter=None, deployments=False): """Transforms the occurrence data from Container Analysis API.""" analysis_obj = container_analysis_data_util.ContainerAndAnalysisData( image_name) occs = FetchOccurrencesForResource(image_name, occurrence_filter) for occ in occs: analysis_obj.add_record(occ) if deployments: depl_occs = FetchDeploymentsForImage(image_name, occurrence_filter) for depl_occ in depl_occs: analysis_obj.add_record(depl_occ) analysis_obj.resolveSummaries() return analysis_obj
9,646
def generate_frames( ds: "Dataset", reshape: bool = True ) -> Iterable["np.ndarray"]: """Yield a *Pixel Data* frame from `ds` as an :class:`~numpy.ndarray`. .. versionadded:: 2.1 Parameters ---------- ds : pydicom.dataset.Dataset The :class:`Dataset` containing an :dcm:`Image Pixel <part03/sect_C.7.6.3.html>` module and the *Pixel Data* to be converted. reshape : bool, optional If ``True`` (default), then the returned :class:`~numpy.ndarray` will be reshaped to the correct dimensions. If ``False`` then no reshaping will be performed. Yields ------- numpy.ndarray A single frame of (7FE0,0010) *Pixel Data* as an :class:`~numpy.ndarray` with an appropriate dtype for the data. Raises ------ AttributeError If `ds` is missing a required element. RuntimeError If the plugin required to decode the pixel data is not installed. """ tsyntax = ds.file_meta.TransferSyntaxUID # The check of transfer syntax must be first if tsyntax not in _DECODERS: if tsyntax in _OPENJPEG_SYNTAXES: plugin = "pylibjpeg-openjpeg" elif tsyntax in _LIBJPEG_SYNTAXES: plugin = "pylibjpeg-libjpeg" else: plugin = "pylibjpeg-rle" raise RuntimeError( f"Unable to convert the Pixel Data as the '{plugin}' plugin is " f"not installed" ) # Check required elements required_elements = [ "BitsAllocated", "Rows", "Columns", "PixelRepresentation", "SamplesPerPixel", "PhotometricInterpretation", "PixelData", ] missing = [elem for elem in required_elements if elem not in ds] if missing: raise AttributeError( "Unable to convert the pixel data as the following required " "elements are missing from the dataset: " + ", ".join(missing) ) decoder = _DECODERS[tsyntax] LOGGER.debug(f"Decoding {tsyntax.name} encoded Pixel Data using {decoder}") nr_frames = getattr(ds, "NumberOfFrames", 1) pixel_module = ds.group_dataset(0x0028) dtype = pixel_dtype(ds) bits_stored = cast(int, ds.BitsStored) bits_allocated = cast(int, ds.BitsAllocated) for frame in generate_pixel_data_frame(ds.PixelData, nr_frames): arr = decoder(frame, pixel_module) if ( tsyntax in [JPEG2000, JPEG2000Lossless] and config.APPLY_J2K_CORRECTIONS ): param = get_j2k_parameters(frame) j2k_sign = param.setdefault('is_signed', True) j2k_precision = cast( int, param.setdefault('precision', bits_stored) ) shift = bits_allocated - j2k_precision if shift and not j2k_sign and j2k_sign != ds.PixelRepresentation: # Convert unsigned J2K data to 2s complement # Can only get here if parsed J2K codestream OK pixel_module.PixelRepresentation = 0 arr = arr.view(pixel_dtype(pixel_module)) arr = np.left_shift(arr, shift) arr = arr.astype(dtype) arr = np.right_shift(arr, shift) if arr.dtype != dtype: # Re-view as pylibjpeg returns a 1D uint8 ndarray arr = arr.view(dtype) if not reshape: yield arr continue if ds.SamplesPerPixel == 1: yield arr.reshape(ds.Rows, ds.Columns) else: if tsyntax == RLELossless: # RLE Lossless is Planar Configuration 1 arr = arr.reshape(ds.SamplesPerPixel, ds.Rows, ds.Columns) yield arr.transpose(1, 2, 0) else: # JPEG, JPEG-LS and JPEG 2000 are all Planar Configuration 0 yield arr.reshape(ds.Rows, ds.Columns, ds.SamplesPerPixel)
9,647
def setup_milp(model, target, remove_blocked=False, exclude_reaction_ids=set()): """ This function constructs the MILP. exclude_reaction_ids takes a list of reaction ids that shouldn't be considered for heterologous addition (i.e. spontaneous reactions and exchange reactions). These reactions are thus always allowed to have flux within their model bounds. """ original_model = model model = model.copy() model.objective=target for i in heterologous_reactions.keys(): # turns off each heterologous reaction in order to get the only the native metabolic network. model.reactions.get_by_id(i).lower_bound=0 model.reactions.get_by_id(i).upper_bound=0 for r in model.reactions: if r.id.find('MetaCyc')>-1: r.type='heterologous' else: r.type='native' # Set the solver to Gurobi for the fastest result. Set to CPLEX if Gurobi is not available. if "gurobi" in cobra.util.solver.solvers.keys(): logger.info("Changing solver to Gurobi and tweaking some parameters.") if "gurobi_interface" not in model.solver.interface.__name__: model.solver = "gurobi" # The tolerances are set to the minimum value. This gives maximum precision. problem = model.solver.problem problem.params.NodeMethod = 1 # primal simplex node relaxation problem.params.FeasibilityTol = 1e-9 #If a flux limited to 0 by a constraint, which range around it is still considered the same as 0 > set smallest possible problem.params.OptimalityTol = 1e-3 #how sure the solver has to be about this optimum being really the best it has. problem.params.IntFeasTol = 1e-9 #If a value is set to an integer, how much may it still vary? > set smallest possible problem.params.MIPgapAbs = 1e-9 problem.params.MIPgap = 1e-9 problem.params.Threads=1 #In order to reduce memory usage (increased calculation time) problem.params.TimeLimit = 200 # Use max 200 seconds when called, return best solution after that problem.params.PoolSearchMode = 1 #0 for only finding the optimum, 1 for finding more solutions (but no quality guaranteed), 2 for finding the n best possible solutions problem.params.PoolSolutions = 10 # Number of solutions kept when finding the optimal solution problem.params.PoolGap = 0.9 # only store solutions within 90% of the optimal objective value elif "cplex" in cobra.util.solver.solvers.keys(): logger.warning("Changing solver to CPLEX, as Gurobi is not available. This may cause a big slowdown and limit options afterwards.") if "cplex_interface" not in model.solver.interface.__name__: model.solver = "cplex" # The tolerances are set to the minimum value. This gives maximum precision. problem = model.solver.problem problem.parameters.mip.strategy.startalgorithm.set(1) # primal simplex node relaxation problem.parameters.simplex.tolerances.feasibility.set(1e-9) #If a flux limited to 0 by a constraint, which range around it is still considered the same as 0 > set smallest possible problem.parameters.simplex.tolerances.optimality.set(1e-3) #possibly fine with 1e-3, try if allowed. Is how sure the solver has to be about this optimum being really the best it has. problem.parameters.mip.tolerances.integrality.set(1e-9) #If a value is set to an integer, how much may it still vary? 
> set smallest possible problem.parameters.mip.tolerances.absmipgap.set(1e-9) problem.parameters.mip.tolerances.mipgap.set(1e-9) problem.parameters.mip.pool.relgap.set(0.9) # For populate: find all solutions within 10% of the optimum for relgap = 0.1 problem.parameters.timelimit.set(200) # Use max 200 seconds for solving problem.parameters.mip.limits.populate.set(20) # Find max 20 solutions (=default) else: logger.warning("You are trying to run 'Hamlet Hot Rig' with %s. This might not end well." % model.solver.interface.__name__.split(".")[-1]) pass # Remove reactions that are blocked: no flux through these reactions possible. This will reduce the search space for the solver, if not done already. if remove_blocked: blocked_reactions = cameo.flux_analysis.analysis.find_blocked_reactions(model) model.remove_reactions(blocked_reactions) # Make dual model_with = model.copy() # This variable looks unnecessary, but is kept out of fear of messing stuff up model_with.optimize() dual_problem = convert_linear_problem_to_dual(model_with.solver) logger.debug("Dual problem successfully created") # Combine primal and dual primal_problem = model.solver for var in dual_problem.variables: # All variables in the dual are copied to the primal var = primal_problem.interface.Variable.clone(var) primal_problem.add(var) for const in dual_problem.constraints: # All constraints in the dual are copied to the primal const = primal_problem.interface.Constraint.clone(const, model=primal_problem) primal_problem.add(const) logger.debug("Dual and primal combined") dual_problem.optimize() # Dictionaries to hold the binary control variables: heterologous_y_vars = {} # 1 for 'knockin', 0 for inactive medium_y_vars = {} # 1 for medium addition (up to -10), 0 for no addition # Now the fun stuff constrained_dual_vars = set() # For the knockins and medium additions: for reaction in [r for r in model.reactions if r.type == "heterologous"]: # Add constraint variables interface = model.solver.interface y_var = interface.Variable("y_" + reaction.id, type="binary") # Constrain the primal: flux through reactions maximum within (-1000, 1000), or smaller boundaries defined before model.solver.add(interface.Constraint(reaction.flux_expression - 1000 * y_var, ub=0, name="primal_y_const_"+reaction.id+"_ub")) model.solver.add(interface.Constraint(reaction.flux_expression + 1000 * y_var, lb=0, name="primal_y_const_"+reaction.id+"_lb")) # Constrain the dual constrained_vars = [] if reaction.upper_bound != 0: dual_forward_ub = model.solver.variables["dual_" + reaction.forward_variable.name + "_ub"] model.solver.add(interface.Constraint(dual_forward_ub - 1000 * (1 - y_var), ub=0)) constrained_vars.append(dual_forward_ub) if reaction.lower_bound != 0: dual_reverse_ub = model.solver.variables["dual_" + reaction.reverse_variable.name + "_ub"] model.solver.add(interface.Constraint(dual_reverse_ub - 1000 * (1 - y_var), ub=0)) constrained_vars.append(dual_reverse_ub) constrained_dual_vars.update(constrained_vars) # Add y variable to the corresponding modifications dictionary heterologous_y_vars[y_var] = reaction logger.debug("Control variables created") # Add number of heterologous switch contraint constraint heterologous_turn_on = model.solver.interface.Constraint( optlang.symbolics.Add(*heterologous_y_vars), lb=0, ub=0, name="heterologous_reaction_constraint" ) model.solver.add(heterologous_turn_on) # Set the objective primal_objective = model.solver.objective dual_objective = interface.Objective.clone( dual_problem.objective, model=model.solver ) 
switch_objective=interface.Objective(heterologous_turn_on.expression, direction='min') full_objective = interface.Objective(primal_objective.expression-dual_objective.expression, direction="max") model.objective = full_objective return model,primal_objective,dual_objective,full_objective,switch_objective
9,648
def bilinear_initializer(shape, dtype, partition_info): """ Bilinear initializer for deconvolution filters """ kernel = get_bilinear_kernel(shape[0], shape[1], shape[2]) broadcasted_kernel = np.repeat(kernel.reshape(shape[0], shape[1], shape[2], -1), repeats=shape[3], axis=3) return broadcasted_kernel
9,649
def draw_stalker_scene_menu_item(self, context): """draws one scene menu item """ logger.debug('entity_id : %s' % self.stalker_entity_id) logger.debug('entity_name : %s' % self.stalker_entity_name) layout = self.layout scene = Task.query.get(self.stalker_entity_id) # Add Everything op = layout.operator( StalkerSceneAddEverythingOperator.bl_idname, text='Add Everything' ) op.stalker_entity_id = scene.id op.stalker_entity_name = scene.name layout.separator() # Add Storyboard Only op = layout.operator( StalkerSceneAddStoryboardOperator.bl_idname, text='Storyboard' ) op.stalker_entity_id = scene.id op.stalker_entity_name = scene.name # Add Previs Only op = layout.operator( StalkerSceneAddPrevisOperator.bl_idname, text='Previs' ) op.stalker_entity_id = scene.id op.stalker_entity_name = scene.name layout.separator() # Add From Shots Menu idname = '%s%s' % ( idname_template % (scene.entity_type, scene.id), '_add_from_shots_menu' ) layout.menu(idname)
9,650
def pdf(): """ Demo version of the PDF report; it opens directly in the browser, which is more convenient than downloading it every time """ render_pdf(sample_payload_obj, './output.pdf') upload_file('./output.pdf') return send_file('./output.pdf', attachment_filename='output.pdf')
9,651
def run_clear_db_es(app, arg_env, arg_skip_es=False): """ This function actually clears DB/ES. Takes a Pyramid app as well as two flags. _Use with care!_ For safety, this function will return without side-effect on any production system. Also does additional checks based on arguments supplied: If an `arg_env` (default None) is given as a non-empty string value, this function will return without side-effect if the current app environment does not match the given value. If `arg_skip_es` (default False) is True, this function will return after DB clear and before running create_mapping. Args: app: Pyramid application arg_env (str): if provided, only run if environment matches this value arg_skip_es (bool): if True, do not run create_mapping after DB clear Returns: bool: True if DB was cleared (regardless of ES) """ env = app.registry.settings.get('env.name', '') # for now, do NOT allow clearing of production systems if is_stg_or_prd_env(env): log.error('clear_db_es_contents: will NOT run on env %s. Exiting...' % env) return False if arg_env and arg_env != env: log.error('clear_db_es_contents: environment mismatch! Given --env %s ' 'does not match current env %s. Exiting....' % (arg_env, env)) return False log.info('clear_db_es_contents: clearing DB tables...') db_success = clear_db_tables(app) if not db_success: log.error('clear_db_es_contents: clearing DB failed! Try to run again.' ' This command can fail if there are external DB connections') return False log.info('clear_db_es_contents: successfully cleared DB') # create mapping after clear DB to remove ES contents if not arg_skip_es: log.info('clear_db_es_contents: clearing ES with create_mapping...') run_create_mapping(app, purge_queue=True) log.info('clear_db_es_contents: done!') return True
9,652
def test_login_user_via_session(app): """Test the login-via-view function/hack.""" email = 'test@example.org' password = '1234' with app.app_context(): user = testutils.create_test_user(email, password) with app.test_client() as client: assert not testutils.client_authenticated(client) testutils.login_user_via_session(client, email=user.email) assert testutils.client_authenticated(client)
9,653
def interp_logp_pressure(sounding, missing=-9999): """Interpolate pressure from heights. Parameters ---------- sounding : dict Sounding dictionary structure. Notes ----- This function is similar to the MR_INTP subroutine from GEMPAK. """ i = 0 ilev = -1 klev = -1 size = len(sounding['PRES']) pt = missing pb = missing zt = missing zb = missing while i < size: p = sounding['PRES'][i] z = sounding['HGHT'][i] if p != missing and z != missing: klev = i pt = p zt = z if ilev != -1 and klev != -1: for j in range(ilev + 1, klev): z = sounding['HGHT'][j] if z != missing and zb != missing and pb != missing: sounding['PRES'][j] = ( pb * np.exp((z - zb) * np.log(pt / pb) / (zt - zb)) ) ilev = klev pb = pt zb = zt i += 1
9,654
def init_total_population(): """ Real Name: b'init total population' Original Eqn: b'init Infected asymptomatic+init Susceptible' Units: b'person' Limits: (None, None) Type: component b'' """ return init_infected_asymptomatic() + init_susceptible()
9,655
def test_dll_append(dll_fixture): """Test the append method on doubly linked list.""" dll_fixture.append('one') dll_fixture.append('two') dll_fixture.append('three') dll_fixture.push('zero') assert dll_fixture._len == 4
9,656
def one_hot(dim: int, idx: int): """ Get one-hot vector """ v = np.zeros(dim) v[idx] = 1 return v
9,657
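For completeness, one_hot above just produces a length-dim float vector with a single 1.0 at position idx:

# Assumes one_hot() from the snippet above is in scope.
assert one_hot(4, 2).tolist() == [0.0, 0.0, 1.0, 0.0]
assert int(one_hot(4, 2).argmax()) == 2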
def process_priors(prior_flat, initial_fit): """Process prior input array into fit object.""" if any( [float(val) <= 0 for key, val in prior_flat.items() if key.endswith("sdev")] ): raise ValueError("Standard deviations must be larger than zero.") prior = {} for key, val in initial_fit.prior.items(): if hasattr(val, "__len__"): nmax = len( [k for k in prior_flat if re.match(f"{key}__array_[0-9]+-mean", k)] ) prior[key] = gv.gvar( [prior_flat[f"{key}__array_{n}-mean"] for n in range(nmax)], [prior_flat[f"{key}__array_{n}-sdev"] for n in range(nmax)], ) else: prior[key] = gv.gvar(prior_flat[f"{key}-mean"], prior_flat[f"{key}-sdev"]) fit = nonlinear_fit(initial_fit.data, initial_fit.fcn, prior) for attr in ["models", "meta"]: if hasattr(initial_fit, attr): setattr(fit, attr, getattr(initial_fit, attr)) return fit
9,658
def _sample_data(ice_lines, frac_to_plot): """ Get sample ice lines to plot :param ice_lines: all ice lines :param frac_to_plot: fraction to plot :return: the sampled ice lines """ if frac_to_plot < 1.: ice_plot_data = ice_lines.sample(int(ice_lines.shape[0] * frac_to_plot)) elif frac_to_plot > 1: ice_plot_data = ice_lines.sample(frac_to_plot) else: ice_plot_data = ice_lines.copy() ice_plot_data = ice_plot_data.reset_index(drop=True) return ice_plot_data
9,659
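The three branches of _sample_data, exercised on a tiny DataFrame: a fraction below 1 samples that share of rows, an integer above 1 samples that many rows, and exactly 1 keeps every line, with the index reset in all cases. A minimal sketch assuming the function above is in scope.

import pandas as pd

# Assumes _sample_data() from the snippet above is in scope.
ice_lines = pd.DataFrame({"feature": range(10)})
assert len(_sample_data(ice_lines, 0.5)) == 5    # fraction of the lines
assert len(_sample_data(ice_lines, 3)) == 3      # absolute number of lines
assert len(_sample_data(ice_lines, 1.0)) == 10   # keep everything
assert list(_sample_data(ice_lines, 0.5).index) == [0, 1, 2, 3, 4]   # reset_index(drop=True)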
def fast_dot(M1, M2): """ Specialized interface to the numpy.dot function This assumes that M1 and M2 are both 2D arrays (in practice) When M1 or M2 are represented by 1D arrays, they are assumed to represent diagonal arrays This function then exploits that to provide faster multiplication """ if len(M1.shape) in [1, 2] and len(M2.shape) == 1: return M1*M2 elif len(M1.shape) == 1 and len(M2.shape) == 2: return M1[:,None]*M2 elif len(M1.shape) == 2 and len(M2.shape) == 2: return M1.dot(M2) else: raise Exception('fast_dot requires shapes to be 1 or 2')
9,660
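A short sketch (not part of the original snippet; assumes numpy) showing how fast_dot's 1D arguments stand in for diagonal matrices:

import numpy as np

A = np.arange(6.0).reshape(2, 3)
d = np.array([1.0, 2.0, 3.0])   # 1D array standing in for diag(d)

# A @ diag(d): scales the columns of A
assert np.allclose(fast_dot(A, d), A @ np.diag(d))
# diag(d) @ A.T: scales the rows of A.T
assert np.allclose(fast_dot(d, A.T), np.diag(d) @ A.T)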
async def autoredeem(
    bot: commands.Bot,
    guild_id: int
) -> bool:
    """Iterates over the list of users who have enabled autoredeem
    for this server, and if one of them does redeem some of their
    credits and alert the user."""
    await bot.wait_until_ready()
    conn = bot.db.conn
    guild = bot.get_guild(guild_id)
    if guild is None:
        return False

    async with bot.db.lock:
        async with conn.transaction():
            ar_members = await conn.fetch(
                """SELECT * FROM members
                WHERE guild_id=$1
                AND autoredeem=True""",
                guild_id
            )

    redeemed = False
    for m in ar_members:
        ms = await get_members([int(m['user_id'])], guild)
        if len(ms) == 0:
            continue
        current_credits = await get_credits(
            bot, int(m['user_id'])
        )
        if current_credits < bot_config.PREMIUM_COST:
            continue
        try:
            await alert_user(
                bot, int(m['user_id']),
                f"You have autoredeem enabled in {guild.name}, "
                f"so {bot_config.PREMIUM_COST} credits were taken "
                "from your account since they ran out of premium."
            )
        except Exception:
            continue
        try:
            await redeem(
                bot, int(m['user_id']),
                guild_id, 1
            )
            redeemed = True
        except errors.NotEnoughCredits:
            pass

    return redeemed
9,661
def get_ISO_369_3_from_string(term: str,
                              default: str = None,
                              strict: bool = False,
                              hdp_lkg: dict = None) -> str:
    """Convert an individual item to a ISO 369-3 language code, UPPERCASE

    Args:
        term (str): The input term to search
        default (str, optional): Default no match found. Defaults to None.
        strict (bool, optional): If require exact match on hdp_lkg.
        hdp_lkg (dict, optional): HDP localization knowledge graph dictionary.
                    Default to use internal HDP localization knowledge graph.

    Returns:
        str: An ISO 369-3 language code, UPPERCASE

    Examples:
        >>> import hxlm.core.localization as l10n
        >>> l10n.get_ISO_369_3_from_string(term='pt')
        'POR'
        >>> l10n.get_ISO_369_3_from_string(term='en')
        'ENG'
        >>> l10n.get_ISO_369_3_from_string(term='ZZZ', strict=False)
        'ZZZ'
        >>> l10n.get_ISO_369_3_from_string(term='pt_BR')
        >>> # inputs like 'pt_BR' still not implemented... yet
        >>> # But when using system languages, like 'pt_BR:pt:en',
        >>> # often the next term would be PT anyway
    """
    if _IS_DEBUG:
        print('get_ISO_369_3_from_string')
        print('  term', term)
        print('  term.upper', term.upper())
        print('  default', default)
        print('  strict', strict)
        # print('  hdp_lkg', hdp_lkg)

    result = default

    if hdp_lkg is None:
        hdp_lkg = get_localization_knowledge_graph()

    # Since the HDP localization knowledge may not contain the full ISO 639-3
    # language codes, without strict = True, if the input already is 3 letter
    # uppercase ASCII letters, we will fallback to this
    if not strict and (len(term) == 3 and term.isalpha() and term.isupper()):
        result = term

    if hdp_lkg is None or 'linguam23' not in hdp_lkg:
        return result

    if term.upper() in hdp_lkg['linguam23']:
        return hdp_lkg['linguam23'][term.upper()]

    if len(term) >= 5 and len(term) >= 12:
        if _IS_DEBUG:
            print('  TODO: implement some type of search by language name')

    return result
9,662
def append_local2global_config(name: str) -> None:
    """Appends an included configuration to the system configuration

    Args:
        name (str): The requested configuration
    """
    __append_to_global_config(get_config(name), name)
9,663
def plot_heatmap(
        data: DataFrame,
        columns: Optional[Sequence[str]] = None,
        droppable: bool = True,
        sort: bool = True,
        cmap: Optional[Sequence[str]] = None,
        names: Optional[Sequence[str]] = None,
        yaxis: bool = False,
        xaxis: bool = True,
        legend_kws: dict = None,
        sb_kws: dict = None) -> SubplotBase:
    """NA heatmap. Plots NA values as red lines and normal values as black lines.

    Parameters
    ----------
    data : DataFrame
        Input data.
    columns : Optional[Sequence[str]], optional
        Columns names.
    droppable : bool, optional
        Show values to be dropped by :py:meth:`pandas.DataFrame.dropna()` method.
    sort : bool, optional
        Sort DataFrame by selected columns.
    cmap : Optional[Sequence[str]], optional
        Heatmap and legend colormap: non-missing values, droppable values,
        NA values, correspondingly. Passed to :py:meth:`seaborn.heatmap()` method.
    names : Optional[Sequence[str]], optional
        Legend labels: non-missing values, droppable values, NA values,
        correspondingly.
    yaxis : bool, optional
        Show Y axis.
    xaxis : bool, optional
        Show X axis.
    legend_kws : dict, optional
        Keyword arguments passed to
        :py:meth:`matplotlib.axes._subplots.AxesSubplot()` method.
    sb_kws : dict, optional
        Keyword arguments passed to :py:meth:`seaborn.heatmap` method.

    Returns
    -------
    matplotlib.axes._subplots.AxesSubplot
        AxesSubplot object.
    """
    if not cmap:
        cmap = ['green', 'orange', 'red']
    if not names:
        names = ['Filled', 'Droppable', 'NA']
    if not sb_kws:
        sb_kws = {'cbar': False}

    cols = _select_cols(data, columns).tolist()
    data_na = data.loc[:, cols].isna().copy()
    if sort:
        data_na.sort_values(by=cols, inplace=True)

    if droppable:
        non_na_mask = ~data_na.values
        na_rows_mask = data_na.any(axis=1).values[:, None]
        droppable_mask = non_na_mask & na_rows_mask
        data_na = data_na.astype(float)
        data_na.values[droppable_mask] = 0.5
        labels = names
    else:
        labels = [names[0], names[-1]]

    if not legend_kws:
        legend_kws = {
            'bbox_to_anchor': (0.5, 1.15),
            'loc': 'upper center',
            'ncol': len(labels)}

    ax_heatmap = heatmap(data_na, cmap=cmap, **sb_kws)
    ax_heatmap.yaxis.set_visible(yaxis)
    ax_heatmap.xaxis.set_visible(xaxis)

    legend_elements = [Patch(facecolor=cmap[0]), Patch(facecolor=cmap[-1])]
    if droppable:
        legend_elements.insert(1, Patch(facecolor=cmap[1]))
    ax_heatmap.legend(legend_elements, labels, **legend_kws)

    return ax_heatmap
9,664
def iadd_tftensor(left, right, scale=1):
    """This function performs an in-place addition. However, TensorFlow
    returns a new object after a mathematical operation. This means that
    in-place here only serves to avoid the creation of a TfTensor instance.
    We do not have any control over the memory where the Tensor is stored."""
    _check_shape(left, right)

    # If scale=1 we obtain a x2 speed-up if we do not multiply by the scale.
    if scale == 1:
        left._tf = left._tf + right._tf
    else:
        left._tf = left._tf + scale * right._tf

    return left
9,665
def bookmark_desc_cmd(query):
    """describe: desc [num.. OR url/tag substr..]."""
    split_query = query[4:].strip().split()
    if not split_query:
        sys.stderr.write(BOOKMARK_HELP)
        return False
    bk_indices = find_bookmark_indices(split_query)
    if bk_indices:
        return describe_bookmark(bk_indices)
    return False
9,666
def encode(string_):
    """Change String to Integers"""
    return (lambda f, s: f(list(ord(c) for c in str(string_)), s))(
        lambda f, s: sum(f[i] * 256 ** i for i in range(len(f))),
        str(string_))
9,667
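A worked example (not part of the original snippet): encode treats the string's character codes as little-endian base-256 digits.

assert encode('A') == 65                                 # ord('A')
assert encode('AB') == 65 + 66 * 256                     # == 16961
assert encode('AB') == int.from_bytes(b'AB', 'little')   # equivalent view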
def generate_file_prefix(bin_params):
    """ Use the bin params to generate a file prefix."""
    prefix = "bin_"
    for j in range(0, len(bin_params)):
        if (j + 1) % 2 != 0:
            prefix += str(bin_params[j]) + "-"
        else:
            prefix += str(bin_params[j]) + "_"
    return prefix
9,668
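A worked example (not part of the original snippet): the parameters are consumed in pairs, with '-' inside a pair and '_' after it.

assert generate_file_prefix([10, 20, 30, 45]) == "bin_10-20_30-45_"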
def test_assert_constraint_contact_info_not_null():
    """
    Check constraint that assures that at least one of mail, phone or url
    contact info is present.
    """
    with pytest.raises(IntegrityError):
        JobOfferFactory.create(
            remoteness=Remoteness.REMOTE,
            location=None,
            contact_mail=None,
            contact_phone=None,
            contact_url=None,
        )
9,669
def test_Collection_build_landsat_c1_toa():
    """Test if the Landsat TOA (non RT) collections can be built"""
    coll_obj = default_coll_obj(
        collections=['LANDSAT/LC08/C01/T1_TOA', 'LANDSAT/LE07/C01/T1_TOA'])
    output = utils.getinfo(coll_obj._build())
    assert parse_scene_id(output) == SCENE_ID_LIST
    assert {y['id'] for x in output['features'] for y in x['bands']} == VARIABLES
9,670
def main():
    """Main method"""
    args = parse_args(sys.argv[1:])

    # Parse config file
    config_file_loc = args.config_file
    with open(str(config_file_loc), 'r') as file:
        parsed_config_file = toml.loads(file.read())

    # Fetch AWS params
    ssm_store = EC2ParameterStore()
    param_store_parameters = {}
    prefixes = parsed_config_file['param_store_prefixes']
    for prefix in prefixes:
        param_store_parameters.update(ssm_store.get_parameters_by_path(path=prefix))

    # Update env var file
    file_updated = update_environment_variable_file(
        s3_environment_variable_mappings=param_store_parameters,
        file_path=parsed_config_file['env_file_path'])

    # Run post commands if file was updated
    if file_updated:
        post_commands = parsed_config_file.get('post_commands', None)
        for command_args in post_commands:
            command = command_args.get('command', None)
            use_shell = command_args.get('shell', False)

            # The subprocess library handles the command argument differently
            # whether the shell argument is passed as True or not. TL;DR if the
            # shell argument is True then the command argument should be passed
            # as a single String. See https://stackoverflow.com/a/15109975 for
            # additional reading.
            if not use_shell:
                command = shlex.split(command)

            try:
                completed_command = subprocess.run(command, shell=use_shell, check=True)
                print("{} - Command '{}' completed successfully with exit code {}".format(
                    datetime.now(), command, completed_command.returncode))
            except subprocess.CalledProcessError as e:
                error_message = "{} - ERROR: {}".format(datetime.now(), str(e))
                raise Exception(error_message)
    else:
        print("{} - {} file is currently up to date".format(
            datetime.now(), parsed_config_file['env_file_path']))
9,671
def check_input_checkpoint(input_checkpoint):
    """Check if input_checkpoint is a valid path or path prefix."""
    if not saver_lib.checkpoint_exists(input_checkpoint):
        print("Input checkpoint '{}' doesn't exist!".format(input_checkpoint))
        exit(-1)
9,672
def extract_features_from_html(html, depth, height):
    """Given an html text, extract the node based features including the
    descendant and ancestor ones if depth and height are respectively
    nonzero."""
    # get the nodes, serve bytes, unicode fails if html has meta
    root = etree.HTML(html.encode('utf-8'))
    features = extract_features_from_nodes(list(root.iter()), depth, height)
    # add the paths to the elements for identification
    features.loc[:, 'path'] = pd.Series((node.getroottree().getpath(node)
                                         for node in root.iter()))
    return features
9,673
def __clean_field(amazon_dataset, option):
    """Cleans the Text field from the dataset"""
    clean = []
    if option == 1:
        for i in amazon_dataset['Text']:
            clean.append(__one(i))
    elif option == 2:
        for i in amazon_dataset['Summary']:
            clean.append(__one(i))
    else:
        pass
    return clean
9,674
def write_bruker_search_path(ftype, destfile, sourcefile=None, sourcetext=None):
    """Will copy a file from sourcefile (out of the add_files directory) or
    text to destfile in first directory of Bruker search path for
    ftype = cpd, f1, gp, ... with checks for overwrite, identity, etc.
    """
    if pp.run_flag not in pp.run_flags:
        raise Exception('unknown run_flag: ' + pp.run_flag)

    destfile = destfile + pp.name_tag

    ut.putcomment('write_bruker_search_path: start', 2)
    ut.putcomment('ftype: %s, destfile: %s' % (ftype, destfile), 2, ornament=False)

    if sourcetext and sourcefile:
        raise Exception('both sourcefile and sourcetext defined')

    if sourcetext:
        source = sourcetext
        sourcestring = 'sourcetext'
        ut.putcomment('input is from sourcetext', 2, ornament=False)
        ut.putcomment(source, 2, ornament=False)
    else:
        ut.putcomment('input is from sourcefile: ' + pp.addfiles_path + '/'
                      + sourcefile, 2, ornament=False)
        sourcestring = 'sourcefile <%s>' % sourcefile
        f = open(os.path.join(pp.addfiles_path, sourcefile))
        source = f.read()
        f.close()
        ut.putcomment(source, 3, ornament=False)

    (destfilefullpath, destdir) = ut.find_file_dir(destfile, ftype)

    if destfilefullpath:
        ut.putcomment('destination file exists: ' + destfilefullpath, 2,
                      ornament=False)
        if not ut.cmp_text_file(source, destfilefullpath):
            outstring = ('PP_FILE NO_ACTION: %s equals destfile <%s>'
                         % (sourcestring, destfilefullpath))
            ut.putcomment(outstring, 1, ornament=False)
            pp.pp_log_fd.write('%s\n' % outstring)
        else:
            outstring = ('PP_FILE CONFLICT: %s is not equal to destfile <%s>'
                         % (sourcestring, destfilefullpath))
            ut.putcomment(outstring, 0, ornament=False)
            pp.pp_log_fd.write('%s\n' % outstring)
            if pp.run_flag == 'DRY':
                outstring = ('PP_FILE OVERWRITE: %s will overwrite destfile <%s>'
                             % (sourcestring, destfilefullpath))
                pp.pp_log_fd.write('%s\n' % outstring)
            elif pp.run_flag == 'NORMAL':
                raise Exception('%s\nPP_FILE NO_OVERWRITE: run_flag is %s\n'
                                % (outstring, pp.run_flag))
            elif pp.run_flag == 'FORCE':
                outstring = ('PP_FILE OVERWRITE: %s overwrites destfile <%s>'
                             % (sourcestring, destfilefullpath))
                ut.putcomment(outstring, 0, ornament=False)
                pp.pp_log_fd.write('%s\n' % outstring)
                ut.write_text_file(source, destfilefullpath)
            elif pp.run_flag == 'INTERACTIVE':
                raise Exception('%s\nPP_FILE NO_OVERWRITE: run_flag is %s\n'
                                % (outstring, pp.run_flag))
    else:
        df1 = os.path.join(destdir, destfile)
        outstring = ('PP_FILE CREATE: destfile <%s> from %s'
                     % (df1, sourcestring))
        ut.putcomment(outstring, 1, ornament=False)
        pp.pp_log_fd.write('%s\n' % outstring)
        if pp.run_flag == 'DRY':
            pass
        elif pp.run_flag in ['FORCE', 'NORMAL', 'INTERACTIVE']:
            ut.write_text_file(source, df1)
        else:
            raise Exception('unknown run_flag: ' + pp.run_flag)

    ut.putcomment('write_bruker_search_path: end', 2)
    return destfile
9,675
def _read_atom_line(line):
    """
    COLUMNS   DATATYPE      FIELD        DEFINITION
    -------------------------------------------------------------------------
     1 -  6   RecordName    "ATOM  "
     7 - 11   Integer       serial       Atom serial number.
    13 - 16   Atom          name         Atom name.
    17        Character     altLoc       Alternate location indicator.
    18 - 20   Residue name  resName      Residue name.
    22        Character     chainID      Chain identifier.
    23 - 26   Integer       resSeq       Residue sequence number.
    27        AChar         iCode        Code for insertion of residues.
    31 - 38   Real(8.3)     x            Orthogonal coordinates for X in Angstroms.
    39 - 46   Real(8.3)     y            Orthogonal coordinates for Y in Angstroms.
    47 - 54   Real(8.3)     z            Orthogonal coordinates for Z in Angstroms.
    55 - 60   Real(6.2)     occupancy    Occupancy.
    61 - 66   Real(6.2)     tempFactor   Temperature factor.
    77 - 78   LString(2)    element      Element symbol, right-justified.
    79 - 80   LString(2)    charge       Charge on the atom.
    """
    lineInfo = {}
    lineInfo['RecordName'] = line[0:6]
    lineInfo['serial'] = int(line[7:12].strip())
    lineInfo['name'] = line[12:16].strip()
    lineInfo['altLoc'] = line[16].strip()
    lineInfo['resName'] = line[17:21].strip()
    lineInfo['chainID'] = line[21].strip()
    lineInfo['resSeq'] = int(line[22:26].strip())
    lineInfo['iCode'] = line[26].strip()

    try:
        lineInfo['position'] = np.array(
            [float(line[30:38]), float(line[38:46]), float(line[46:54])],
        )
    except ValueError:
        raise ValueError("Invalid or missing coordinate(s)")

    try:
        lineInfo['occupancy'] = float(line[54:60])
    except ValueError:
        lineInfo['occupancy'] = None  # Rather than arbitrary zero or one
    if lineInfo['occupancy'] is not None and lineInfo['occupancy'] < 0:
        warnings.warn("Negative occupancy in one or more atoms")

    try:
        lineInfo['bfactor'] = float(line[60:66])
    except ValueError:
        # The PDB use a default of zero if the data is missing
        lineInfo['bfactor'] = 0.0

    lineInfo['segid'] = line[72:76].strip()
    lineInfo['element'] = line[76:78].strip().upper()
    lineInfo['charge'] = line[79:81].strip()

    return lineInfo
9,676
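A minimal sketch (not part of the original snippet; the record below is a made-up but correctly padded ATOM line) showing how the fixed-width slices map onto a PDB record:

record = (
    "ATOM  " + "    1" + " " + " N  " + " " + "MET" + " " + "A" + "   1" + " "
    + "   " + "  38.198" + "  19.582" + "  43.988" + "  1.00" + " 24.89"
    + "      " + "    " + " N" + "  "
)
info = _read_atom_line(record)
assert info['serial'] == 1 and info['name'] == 'N' and info['resName'] == 'MET'
assert info['element'] == 'N' and info['occupancy'] == 1.0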
def filter_job_build_by_result(job, *, result):
    """filter builds by build result, available results are:
    'SUCCESS', 'UNSTABLE', 'FAILURE', 'NOT_BUILT', 'ABORTED'
    see: https://javadoc.jenkins-ci.org/hudson/model/Result.html
    """
    expect = ['SUCCESS', 'UNSTABLE', 'FAILURE', 'NOT_BUILT', 'ABORTED']
    if result not in expect:
        raise ValueError(f'Expect one of {expect}')
    yield from filter(lambda build: build.result == result, job)
9,677
def semantic_parse_entity_sentence(sent: str) -> List[str]:
    """
    @param sent: sentence to grab entities from
    @return: noun chunks that we consider "entities" to work with
    """
    doc = tnlp(sent)
    ents_ke = textacy.ke.textrank(doc, normalize="lemma")
    entities = [ent for ent, _ in ents_ke]
    return entities
9,678
def extract_to_files(pkr_path, verbose=False):
    """
    Extract data and image to .json and .png (if any) next to the .pkr
    """
    title, buttons, png_data = parse_animschool_picker(pkr_path, verbose)

    # Save to json
    with open(pkr_path + '.json', 'w') as f:
        json.dump([title, buttons], f, indent=4)

    # Write PNG to file:
    png_path = pkr_path + '.png'
    if png_data and not os.path.exists(png_path):
        save_png(png_data, png_path)
    return title, buttons, png_data
9,679
def readLog(jobpath):
    """
    Reads log to determine disk/mem usage, runtime

    For processing time, it will only grab the last execution/evict/terminated
    times. And runTime supersedes evictTime (eg. an exec->evict combination
    will not be written if a later exec-termination combination exists in the
    log)

    To be appended to processing database, so that structure is:
    ocr_processing["tag"]["jobs"] = [
        {startTime: xxx, execTime: yyy, ... },
        {reports from other jobs...}
    ]

    :jobpath: path to the job's submit/output directory
    :returns: If successful, returns dict of the form
        jobReport = {
            subTime:   (time of submission),
            execTime:  (start time of latest execution),
            evictTime: (time of job eviction, if any),
            termTime:  (time of job termination, if any),
            runTime:   (time between execution start and termination/eviction time),
            usage:     { usage dictionary from above},
        }
    """
    try:
        with open(jobpath + "/process.log") as file:
            chunk = ""
            subTime = None
            execTime = None
            evictTime = None
            termTime = None
            runTime = None
            jobReport = {}
            jobReport["path"] = jobpath
            for line in file:
                if line.startswith("..."):
                    if chunk.startswith("000"):
                        # submitted
                        jobReport["subTime"] = parseTime(chunk.split('\n')[0])
                    elif chunk.startswith("001"):
                        # executing
                        jobReport["execTime"] = parseTime(chunk.split('\n')[0])
                    elif chunk.startswith("004"):
                        # evicted, has partitionable table
                        jobReport["evictTime"] = parseTime(chunk.split('\n')[0])
                        runTime = (jobReport["evictTime"] - jobReport["execTime"])
                        jobReport["runTime"] = runTime.days * 86400 + runTime.seconds
                        jobReport["usage"] = parseResources(chunk)
                    elif chunk.startswith("005"):
                        # termination, has partitionable table
                        jobReport["termTime"] = parseTime(chunk.split('\n')[0])
                        runTime = (jobReport["termTime"] - jobReport["execTime"])
                        jobReport["runTime"] = runTime.days * 86400 + runTime.seconds
                        jobReport["usage"] = parseResources(chunk)
                    elif chunk.startswith("006"):
                        pass
                    elif chunk.startswith("009"):
                        pass
                    else:
                        if DEBUG:
                            print("UNKNOWN CODE")
                            print(chunk)
                    chunk = ""
                else:
                    chunk += line
            return jobReport
    except IOError:
        print("Couldn't find file at %s/process.log" % jobpath)
        return None
9,680
def check_table_files_load(i_df, dir_context):
    """Used for rules 0007 and 0009

    :param i_df: An investigation DataFrame
    :param dir_context: Path to where the investigation file is found
    :return: None
    """
    for i, study_df in enumerate(i_df['studies']):
        study_filename = study_df.iloc[0]['Study File Name']
        if study_filename != '':
            try:
                with utf8_text_file_open(os.path.join(
                        dir_context, study_filename)) as fp:
                    load_table_checks(fp)
            except FileNotFoundError:
                pass
        for j, assay_filename in enumerate(
                i_df['s_assays'][i]['Study Assay File Name'].tolist()):
            if assay_filename != '':
                try:
                    with utf8_text_file_open(os.path.join(
                            dir_context, assay_filename)) as fp:
                        load_table_checks(fp)
                except FileNotFoundError:
                    pass
9,681
def match_cam_time(events, frame_times):
    """
    Helper function for mapping ephys events to camera times. For each event
    in events, we return the nearest camera frame before the event.

    Parameters
    ----------
    events : 1D numpy array
        Events of interest. Sampled at a higher rate than frame_times.
    frame_times : 1D numpy array
        Timepoints of camera frames to be assigned to events. Sampled at a
        lower rate than events.
    """
    from numpy import array

    output = []
    for a in events:
        lags = array(a - frame_times)
        before = len(lags[lags > 0]) - 1
        if before >= 0:
            output.append(before)
    return array(output)
9,682
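A small sketch (not part of the original snippet; assumes numpy): note that the function returns frame indices, not frame times.

import numpy as np

frame_times = np.array([0.0, 1.0, 2.0, 3.0])
events = np.array([1.5, 2.1])
# Event 1.5 falls after the frame at t=1 (index 1), event 2.1 after t=2 (index 2).
print(match_cam_time(events, frame_times))  # -> [1 2]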
def _glibc_version_string_ctypes() -> Optional[str]:
    """
    Fallback implementation of glibc_version_string using ctypes.
    """
    try:
        import ctypes
    except ImportError:
        return None

    # ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen
    # manpage says, "If filename is NULL, then the returned handle is for the
    # main program". This way we can let the linker do the work to figure out
    # which libc our process is actually using.
    #
    # We must also handle the special case where the executable is not a
    # dynamically linked executable. This can occur when using musl libc,
    # for example. In this situation, dlopen() will error, leading to an
    # OSError. Interestingly, at least in the case of musl, there is no
    # errno set on the OSError. The single string argument used to construct
    # OSError comes from libc itself and is therefore not portable to
    # hard code here. In any case, failure to call dlopen() means we
    # can proceed, so we bail on our attempt.
    try:
        process_namespace = ctypes.CDLL(None)
    except OSError:
        return None

    try:
        gnu_get_libc_version = process_namespace.gnu_get_libc_version
    except AttributeError:
        # Symbol doesn't exist -> therefore, we are not linked to glibc.
        return None

    # Call gnu_get_libc_version, which returns a string like "2.5"
    gnu_get_libc_version.restype = ctypes.c_char_p
    version_str: str = gnu_get_libc_version()
    # py2 / py3 compatibility:
    if not isinstance(version_str, str):
        version_str = version_str.decode("ascii")

    return version_str
9,683
def encode_instructions(
    stream: Sequence[Instruction],
    func_pool: List[bytes],
    string_pool: List[bytes],
) -> Tuple[bytearray, List[bytes], List[bytes]]:
    """
    Encode the bytecode stream as a single `bytes` object that can be
    written to file or kept in memory.

    Parameters
    ----------
    stream: Sequence[Instruction]
        The bytecode instruction objects to be encoded.
    func_pool: List[bytes]
        Where the generated bytecode for function objects is stored before
        being put in the final bytecode stream.
    string_pool: List[bytes]
        Where string objects are stored before being put in the final
        bytecode stream.

    Returns
    -------
    bytes
        The encoded stream of bytecode instructions. It is guaranteed to
        have a length proportional to the length of `stream`.
    """
    result_stream = bytearray(len(stream) * 8)
    for index, instruction in enumerate(stream):
        start = index * 8
        end = start + 8
        opcode_space = instruction.opcode.value.to_bytes(1, BYTE_ORDER)
        operand_space = encode_operands(
            instruction.opcode, instruction.operands, func_pool, string_pool
        )
        operand_space = operand_space.ljust(7, b"\x00")
        result_stream[start:end] = opcode_space + operand_space
    return result_stream, func_pool, string_pool
9,684
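A hypothetical decoding sketch (not part of the original module; Instruction, encode_operands and BYTE_ORDER are defined elsewhere there) that only illustrates the fixed 8-byte framing produced above: byte 0 is the opcode, bytes 1-7 are the zero-padded operand space.

def split_records(encoded: bytes):
    """Yield (opcode_byte, operand_bytes) pairs from an encoded stream."""
    for start in range(0, len(encoded), 8):
        record = encoded[start:start + 8]
        yield record[0], record[1:]  # 1 opcode byte, 7 operand bytes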
def get_random(selector):
    """Return one random game"""
    controller = GameController
    return controller.get_random(MySQLFactory.get(), selector)
9,685
def get_ssh_dispatcher(connection, context):
    """
    :param Message context: The eliot message context to log.
    :param connection: The SSH connection run commands on.
    """

    @deferred_performer
    def perform_run(dispatcher, intent):
        context.bind(
            message_type="flocker.provision.ssh:run",
            command=intent.log_command_filter(intent.command),
        ).write()
        endpoint = SSHCommandClientEndpointWithTTY.existingConnection(
            connection, intent.command)
        d = Deferred()
        connectProtocol(endpoint, CommandProtocol(
            deferred=d, context=context))
        return d

    return TypeDispatcher({
        Run: perform_run,
        Sudo: perform_sudo,
        Put: perform_put,
        Comment: perform_comment,
    })
9,686
def read_ac(path, cut_off, rnalen):
    """Read the RNA accessibility file and output its positions and values

    The file should be a simple table with two columns:
    The first column is the position and the second one is the value
    '#' will be skipped
    """
    access = []
    with open(path) as f:
        i = 0
        while i < rnalen:
            for line in f:
                line = line.split()
                if not line:
                    continue
                elif line[0][0] == "#":
                    continue
                elif len(line) < 2:
                    continue
                else:
                    v = line[1]
                    if v == "NA":
                        access.append(0)
                    else:
                        try:
                            v = 2 ** (-float(v))
                        except:
                            continue
                        if v >= cut_off:
                            access.append(1)
                        else:
                            access.append(0)
                    i += 1
    return access
9,687
def subtableD0(cxt: DecoderContext, fmt: Format):
    """ ORI """
    fmt = FormatVI(fmt)
    return MNEM.ORI, [Imm(fmt.imm16, width=16, signed=False), Reg(fmt.reg1), Reg(fmt.reg2)], 2
9,688
def format_date(unix_timestamp):
    """ Return a standardized date format for use in the two1 library.

    This function produces a localized datetime string that includes the UTC
    timezone offset. This offset is computed as the difference between the
    local version of the timestamp (python's datetime.fromtimestamp) and the
    utc representation of the input timestamp.

    Args:
        unix_timestamp (float): a floating point unix timestamp

    Returns:
        string: A string formatted with "%Y-%m-%d %H:%M:%S %Z"
    """
    local_datetime = datetime.fromtimestamp(unix_timestamp)
    utz_offset = local_datetime - datetime.utcfromtimestamp(unix_timestamp)
    local_date = local_datetime.replace(
        tzinfo=timezone(utz_offset)
    ).strftime("%Y-%m-%d %H:%M:%S %Z")
    return local_date
9,689
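A usage sketch (not part of the original snippet; assumes the datetime and timezone imports used above); the offset portion of the output depends on the local timezone of the machine running it.

stamp = 1234567890.0
print(format_date(stamp))
# e.g. '2009-02-13 18:31:30 UTC-05:00' on a host set to US Eastern time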
def _env_clear():
    """
    clear old extract file, parsed file and combine file.
    :return: null
    """
    if os.path.isdir(BIN_FILE_PATH):
        shutil.rmtree(BIN_FILE_PATH)
    os.mkdir(BIN_FILE_PATH)
    if os.path.isdir(PARSED_FILE_PATH):
        shutil.rmtree(PARSED_FILE_PATH)
    os.mkdir(PARSED_FILE_PATH)
    if os.path.isdir(COMBINE_FILE_PATH):
        shutil.rmtree(COMBINE_FILE_PATH)
    os.mkdir(COMBINE_FILE_PATH)
9,690
def or_(kb, goals, substitutions=dict(), depth=0, mask=None, k_max=None, max_depth=1):
    """Base function of prover, called recursively.
    Calls and_, which in turn calls or_, in order to recursively calculate
    scores for every possible proof in proof tree.

    Args:
        kb: dict of facts / rules
        goals: goal to be proved
        substitutions: dict which contains current variable substitutions and
            scores of current proof path
        depth: current proof depth
        mask: mask to apply so that goal facts (which are drawn from kb)
            cannot be proved by unifying with themselves
        k_max: number of fact unifications to retain from unifications with
            all facts in kb
        max_depth: maximum allowed proof depth before termination

    Returns:
        List of proof paths of goal with corresponding scores
    """
    proofs = []

    # initialize history and substitutions as empty
    if substitutions == {}:
        substitutions['VARSUBS'] = {}
        substitutions['HISTORY'] = []

    for struct in kb:
        # avoid fake added struct
        if struct == 'goal':
            continue

        # Check if struct order matches
        if len(struct[0]) != len(goals):
            continue

        rule = rule_struct_form(kb[struct], struct)
        head = substitute(rule[0], substitutions, kb)
        body = rule[1:]

        mask_id = None
        if mask is not None:
            mask_key, mask_id = mask
            mask_id = mask_id if mask_key == struct else None

        is_fact = len(struct) == 1 and all([not is_variable(x) for x in struct[0]])

        if not is_fact and depth == max_depth:
            # maximum depth reached
            continue
        # rule has been applied before
        elif applied_before(rule, substitutions, kb):
            continue

        substitutions_copy = copy.deepcopy(substitutions)
        substitutions_copy['HISTORY'].append([struct, depth])

        substitutions_ = unify(head, goals, substitutions_copy, kb, depth,
                               mask_id, transpose=is_fact)

        if is_fact and k_max is not None:
            new_success, success_indices = tf.nn.top_k(
                substitutions_["SUCCESS"], k_max)
            substitutions_["SUCCESS"] = new_success
            for value in substitutions_['VARSUBS'].values():
                if value['struct'] != 'goal' and not 'subset' in value:
                    value['subset'] = success_indices

        if substitutions_ != 'FAILURE':
            proof = and_(kb, body, substitutions_, depth, mask, k_max=k_max,
                         max_depth=max_depth)
            if not isinstance(proof, list):
                proof = [proof]
            else:
                proof = flatten_proofs(proof)
            for proof_substitutions in proof:
                if proof_substitutions != 'FAILURE':
                    proofs.append(proof_substitutions)

    return flatten_proofs(proofs)
9,691
def print_term(thy, t):
    """More sophisticated printing function for terms. Handles printing
    of operators.

    Note we do not yet handle name collisions in lambda terms.
    """
    def get_info_for_operator(t):
        return thy.get_data("operator").get_info_for_fun(t.head)

    def get_priority(t):
        if nat.is_binary(t) or hol_list.is_literal_list(t):
            return 100  # Nat atom case
        elif t.is_comb():
            op_data = get_info_for_operator(t)
            if op_data is not None:
                return op_data.priority
            elif t.is_all() or logic.is_exists(t) or logic.is_if(t):
                return 10
            else:
                return 95  # Function application
        elif t.is_abs():
            return 10
        else:
            return 100  # Atom case

    def helper(t, bd_vars):
        LEFT, RIGHT = OperatorData.LEFT_ASSOC, OperatorData.RIGHT_ASSOC

        # Some special cases:
        # Natural numbers:
        if nat.is_binary(t):
            return N(str(nat.from_binary(t)))

        if hol_list.is_literal_list(t):
            items = hol_list.dest_literal_list(t)
            res = N('[') + commas_join(helper(item, bd_vars) for item in items) + N(']')
            if hasattr(t, "print_type"):
                return N("(") + res + N("::") + print_type(thy, t.T) + N(")")
            else:
                return res

        if set.is_literal_set(t):
            empty_set = "∅" if settings.unicode() else "{}"
            if hasattr(t, "print_type"):
                return N("(") + N(empty_set) + N("::") + print_type(thy, t.T) + N(")")
            else:
                return N(empty_set)

        if logic.is_if(t):
            P, x, y = t.args
            return N("if ") + helper(P, bd_vars) + N(" then ") + helper(x, bd_vars) + \
                N(" else ") + helper(y, bd_vars)

        if t.is_var():
            return V(t.name)
        elif t.is_const():
            if hasattr(t, "print_type") and t.print_type:
                return N("(" + t.name + "::") + print_type(thy, t.T) + N(")")
            else:
                return N(t.name)
        elif t.is_comb():
            op_data = get_info_for_operator(t)
            # First, we take care of the case of operators
            if op_data and op_data.arity == OperatorData.BINARY and t.is_binop():
                arg1, arg2 = t.args

                # Obtain output for first argument, enclose in parenthesis
                # if necessary.
                if (op_data.assoc == LEFT and get_priority(arg1) < op_data.priority or
                        op_data.assoc == RIGHT and get_priority(arg1) <= op_data.priority):
                    str_arg1 = N("(") + helper(arg1, bd_vars) + N(")")
                else:
                    str_arg1 = helper(arg1, bd_vars)

                if settings.unicode() and op_data.unicode_op:
                    str_op = N(' ' + op_data.unicode_op + ' ')
                else:
                    str_op = N(' ' + op_data.ascii_op + ' ')

                # Obtain output for second argument, enclose in parenthesis
                # if necessary.
                if (op_data.assoc == LEFT and get_priority(arg2) <= op_data.priority or
                        op_data.assoc == RIGHT and get_priority(arg2) < op_data.priority):
                    str_arg2 = N("(") + helper(arg2, bd_vars) + N(")")
                else:
                    str_arg2 = helper(arg2, bd_vars)

                return str_arg1 + str_op + str_arg2

            # Unary case
            elif op_data and op_data.arity == OperatorData.UNARY:
                if settings.unicode() and op_data.unicode_op:
                    str_op = N(op_data.unicode_op)
                else:
                    str_op = N(op_data.ascii_op)

                if get_priority(t.arg) < op_data.priority:
                    str_arg = N("(") + helper(t.arg, bd_vars) + N(")")
                else:
                    str_arg = helper(t.arg, bd_vars)

                return str_op + str_arg

            # Next, the case of binders
            elif t.is_all():
                all_str = "!" if not settings.unicode() else "∀"
                if hasattr(t.arg, "print_type"):
                    var_str = B(t.arg.var_name) + N("::") + print_type(thy, t.arg.var_T)
                else:
                    var_str = B(t.arg.var_name)
                body_repr = helper(t.arg.body, [t.arg.var_name] + bd_vars)
                return N(all_str) + var_str + N(". ") + body_repr

            elif logic.is_exists(t):
                exists_str = "?" if not settings.unicode() else "∃"
                if hasattr(t.arg, "print_type"):
                    var_str = B(t.arg.var_name) + N("::") + print_type(thy, t.arg.var_T)
                else:
                    var_str = B(t.arg.var_name)
                body_repr = helper(t.arg.body, [t.arg.var_name] + bd_vars)
                return N(exists_str) + var_str + N(". ") + body_repr

            # Function update
            elif function.is_fun_upd(t):
                f, upds = function.strip_fun_upd(t)
                upd_strs = [helper(a, bd_vars) + N(" := ") + helper(b, bd_vars)
                            for a, b in upds]
                return N("(") + helper(f, bd_vars) + N(")(") + commas_join(upd_strs) + N(")")

            # Finally, usual function application
            else:
                if get_priority(t.fun) < 95:
                    str_fun = N("(") + helper(t.fun, bd_vars) + N(")")
                else:
                    str_fun = helper(t.fun, bd_vars)
                if get_priority(t.arg) <= 95:
                    str_arg = N("(") + helper(t.arg, bd_vars) + N(")")
                else:
                    str_arg = helper(t.arg, bd_vars)
                return str_fun + N(" ") + str_arg

        elif t.is_abs():
            lambda_str = "%" if not settings.unicode() else "λ"
            if hasattr(t, "print_type"):
                var_str = B(t.var_name) + N("::") + print_type(thy, t.var_T)
            else:
                var_str = B(t.var_name)
            body_repr = helper(t.body, [t.var_name] + bd_vars)
            return N(lambda_str) + var_str + N(". ") + body_repr

        elif t.is_bound():
            if t.n >= len(bd_vars):
                raise OpenTermException
            else:
                return B(bd_vars[t.n])
        else:
            raise TypeError()

    t = copy(t)  # make copy here, because infer_printed_type may change t.
    infertype.infer_printed_type(thy, t)

    res = helper(t, [])
    if settings.highlight():
        res = optimize_highlight(res)

    return res
9,692
def build_model():
    """Builds the model."""
    return get_model()()
9,693
def truncate_field_data(model, data):
    """Truncate all data fields for model by its ``max_length`` field
    attributes.

    :param model: Kind of data (A Django Model instance).
    :param data: The data to truncate.
    """
    fields = dict((field.name, field) for field in model._meta.fields)
    return dict((name, truncate_by_field(fields[name], value))
                for name, value in data.items())
9,694
def get_pools(web3):
    """Iterator over all pools. Returns tuples like (token_address, pool_address)."""
    router = web3.eth.contract(address=_VETHER_ROUTER_ADDRESS, abi=router_vether_abi)
    for index in range(router.functions.tokenCount().call()):
        token = router.functions.getToken(index).call()
        pool_address = router.functions.getPool(token).call()
        yield token, pool_address
9,695
def plot_sn(filenames, sn_spectra, wave, idrfilenames, outfname):
    """Return a figure with the SN

    Parameters
    ----------
    outfname : str
        Output file name
    """
    sn_max = sn_spectra.max()
    day_exp_nums = [fname.split('_')[1:4] for fname in filenames]

    phase_strings = [fname.split('_')[-2] for fname in idrfilenames]
    print(phase_strings)
    phases = [((-1 if phase_string[0] == 'M' else 1) * float(phase_string[1:]) / 1000.)
              for phase_string in phase_strings]
    phase_sort = np.array(phases).argsort()

    fig = plt.figure(figsize=(7, 8))
    for p, phase_arg in enumerate(phase_sort):
        file = idrfilenames[phase_arg]
        phase = phases[phase_arg]
        with fitsio.FITS(file, 'r') as f:
            header = f[0].read_header()
            data = f[0].read()
            variance = f[1].read()

        n = header["NAXIS1"]
        # crpix = header["CRPIX1"]-1.0  # FITS is 1-indexed, numpy is 0-indexed
        crval = header["CRVAL1"]
        cdelt = header["CDELT1"]
        sn_wave = crval + cdelt * (np.arange(n))  # - crpix)

        file_day_exp = header["FILENAME"].split('_')[1:4]
        i_t_match = np.flatnonzero(np.array([day_exp == file_day_exp
                                             for day_exp in day_exp_nums]))

        plt.plot(sn_wave, data / sn_max + p / 2., color='k')
        for i_t in i_t_match:
            plt.plot(wave, sn_spectra[i_t] / sn_max + p / 2., color='r')
        plt.text(sn_wave[-20], p / 2., 'Phase = ' + str(phase))

    plt.savefig(outfname)
    plt.close()
9,696
def append_after(filename="", search_string="", new_string=""): """appends "new_string" after a line containing "search_string" in "filename" """ with open(filename, 'r', encoding='utf-8') as f: line_list = [] while True: line = f.readline() if line == "": break line_list.append(line) if search_string in line: line_list.append(new_string) with open(filename, 'w', encoding='utf-8') as f: f.writelines(line_list)
9,697
def get_all_students(zip):
    """Returns student tuple for all zipped submissions found in the zip file."""
    students = []
    # creating all the student objects that we can zip files of
    for filename in zip.namelist():
        if not filename.endswith(".zip"):
            continue
        firstname, surname = split_zipname(filename)
        student_zip_data = io.BytesIO(zip.open(filename).read())
        student_zipfile = zf.ZipFile(student_zip_data)
        students.append(Student(firstname, surname, student_zipfile))
    return students
9,698
def is_shipping_method_applicable_for_postal_code(
    customer_shipping_address, method
) -> bool:
    """Return if shipping method is applicable with the postal code rules."""
    results = check_shipping_method_for_postal_code(customer_shipping_address, method)
    if not results:
        return True
    if all(
        map(
            lambda rule: rule.inclusion_type == PostalCodeRuleInclusionType.INCLUDE,
            results.keys(),
        )
    ):
        return any(results.values())
    if all(
        map(
            lambda rule: rule.inclusion_type == PostalCodeRuleInclusionType.EXCLUDE,
            results.keys(),
        )
    ):
        return not any(results.values())
    # Shipping methods with complex rules are not supported for now
    return False
9,699