content: string (lengths 22 to 815k)
id: int64 (0 to 4.91M)
def _kubeconfig_impl(repository_ctx): """Find local kubernetes certificates""" # find and symlink kubectl kubectl = repository_ctx.which("kubectl") if not kubectl: fail("Unable to find kubectl executable. PATH=%s" % repository_ctx.path) repository_ctx.symlink(kubectl, "kubectl") # TODO: figure out how to use BUILD_USER if "USER" in repository_ctx.os.environ: user = repository_ctx.os.environ["USER"] else: exec_result = repository_ctx.execute(["whoami"]) if exec_result.return_code != 0: fail("Error detecting current user") user = exec_result.stdout.rstrip() token = None ca_crt = None kubecert_cert = None kubecert_key = None server = repository_ctx.attr.server # check service account first serviceaccount = repository_ctx.path("/var/run/secrets/kubernetes.io/serviceaccount") if serviceaccount.exists: ca_crt = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" token_file = serviceaccount.get_child("token") if token_file.exists: exec_result = repository_ctx.execute(["cat", token_file.realpath]) if exec_result.return_code != 0: fail("Error reading user token") token = exec_result.stdout.rstrip() # use master url from the environment if "KUBERNETES_SERVICE_HOST" in repository_ctx.os.environ: server = "https://%s:%s" % ( repository_ctx.os.environ["KUBERNETES_SERVICE_HOST"], repository_ctx.os.environ["KUBERNETES_SERVICE_PORT"], ) else: # fall back to the default server = "https://kubernetes.default" else: home = repository_ctx.path(repository_ctx.os.environ["HOME"]) certs = home.get_child(".kube").get_child("certs") ca_crt = certs.get_child("ca.crt").realpath kubecert_cert = certs.get_child("kubecert.cert") kubecert_key = certs.get_child("kubecert.key") # config set-cluster {cluster} \ # --certificate-authority=... \ # --server=https://dev3.k8s.tubemogul.info:443 \ # --embed-certs", _kubectl_config(repository_ctx, [ "set-cluster", repository_ctx.attr.cluster, "--server", server, "--certificate-authority", ca_crt, ]) # config set-credentials {user} --token=...", if token: _kubectl_config(repository_ctx, [ "set-credentials", user, "--token", token, ]) # config set-credentials {user} --client-certificate=... --embed-certs", if kubecert_cert and kubecert_cert.exists: _kubectl_config(repository_ctx, [ "set-credentials", user, "--client-certificate", kubecert_cert.realpath, ]) # config set-credentials {user} --client-key=... --embed-certs", if kubecert_key and kubecert_key.exists: _kubectl_config(repository_ctx, [ "set-credentials", user, "--client-key", kubecert_key.realpath, ]) # export repository contents repository_ctx.file("BUILD", """exports_files(["kubeconfig", "kubectl"])""", False) return { "cluster": repository_ctx.attr.cluster, "server": repository_ctx.attr.server, }
13,700
def getPrimaryHostIp(): """ Tries to figure out the primary (the one with default route), local IPv4 address. Returns the IP address on success and otherwise '127.0.0.1'. """ # # This isn't quite as easy as one would think. Doing a UDP connect to # 255.255.255.255 turns out to be problematic on solaris with more than one # network interface (IP is random selected it seems), as well as linux # where we've seen 127.0.1.1 being returned on some hosts. # # So a modified algorithm first try a known public IP address, ASSUMING # that the primary interface is the one that gets us onto the internet. # If that fails, due to routing or whatever, we try 255.255.255.255 and # then finally hostname resolution. # sHostIp = getPrimaryHostIpByUdp('8.8.8.8'); if sHostIp.startswith('127.'): sHostIp = getPrimaryHostIpByUdp('255.255.255.255'); if sHostIp.startswith('127.'): sHostIp = getPrimaryHostIpByHostname(); return sHostIp;
13,701
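The UDP-connect trick described in getPrimaryHostIp above can be sketched as follows. This is a minimal illustration of the assumed helper getPrimaryHostIpByUdp (the real helper is not shown in this entry), not the project's actual implementation: a UDP connect() sends no packets and only asks the kernel which local address would be used to reach the peer.

import socket

def getPrimaryHostIpByUdp(sPeerIp):
    # Sketch: ask the kernel which local address routes to sPeerIp.
    oSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        oSocket.connect((sPeerIp, 1))   # no traffic is actually sent
        sHostIp = oSocket.getsockname()[0]
    except socket.error:
        sHostIp = '127.0.0.1'
    finally:
        oSocket.close()
    return sHostIp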
def run(raw_args): """ Parse arguments in parameter. Then call the function registered in the argument parser which matches them. :param raw_args: :return: """ if "--version" in raw_args: print("version: ", __version__) return error.ReturnCode.success.value parser = build_cli_interface() args = parser.parse_args() if args.v: logger.set_global_level(INFO) if args.vv: logger.set_global_level(DEBUG) if args.quiet: logger.disable_logs() if "func" in args: try: args.func(args) except error.ConfigError as e: logger.LOGGER.error(e) return error.ReturnCode.config_error.value except error.ArtefactError as e: logger.LOGGER.error(e) return error.ReturnCode.artefact_error.value except error.ExpressionError as e: logger.LOGGER.error(e) return error.ReturnCode.expression_error.value except IOError as e: logger.LOGGER.error(e) return error.ReturnCode.artefact_error.value except botocore.exceptions.ClientError as e: logger.LOGGER.error("S3 error: %s" % e) return error.ReturnCode.s3_error.value except KeyboardInterrupt: logger.LOGGER.info("Interrupted") return error.ReturnCode.success.value
13,702
def localize(_bot, _msg, *args, _server=None, _channel=None, **kwargs): """ Localize message to current personality, if it supports it. """ global messages # Find personality and check if personality has an alternative for message. personality = config.get('personality', _server or _current_server, _channel or _current_channel) if personality and personality in messages_ and _msg in messages_[personality]: # Replace message. _msg = messages_[personality][_msg] kw = _bot.FORMAT_CODES.copy() kw.update(kwargs) return _msg.format(*args, **kw)
13,703
def test_neighbors_valid(sample_graph): """Test that neighbors returns node connections.""" sample_graph[2].add_edge('A', 'B') sample_graph[2].add_edge('A', 'C') sample_graph[2].add_edge('A', 'D') assert sample_graph[2].neighbours('A') == ['B', 'C', 'D']
13,704
def parse_arguments(): """ Parse the argument list and return the location of a geometry file, the location of a data file, whether or not to save images with a timestamp of the four default plot windows and the VisIt session file in the current directory, and whether or not to open the session file in VisIt. Input: ______ none Returns: ________ args: Namespace User supplied geometry file location, data file location, and indication if the user wants images of the plot windows with a timestamp and the session file saved and opened in VisIt. """ parser = argparse.ArgumentParser(description="Create default VisIt output.") parser.add_argument("geofile", type=str, help="Provide a path to the geometry file." ) parser.add_argument("datafile", type=str, help="Provide a path to the data file." ) parser.add_argument("-i", "--images", action="store_true", help="Indicate whether to save images of plot windows." ) parser.add_argument("-t", "--timestamp", action="store_true", help="Indicate whether to remove the timestamp from images." ) parser.add_argument("-s", "--sessionfile", action="store_true", help="Indicate whether to save the VisIt session file." ) parser.add_argument("-v", "--openvisit", action="store_false", help="Indicate whether to open the session file in VisIt." ) args = parser.parse_args() return args
13,705
def aborting_function(): """There is a 50% chance that this function will AbortAndRestart or complete successfully. The 50% chance simply represents a process that will fail half the time and succeed half the time. """ import random logging.info('In aborting_function') if random.random() < .5: from furious.errors import AbortAndRestart logging.info('Getting ready to restart') # Raise AbortAndRestart like an Exception, and watch the magic happen. raise AbortAndRestart() logging.info('No longer restarting')
13,706
def _print_metrics(metrics): """Print one metrics row and save it.""" time_label = metrics.index.get_level_values('Dataset')[0] global _metrics if time_label not in _metrics: _metrics[time_label] = pd.DataFrame() _metrics[time_label] = _metrics[time_label].append(metrics) local_metrics = _metrics[time_label].copy() local_metrics = _reverse_order_within_system_groups(local_metrics) local_metrics = local_metrics[evaluationutils.COLUMNS] evaluationutils.print_metrics( local_metrics, time_label + '_results', append_global=False)
13,707
def snapshot(source, destination): """Convert a possibly COW layered disk file into a snapshot.""" processutils.execute( 'qemu-img convert --force-share -O qcow2 %s %s' % (source, destination), shell=True)
13,708
def list_events(): """Show a view with past and future events.""" if "username" not in session: return redirect("/") events = actions.get_upcoming_events() past_events = actions.get_past_events() return render_template("events.html", count=len(events), past_count=len(past_events), events=events, past_events=past_events, events_view=True, mode="3")
13,709
def create_parser(): """Create argparser.""" parser = argparse.ArgumentParser() parser.add_argument( '--mode', default='local', choices=['local', 'docker']) parser.add_argument( '--env-file', action="append", help='Job specific environment file') parser.add_argument( '--image-family', help='The image family from which to fetch the latest image') parser.add_argument( '--image-project', help='The image project from which to fetch the test images') parser.add_argument( '--aws', action='store_true', help='E2E job runs in aws') parser.add_argument( '--aws-ssh', default=os.environ.get('JENKINS_AWS_SSH_PRIVATE_KEY_FILE'), help='Path to private aws ssh keys') parser.add_argument( '--aws-pub', default=os.environ.get('JENKINS_AWS_SSH_PUBLIC_KEY_FILE'), help='Path to pub aws ssh key') parser.add_argument( '--aws-cred', default=os.environ.get('JENKINS_AWS_CREDENTIALS_FILE'), help='Path to aws credential file') parser.add_argument( '--gce-ssh', default=os.environ.get('JENKINS_GCE_SSH_PRIVATE_KEY_FILE'), help='Path to .ssh/google_compute_engine keys') parser.add_argument( '--gce-pub', default=os.environ.get('JENKINS_GCE_SSH_PUBLIC_KEY_FILE'), help='Path to pub gce ssh key') parser.add_argument( '--service-account', default=os.environ.get('GOOGLE_APPLICATION_CREDENTIALS'), help='Path to service-account.json') parser.add_argument( '--mount-paths', action='append', help='Paths that should be mounted within the docker container in the form local:remote') parser.add_argument( '--build', nargs='?', default=None, const='', help='Build kubernetes binaries if set, optionally specifying strategy') parser.add_argument( '--cluster', default='bootstrap-e2e', help='Name of the cluster') parser.add_argument( '--docker-in-docker', action='store_true', help='Enable run docker within docker') parser.add_argument( '--kubeadm', choices=['ci', 'periodic', 'pull']) parser.add_argument( '--tag', default='v20170707-6440bde9', help='Use a specific kubekins-e2e tag if set') parser.add_argument( '--test', default='true', help='If we need to run any actual test within kubetest') parser.add_argument( '--down', default='true', help='If we need to tear down the e2e cluster') parser.add_argument( '--up', default='true', help='If we need to bring up a e2e cluster') parser.add_argument( '--kubetest_args', action='append', default=[], help='Send unrecognized args directly to kubetest') return parser
13,710
def get_duration(df): """Get duration of ECG recording Args: df (DataFrame): DataFrame with time/voltage data Returns: float: duration of ECG recording """ start = df.time.iloc[0] end = df.time.iloc[-1] duration = end - start return duration
13,711
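A quick usage sketch for get_duration with a small, hypothetical two-column DataFrame:

import pandas as pd

df = pd.DataFrame({"time": [0.0, 0.5, 1.0, 1.5], "voltage": [0.1, 0.9, -0.2, 0.4]})
get_duration(df)  # 1.5, i.e. last time value minus first time value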
def conv_seq_labels(xds, xhs): """descriptions and headlines are converted to padded input vectors. headlines are one-hot encoded as labels""" batch_size = len(xhs) assert len(xds) == batch_size def process_xdxh(xd,xh): concated_xd = xd+[[3]]+xh padded_xd = lpadd(concated_xd,maxlend) concated_xdxh = concat_output(padded_xd) return vocab_fold_list(concated_xdxh) x_raw = [process_xdxh(xd,xh) for xd,xh in zip(xds,xhs)] # the input does not have 2nd eos x = np.asarray([sequence.pad_sequences(_x, maxlen=maxlen, value=empty, padding='post', truncating='post') for _x in x_raw]) #x = flip_headline(x, nflips=nflips, model=model, debug=debug) def padeod_xh(xh): if [2] in xh: return xh+[[0]] else: return xh+[[2]] y = np.zeros((batch_size, maxhighs+1, maxlenh, vocab_size)) xhs_fold = [vocab_fold_list(padeod_xh(xh)) for xh in xhs] def process_xh(xh): if sum(xh)>0: xh_pad = xh + [eos] + [empty]*maxlenh # output does have an eos at end else: xh_pad = xh + [empty]*maxlenh xh_truncated = xh_pad[:maxlenh] return np_utils.to_categorical(xh_truncated, vocab_size) for i, xh in enumerate(xhs_fold): y[i,:,:,:] = np.asarray([process_xh(xh) for xh in xhs_fold[i]]) return x, y.reshape((batch_size,(maxhighs+1)*maxlenh,vocab_size))
13,712
def create_task(): """Create new post""" global post_id_counter body = json.loads(request.data) title = body.get("title") link = body.get("link") username = body.get("username") if not title or not link or not username: return json.dumps({"error": "Missing fields in the body"}), 400 post = { "id": post_id_counter, "upvotes": 1, "title": title, "link": link, "username": username, "comments": {} } posts[post_id_counter] = post post_id_counter += 1 return json.dumps(post), 201
13,713
def _asklong(*args): """_asklong(sval_t value, char format, v(...) ?) -> int""" return _idaapi._asklong(*args)
13,714
def describe_bivariate(data:pd.DataFrame, only_dependent:bool = False, size_max_sample:int = None, is_remove_outliers:bool = True, alpha:float = 0.05, max_num_rows:int = 5000, max_size_cats:int = 5, verbose:bool = False)->pd.DataFrame: """ Describe bivariate relationships. data -- data to be analyzed. only_dependent -- only display relationships with dependencies (default, False). size_max_sample -- maximum sample size for applying the analysis to the whole sample. If this value is not None, random subsamples are used, although bivariate outliers will not be removed (default, None). is_remove_outliers -- whether to remove univariate outliers (default, True). alpha -- significance level (default, 0.05). max_num_rows -- maximum number of rows allowed without considering a sample (default, 5000). max_size_cats -- maximum number of distinct values allowed in a categorical variable (default, 5). return -- results in a table. """ # data preparation df = preparation(data, max_num_rows, max_size_cats, verbose = True) # relationship num - num dfnn = analysis_num_num(df, only_dependent = only_dependent, size_max_sample = size_max_sample, is_remove_outliers = is_remove_outliers, alpha = alpha, verbose = verbose) # relationship cat - cat dfcc = analysis_cat_cat(df, only_dependent = only_dependent, alpha = alpha, verbose = verbose) # relationship cat - num dfcn = analysis_cat_num(df, only_dependent = only_dependent, alpha = alpha, is_remove_outliers = is_remove_outliers, verbose = verbose) # append results dfbiv = dfnn.copy() dfbiv = dfbiv.append(dfcc) dfbiv = dfbiv.append(dfcn) # return return dfbiv
13,715
def pip( args, path='pip', use_sudo=False ): """ Run pip. :param args: a string or sequence of strings to be passed to pip as command line arguments. If given a sequence of strings, its elements will be quoted if necessary and joined with a single space in between. :param path: the path to pip :param use_sudo: whether to run pip as sudo """ if isinstance( args, (str, unicode) ): command = path + ' ' + args else: command = join_argv( concat( path, args ) ) # Disable pseudo terminal creation to prevent pip from spamming output with progress bar. kwargs = Expando( pty=False ) if use_sudo: f = sudo # Set HOME so pip's cache doesn't go into real user's home, potentially creating files # not owned by that user (older versions of pip) or printing a warning about caching # being disabled. kwargs.sudo_args = '-H' else: f = run f( command, **kwargs )
13,716
def launch(cmd, args=None, separate_terminal=False, in_color='cyan', silent=False, should_wait=True): """ Launch a system command :param cmd: The command to run :param args: The arguments to pass to that command (a str list) :param separate_terminal: Should we open a new terminal window :param in_color: The color to output :param silent: Echo the system command to the current stdout? :param should_wait: In the case of a separate terminal, should we wait for that to finish? :return: The error code returned from the command. If not wait to complete, this will only return 0. """ if args is None: args = [] args_in = [cmd] if separate_terminal or not should_wait: pre_args = ['start'] if should_wait: pre_args.append('/wait') pre_args.append(cmd) pre_args.extend(args) args_in = pre_args else: args_in.extend(args) if not silent: click.secho(' '.join(args_in), fg=in_color) return subprocess.call(args_in, shell=separate_terminal or not should_wait)
13,717
def random_neuron_index(args): """ Sort the filters randomly. """ total_neuron_nums = args.total_neuron_nums record_filters_folder = args.record_filters_folder os.makedirs(record_filters_folder, exist_ok=True) save_noisy_filter_txt = os.path.join(record_filters_folder, 'noise_index.txt') random_noisy_location = list(range(0, total_neuron_nums)) random.shuffle(random_noisy_location) np.savetxt(save_noisy_filter_txt, random_noisy_location, delimiter=',', fmt='%d') save_blurry_filter_txt = os.path.join(record_filters_folder, 'blur_index.txt') random_blurry_location = list(range(0, total_neuron_nums)) random.shuffle(random_blurry_location) np.savetxt(save_blurry_filter_txt, random_blurry_location, delimiter=',', fmt='%d')
13,718
def create_feature_from_area(train_df, test_df): """ One more variable from floor area could be the difference between full area and living area. """ train_df["extra_sq"] = train_df["full_sq"] - train_df["life_sq"] test_df["extra_sq"] = test_df["full_sq"] - test_df["life_sq"]
13,719
def devilry_multiple_examiners_short_displayname(assignment, examiners, devilryrole): """ Returns the examiners wrapped in HTML formatting tags perfect for showing the examiners inline in a non-verbose manner. Typically used for showing all the examiners in an :class:`devilry.apps.core.models_group.AssignmentGroup`. Handles anonymization based on ``assignment.anonymizationmode`` and ``devilryrole``. Args: assignment: A :class:`devilry.apps.core.models.Assignment` object. The ``assignment`` should be the assignment where the examiners belongs. examiners: An iterable of :class:`devilry.apps.core.models.Examiner` objects. devilryrole: See :meth:`devilry.apps.core.models.Assignment.examiners_must_be_anonymized_for_devilryrole`. """ return { 'assignment': assignment, 'examiners': examiners, 'devilryrole': devilryrole, }
13,720
def main(): """If used as the main module, this method parses the arguments and calls copy or upload""" parser = argparse.ArgumentParser( description='Copy or upload field descriptions for BigQuery tables/views') parser.add_argument('mode', type=str, choices=['desccopy', 'descupload']) parser.add_argument('--source', action='store', help='fully-qualified source table ID') parser.add_argument('--target', action='store', help='fully-qualified target table ID', required=True) parser.add_argument('--csv_path', action='store', help='path for the csv file') parser.add_argument('--debug', action='store_true', help='set debug mode on, default is false') args = parser.parse_args() if args.mode == 'desccopy' and not args.source: parser.error('source table id is missing for copy') elif args.mode == 'descupload' and not args.csv_path: parser.error('csv path is missing for upload') log_level = logging.DEBUG if args.debug else logging.INFO logging.basicConfig(stream=sys.stdout, level=log_level, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s') client = bigquery.Client() description_manager = BigQueryDescriptionManager(client) if args.mode == 'desccopy': description_manager.copy_field_descriptions(args.source, args.target) elif args.mode == 'descupload': description_manager.upload_field_descriptions(args.csv_path, args.target)
13,721
def colormap_with_fixed_hue(color, N=10): """Create a linear colormap with fixed hue Parameters ---------- color: tuple color that determines the hue N: int, optional number of colors used in the palette """ import seaborn from matplotlib.colors import LinearSegmentedColormap from matplotlib.colors import rgb_to_hsv, hsv_to_rgb, hex2color color_hsv = rgb_to_hsv(hex2color(color)) base = seaborn.color_palette("Blues", 10) base_hsv = np.array(list(map(rgb_to_hsv, base))) h, s, v = base_hsv.T h_fixed = np.ones_like(h) * color_hsv[0] color_array = np.array(list(map( hsv_to_rgb, np.vstack([h_fixed, s * color_hsv[1], v]).T))) return LinearSegmentedColormap.from_list("mycmap", color_array)
13,722
def get_news_blacklist() -> list: """Get the users news blacklist from news-blacklist.json. Returns: list: List of blacklisted news article titles """ try: with open("news-blacklist.json", encoding="utf-8") as file: log.info("Getting news blacklist from news-blacklist.json") user_blacklist = json.load(file) except FileNotFoundError: log.warning("No news-blacklist.json found, creating a new one") user_blacklist = {"blacklist": []} with open("news-blacklist.json", "w", encoding="utf-8") as file: json.dump(user_blacklist, file) return user_blacklist["blacklist"]
13,723
def calc_triangular_number(n: int): """ A triangular number or triangle number counts objects arranged in an equilateral triangle. More info: https://www.mathsisfun.com/algebra/triangular-numbers.html :param n: :return: """ return int((n * (n + 1)) / 2)
13,724
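For example, the fourth triangular number counts a triangle with rows of 1, 2, 3 and 4 objects:

calc_triangular_number(4)  # int((4 * 5) / 2) == 10, i.e. 1 + 2 + 3 + 4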
def assign_task_command(username, task_count): """Assign up to task_count unassigned tasks to the given user.""" db = get_db() user = db.execute( 'SELECT * FROM user WHERE username = ?', (username,) ).fetchone() if user is None: click.echo("The user doesn't exist.") return selected_tasks = get_unassigned_task(user['id'], task_count) for x in selected_tasks: click.echo(x.keys()) db.execute( 'INSERT INTO assigned (user_id, datapoint_id) VALUES (?, ?)', (user['id'], x['id']) ) db.commit() click.echo(f"{len(selected_tasks)} tasks have been assigned to {username}.")
13,725
def wrap_keepdims(func): """ Check that output have same dimensions as input. """ # TODO : check if it's working @wraps(func) def check_keepdims(X, *args, keepdims=False, **kwargs): if keepdims: out = func(X, *args, **kwargs) return out.reshape(out.shape + (1,)) return func(X, *args, **kwargs) return check_keepdims
13,726
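A small usage sketch for wrap_keepdims, assuming a NumPy-based reduction; the decorated function here is hypothetical and only illustrates the extra trailing axis added when keepdims=True.

import numpy as np

@wrap_keepdims
def column_mean(X):
    # reduce over the first axis, dropping it
    return X.mean(axis=0)

column_mean(np.ones((4, 3))).shape                 # (3,)
column_mean(np.ones((4, 3)), keepdims=True).shape  # (3, 1), trailing axis kept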
def get_headers(cred=None, filename=None): """Return headers for basic HTTP authentication. Returns: str: Basic authorization header, including Base64 encoded username and password. """ return { "Authorization": "Basic {}".format( get_base64(cred=cred, filename=filename, api="reporting") ) }
13,727
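The header built by get_headers is standard HTTP Basic auth. A minimal sketch of the equivalent value with hypothetical credentials (the project's get_base64 helper is not shown in this entry):

import base64

token = base64.b64encode(b"reporting_user:secret").decode("ascii")
headers = {"Authorization": "Basic {}".format(token)}
# headers["Authorization"] == "Basic cmVwb3J0aW5nX3VzZXI6c2VjcmV0"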
def create_xml_content( segmentation: list[dict], lang_text: list[str], split: str, src_lang: str, tgt_lang: str, is_src: bool, ) -> list[str]: """ Args: segmentation (list): content of the yaml file lang_text (list): content of the transcription or translation txt file split (str): the split name src_lang (str): source language id tgt_lang (str): target language id is_src (bool): whether lang_text is transcriptions Returns: xml_content (list) """ xml_content = [] xml_content.append('<?xml version="1.0" encoding="UTF-8"?>') xml_content.append("<mteval>") if is_src: xml_content.append(f'<srcset setid="{split}" srclang="{src_lang}">') else: xml_content.append( f'<refset setid="{split}" srclang="{src_lang}" trglang="{tgt_lang}" refid="ref">' ) prev_talk_id = -1 for sgm, txt in zip(segmentation, lang_text): talk_id = sgm["wav"].split(".wav")[0] if prev_talk_id != talk_id: if prev_talk_id != -1: xml_content.append("</doc>") # add content (some of it does not matter, but is added to replicate the required format) xml_content.append(f'<doc docid="{talk_id}" genre="lectures">') xml_content.append("<keywords>does, not, matter</keywords>") xml_content.append("<speaker>Someone Someoneson</speaker>") xml_content.append(f"<talkid>{talk_id}</talkid>") xml_content.append("<description>Blah blah blah.</description>") xml_content.append("<title>Title</title>") seg_id = 0 prev_talk_id = talk_id seg_id += 1 xml_content.append(f'<seg id="{seg_id}">{txt}</seg>') xml_content.append("</doc>") if is_src: xml_content.append("</srcset>") else: xml_content.append("</refset>") xml_content.append("</mteval>") return xml_content
13,728
def virtualenv(directory, local=False): """ Context manager to activate an existing Python `virtual environment`_. :: from fabric.api import run from fabtools.python import virtualenv with virtualenv('/path/to/virtualenv'): run('python -V') .. _virtual environment: http://www.virtualenv.org/ """ path_mod = os.path if local else posixpath # Build absolute path to the virtualenv activation script venv_path = abspath(directory) activate_path = path_mod.join(venv_path, 'bin', 'activate') # Source the activation script with prefix('. %s' % quote(activate_path)): yield
13,729
def add_deployment(directory, name, templates_dir='templates', deployment_dir='deployment', mode=0777): """ Adds new deployment if not exists """ context = { 'datetime': datetime.datetime.now(), 'name': name, 'project_name': get_project_name(directory) } dd, df = get_deployment_info(directory, name) if df.exists(): raise ExistingDeploymentError() # create deployments directory df.parent.mkdir(parents=True, mode=mode) # write deployment file df.write_file( get_rendered_template('deployment.py', context) ) top_td = Path(__file__).parent.child(templates_dir) td = top_td.child(deployment_dir) for tf in td.walk(): if tf.isdir(): continue partitioned = tf.partition(td) target = Path(dd, Path(partitioned[2][1:])) target_dir = target.parent if not target_dir.exists(): target_dir.mkdir(parents=True, mode=mode) tmp = tf.partition(top_td)[2][1:] rendered = get_rendered_template(tmp, context) target.write_file(rendered)
13,730
def swarm_post_uptest(uptest_results, swarm_id, swarm_trace_id): """ Chord callback that runs after uptests have completed. Checks that they were successful, and then calls routing function. """ logger.info("[%s] Swarm %s post uptests", swarm_trace_id, swarm_id) # uptest_results will be a list of tuples in form (host, results), where # 'results' is a list of dictionaries, one for each test script. swarm = Swarm.objects.get(id=swarm_id) test_counter = 0 for host_results in uptest_results: if isinstance(host_results, Exception): raise host_results _host, proc_results = host_results # results is now a dict for proc, results in proc_results.items(): for result in results: test_counter += 1 # This checking/formatting relies on each uptest result being a # dict with 'Passed', 'Name', and 'Output' keys. if result['Passed'] is not True: msg = (proc + ": {Name} failed:" "{Output}".format(**result)) send_event(str(swarm), msg, tags=['failed', 'uptest'], swarm_id=swarm_trace_id) raise FailedUptest(msg) # Don't congratulate swarms that don't actually have any uptests. if test_counter > 0: send_event("Uptests passed", 'Uptests passed for swarm %s' % swarm, tags=['success', 'uptest'], swarm_id=swarm_trace_id) else: send_event("No uptests!", 'No uptests for swarm %s' % swarm, tags=['warning', 'uptest'], swarm_id=swarm_trace_id) # Also check for captured failures in the results correct_nodes = set( '%s:%s' % (host, procname.split('-')[-1]) for host, results in uptest_results # results is now a dictionary keyed by procname for procname in results ) callback = swarm_cleanup.subtask((swarm_id, swarm_trace_id)) swarm_route.delay(swarm_id, list(correct_nodes), callback, swarm_trace_id=swarm_trace_id)
13,731
def extractall(fzip, dest, desc="Extracting"): """zipfile.Zipfile(fzip).extractall(dest) with progress""" dest = Path(dest).expanduser() with ZipFile(fzip) as zipf, tqdm( desc=desc, unit="B", unit_scale=True, unit_divisor=1024, total=sum(getattr(i, "file_size", 0) for i in zipf.infolist()), ) as pbar: for i in zipf.infolist(): if not getattr(i, "file_size", 0): # directory zipf.extract(i, fspath(dest)) else: (dest / i.filename).parent.mkdir(parents=True, exist_ok=True) with zipf.open(i) as fi, (dest / i.filename).open(mode="wb") as fo: copyfileobj(CallbackIOWrapper(pbar.update, fi), fo) mode = (i.external_attr >> 16) & 0o777 if mode: (dest / i.filename).chmod(mode) log.debug(oct((i.external_attr >> 16) & 0o777))
13,732
def play(playbook, user, inventory=SITE_INVENTORY, sudo=True, ask_pass=False, ask_sudo_pass=True, ask_vault_pass=True, verbose=False, extra=None, extra_vars=None, key=None, limit=None, tags=None, list_tasks=False): """Run a playbook. Defaults to using the "hosts" inventory""" print('[invoke] Playing {0!r} on {1!r} with user {2!r}...'.format( playbook, inventory, user) ) # If private key not provided, take a good guess if not key: if user == 'vagrant': key = '~/.vagrant.d/insecure_private_key' else: key = '~/.ssh/id_rsa' cmd = 'ansible-playbook {playbook} -i {inventory} -u {user}'.format(**locals()) if sudo: cmd += ' -s' if ask_pass: cmd += ' --ask-pass' if ask_sudo_pass: cmd += ' --ask-sudo-pass' if ask_vault_pass: cmd += ' --ask-vault-pass' if verbose: cmd += ' -vvvv' if limit: cmd += ' --limit={0}'.format(limit) if key: cmd += ' --private-key={0}'.format(key) if extra: cmd += ' -e {0!r}'.format(extra) if extra_vars: cmd += ' -e "{}"'.format(extra_vars) if tags: cmd += ' --tags={0!r}'.format(tags) if list_tasks: cmd += ' --list-tasks' run(cmd, echo=True, pty=True)
13,733
def style_string(string: str, fg=None, stylename=None, bg=None) -> str: """Apply styles to text. It is able to change style (like bold, underline etc), foreground and background colors of text string.""" ascii_str = _names2ascii(fg, stylename, bg) return "".join(( ascii_str, string, _style_dict["reset"]))
13,734
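A usage sketch for style_string, assuming the module maps common names such as "red" and "bold" to ANSI codes (those particular names are an assumption here, not confirmed by this entry):

print(style_string("build failed", fg="red", stylename="bold"))
# prints the text wrapped in the corresponding ANSI escape codes, followed by a reset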
def select_all_genes(): """ Select all genes from SQLite database """ query = """ SELECT GENE_SYMBOL, HGNC_ID, ENTREZ_GENE_ID, ENSEMBL_GENE, MIM_NUMBER FROM GENE """ cur = connection.cursor() cur.execute(query) rows = cur.fetchall() genes = [] for row in rows: omim = row[4].split(';') if row[4] != "None" else [] gene = Gene(gene_symbol=row[0], hgnc_id=row[1], entrez_gene_id=row[2], ensembl_gene=row[3], omim=omim) genes.append(gene) cur.close() return genes
13,735
def test_enum_handler(params): """ Test enum validation handling """ return json_resp(params)
13,736
def get_staff_timetable(url, staff_name): """ Get Staff timetable via staff name :param url: base url :param staff_name: staff name string :return: a list of dicts """ url = url + 'TextSpreadsheet;Staff;name;{}?template=SWSCUST+Staff+TextSpreadsheet&weeks=1-52' \ '&days=1-7&periods=1-32&Width=0&Height=0'.format(staff_name) course_list, name = extract_text_spread_sheet(url, lambda _: False) for course in course_list: course['Name of Type'] = course['Module'] course['Module'] = course['Description'] return course_list, name
13,737
def find_ccs(unmerged): """ Find connected components of a list of sets. E.g. x = [{'a','b'}, {'a','c'}, {'d'}] find_cc(x) [{'a','b','c'}, {'d'}] """ merged = set() while unmerged: elem = unmerged.pop() shares_elements = False for s in merged.copy(): if not elem.isdisjoint(s): merged.remove(s) merged.add(frozenset(s.union(elem))) shares_elements = True if not shares_elements: merged.add(frozenset(elem)) return [list(x) for x in merged]
13,738
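A worked call matching the docstring's example (note that find_ccs pops from the input list, so the caller's list is consumed):

groups = find_ccs([{'a', 'b'}, {'a', 'c'}, {'d'}])
sorted(sorted(g) for g in groups)  # [['a', 'b', 'c'], ['d']]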
def read_match_df(_url: str, matches_in_section: int=None) -> pd.DataFrame: """Return each group's match list in our own DataFrame format. In the JFA-format JSON, the information for a single match looks like this: {'matchTypeName': '第1節', 'matchNumber': '1', # apparently a number running through the whole Competition 'matchDate': '2021/07/22', # unused 'matchDateJpn': '2021/07/22', 'matchDateWeek': '木', # unused 'matchTime': '20:00', # unused 'matchTimeJpn': '20:00', 'venue': '東京スタジアム', 'venueFullName': '東京/東京スタジアム', # unused 'homeTeamName': '日本', 'homeTeamQualificationDescription': '', # unused 'awayTeamName': '南アフリカ', 'awayTeamQualificationDescription': '', # unused 'score': { 'homeWinFlag': False, # unused 'awayWinFlag': False, # unused 'homeScore': '', 'awayScore': '', 'homeTeamScore1st': '', # unused: first-half score 'awayTeamScore1st': '', # unused: first-half score 'homeTeamScore2nd': '', # unused: second-half score 'awayTeamScore2nd': '', # unused: second-half score 'exMatch': False, 'homeTeamScore1ex': '', # unused: first half of extra time 'awayTeamScore1ex': '', # unused: first half of extra time 'homeTeamScore2ex': '', # unused: second half of extra time 'awayTeamScore2ex': '', # unused: second half of extra time 'homePKScore': '', # unused: penalty shoot-out score 'awayPKScore': '' # unused: penalty shoot-out score }, 'scorer': { 'homeScorer': [], # unused 'awayScorer': [] # unused }, 'matchStatus': '', 'officialReportURL': '' # unused } """ match_list = read_match_json(_url)[SCHEDULE_CONTAINER_NAME][SCHEDULE_LIST_NAME] # print(match_list) result_list = [] match_index_dict = {} for (_count, _match_data) in enumerate(match_list): _row = {} for (target_key, org_key) in REPLACE_KEY_DICT.items(): _row[target_key] = _match_data[org_key] for (target_key, org_key) in SCORE_DATA_KEY_LIST.items(): _row[target_key] = _match_data['score'][org_key] _regexp_result = SECTION_NO.match(_row['section_no']) if _regexp_result: section_no = _regexp_result[1] elif matches_in_section is not None: # no section number is given, so compute it when the number of matches per section is known section_no = int(_count / matches_in_section) + 1 else: # section number unknown section_no = 0 _row['section_no'] = section_no if section_no not in match_index_dict: match_index_dict[section_no] = 1 else: match_index_dict[section_no] += 1 _row['match_index_in_section'] = match_index_dict[section_no] # In the U18 Prince Takamado Trophy Kanto Prince League, cancellation info is (for some reason) stored in 'venueFullName', so handle it provisionally here if '【中止】' in _match_data['venueFullName']: print('Cancel Game## ' + _match_data['venueFullName']) _row['status'] = '試合中止' else: print('No Cancel## ' + _match_data['venueFullName']) result_list.append(_row) return pd.DataFrame(result_list)
13,739
def tokenize(text): """Tokenise text with lemmatizer and case normalisation. Args: text (str): text required to be tokenized Returns: list: tokenised list of strings """ url_regex = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+' detected_urls = re.findall(url_regex, text) for url in detected_urls: text = text.replace(url, "urlplaceholder") tokens = word_tokenize(text) lemmatizer = WordNetLemmatizer() clean_tokens = [] for tok in tokens: clean_tok = lemmatizer.lemmatize(tok).lower().strip() clean_tokens.append(clean_tok) return clean_tokens
13,740
def reinforce_loss_discrete(classification_logits_t, classification_labels_t, locations_logits_t, locations_labels_t, use_punishment=False): """Computes REINFORCE loss for contentious discrete action spaces. Args: classification_logits_t: List of classification logits at each time point. classification_labels_t: List of classification labels at each time point. locations_logits_t: List of location logits at each time point. locations_labels_t: List of location labels at each time point. use_punishment: (Boolean) Reward {-1, 1} if true else {0, 1}. Returns: reinforce_loss: REINFORCE loss. """ classification_logits = tf.concat(classification_logits_t, axis=0) classification_labels = tf.concat(classification_labels_t, axis=0) locations_logits = tf.concat(locations_logits_t, axis=0) locations_labels = tf.concat(locations_labels_t, axis=0) rewards = tf.cast( tf.equal( tf.argmax(classification_logits, axis=1, output_type=classification_labels.dtype), classification_labels), dtype=tf.float32) # size (batch_size) each if use_punishment: # Rewards is \in {-1 and 1} instead of {0, 1}. rewards = 2. * rewards - 1. neg_advs = tf.stop_gradient(rewards - tf.reduce_mean(rewards)) log_prob = -tf.nn.sparse_softmax_cross_entropy_with_logits( logits=locations_logits, labels=locations_labels) loss = -tf.reduce_mean(neg_advs * log_prob) return loss
13,741
def train2(num,base_path=base_path): """ this function is used to process train.yzbx.txt format """ #train_data_file="/home/zyyang/RS/train.yzbx.txt" train_data_file=os.path.join(base_path,num,'train.yzbx.txt') b_data=defaultdict(list) fi=open(train_data_file,'r') size=0 maxb=0 for line in fi: s=line.strip().split() b=int(s[2]) maxb= max(b,maxb) o=b>int(s[1]) o=int(o) b_data[b].append(o) size+=1 fi.close() b_data=sorted(b_data.items(),key=lambda e:e[0],reverse=False) b_data=dict(b_data) bdns=[] wins=0 for z in b_data: wins=sum(b_data[z]) b=z d=wins n=size bdn=[b,d,n] bdns.append(bdn) size-=len(b_data[z]) zw_dict={} min_p_w=0 bdns_length=len(bdns) count=0 p_l_tmp=1.0 for bdn in bdns: count+=1 b=float(bdn[0]) d=float(bdn[1]) n=float(bdn[2]) if count<bdns_length: p_l_tmp*=(n-d)/n p_l=p_l_tmp p_w=max(1.0-p_l,min_p_w) zw_dict[int(b)]=p_w #print(zw_dict) return zw_dict,maxb
13,742
def analyze(binObj, task='skewer', frange=None, distort=True, CenAlpha=None, histbin=False, statistic='mean', suffix='temp', overwrite=False, skewer_index=None, zq_cut=[0, 5], parallel=False, tt_bins=None, verbose=True, nboot=100, calib_kwargs=None, skewer_kwargs=None): """ Function to perform important operations on the binObj Parameters: binObj: An instance of the bin_class task: one of ["data_points", "calibrate", "composite", "skewer"] frange: the Lyman Alpha forest ranges used for the analysis distort: warp the spectra to a common spectral index CenAlpha: the common spectral index to warp to histbin: perform histogram rebinning statistic: statistic to use when creating composites [task="composite"] suffix: name of the file to write to overwrite: overwrite the skewer in the LogLikes folder if duplicates skewer_index: index of the skewers in the forest range (frange) to use zq_cut: allows performing a cut in the quasar redshift parallel: whether to run the skewers in parallel tt_bins: Bins in lyman alpha redshift to use for task="data_points" calib_kwargs: additional keyword arguments for task="calibrate" skewer_kwargs: additional keyword arguments for task="skewer" """ if frange is None: frange = [1070, 1160] lyInd = np.where((binObj.wl > frange[0]) & (binObj.wl < frange[1]))[0] if skewer_index is None: skewer_index = range(len(lyInd)) else: skewer_index = np.atleast_1d(skewer_index) outfile = task + '_' + suffix if task == 'skewer' or task == 'data_points': if verbose: print('Total skewers available: {}, skewers analyzed in this ' 'run: {}'.format(len(lyInd), len(skewer_index))) myspec = binObj._flux[:, lyInd[skewer_index]] myivar = binObj._ivar[:, lyInd[skewer_index]] zMat = binObj._zAbs[:, lyInd[skewer_index]] mywave = binObj.wl[lyInd[skewer_index]] else: myspec, myivar, zMat = binObj._flux, binObj._ivar, binObj._zAbs mywave = binObj.wl myz, myalpha = binObj._zq, binObj._alpha # selecting according to quasar redshifts zq_mask = (myz > zq_cut[0]) & (myz < zq_cut[1]) myspec = myspec[zq_mask] myivar = myivar[zq_mask] zMat = zMat[zq_mask] myz, myalpha = myz[zq_mask], myalpha[zq_mask] # B. DATA PREPROCESSING --------------------------------------------------- if histbin: # Histogram binning in parameter space myp1, myp2 = binObj._par1, binObj._par2 myzbins = find_zbins(myz) hInd = np.where((myz >= myzbins[0]) & (myz < myzbins[-1])) # Modify the selection to choose only objects that fall in the # zbins range myz, myalpha = myz[hInd], myalpha[hInd] myp1, myp2 = myp1[hInd], myp2[hInd] myspec, myivar = myspec[hInd], myivar[hInd] zMat = zMat[hInd] if binObj._hWeights is None: h_weights = hist_weights(myp1, myp2, myz, myzbins) binObj._hWeights = h_weights myivar = myivar * h_weights[:, None] else: myivar = myivar * binObj._hWeights[:, None] if distort: # Distort spectra in alpha space outfile += '_distort' if CenAlpha is None: CenAlpha = np.median(myalpha) distortMat = np.array([(mywave / 1450.) ** ele for ele in (CenAlpha - myalpha)]) myspec *= distortMat myivar /= distortMat ** 2 if verbose: print('All spectra distorted to alpha:', CenAlpha) # C. CALIBRATION VS ESTIMATION -------------------------------------------- if task == "data_points": print("Make sure that the reconstructed continuum has been run using " "the same frange as that being used right now!") # Data points for the transmission, using a continuum as the base if binObj.continuum is None: raise AttributeError("Set the reconstructed continuum for the" "bin first!!!") ivar_mask = (myivar > 0).flatten() zLyAs = zMat.flatten() zLyAs = zLyAs[ivar_mask] # bin centers for the redshift-transmission plot if tt_bins is None: tt_bins = np.linspace(zLyAs.min(), zLyAs.max(), 40) tt_cens = (tt_bins[1:] + tt_bins[:-1]) / 2. # errors from t0-gamma fluctuations # We are not going to use this in the paper !!! tt_binned = np.zeros((len(binObj.continuum), len(tt_cens))) for i in range(len(binObj.continuum)): tt = (myspec / binObj.continuum[i]).flatten() tt = tt[ivar_mask] tt_binned[i] = binned_statistic(zLyAs, tt, statistic=np.mean, bins=tt_bins).statistic continuum = binObj.continuum.mean(0) # estimates of the transmission central values - errors obtained # using bootstrap as below tt_cen = (myspec / continuum).flatten() tt_cen = tt_cen[ivar_mask] tt_data = binned_statistic(zLyAs, tt_cen, statistic=np.mean, bins=tt_bins).statistic # tt_std = binned_statistic(zLyAs, tt_cen, statistic=np.std, # bins=tt_bins).statistic # tt_counts = binned_statistic(zLyAs, None, statistic='count', # bins=tt_bins).statistic # errors from bootstrapping print("Computing bootstrap samples of transmission") tt_boot = np.zeros((nboot, len(tt_cens))) for i in range(nboot): np.random.seed() ixs = np.random.randint(0, len(myivar), len(myivar)) sp_boot, iv_boot = myspec[ixs], myivar[ixs] zz_boot = zMat[ixs] ivar_mask = (iv_boot > 0).flatten() zLyAs = zz_boot.flatten() zLyAs = zLyAs[ivar_mask] tt = (sp_boot / continuum).flatten() tt = tt[ivar_mask] tt_boot[i] = binned_statistic(zLyAs, tt, statistic=np.mean, bins=tt_bins).statistic # Save this to a file for future use - # Use this for the analysis of figure 6 <-- data_full = np.array([tt_cens, tt_data, *tt_boot]) np.savetxt("data_points_" + binObj.name + ".dat", data_full) return tt_cens, tt_data, tt_binned, tt_boot # , tt_std / np.sqrt(tt_counts) if task == 'calibrate': ixs = (myz > 1.6) & (myz < 4) print('Number of spectra used for calibration are: %d' % ixs.sum()) rest_range = [[1280, 1290], [1320, 1330], [1345, 1360], [1440, 1480]] # normalization range used obs_min, obs_max = 4600, 4640 corrections.calibrate(binObj.wl, myspec[ixs], myivar[ixs], myz[ixs], rest_range, obs_min, obs_max, binObj.name, True) # D. COMPOSITE CREATION IF SPECIFIED -------------------------------------- if task == 'composite': # Create composites using the spectra # zbins = find_zbins(myz) zbins = np.arange(2.1, 4.5, 0.05) # comp_simple.compcompute(myspec, myivar, myz, mywave, # zbins, statistic, outfile) create_comp.create_comp(myspec, myivar, myz, mywave, zbins, outfile) # E. LIKELIHOOD SKEWER ---------------------------------------------------- if task == 'skewer': currDir = os.getcwd() destDir = '../LogLikes' + '/Bin_' + outfile +\ str(frange[0]) + '_' + str(frange[1]) # <-- if not os.path.exists(destDir): os.makedirs(destDir) else: if overwrite: shutil.rmtree(destDir) os.makedirs(destDir) os.chdir(destDir) start = timer() # Do not plot graphs while in parallel res = None if parallel: pass # print('Running in parallel now!') # myfunc_partial = partial(mcmc_skewer.mcmcSkewer, **skewer_kwargs) # pool = Pool() # res = pool.map(myfunc_partial, # zip(np.array([zMat, myspec, myivar]).T, skewer_index)) # pool.close() # pool.join() # else: # for j, ele in enumerate(skewer_index): # res = mcmc_skewer.mcmcSkewer( # [np.array([zMat[:, j], myspec[:, j], myivar[:, j]]).T, ele], # **skewer_kwargs) stop = timer() print('Time elapsed:', stop - start) os.chdir(currDir) return mywave, res
13,743
def distances(spike_times, ii_spike_times, epoch_length=1.0, metric='SPOTD_xcorr'): """Compute temporal distances based on various versions of the SPOTDis, using CPU parallelization. Parameters ---------- spike_times : numpy.ndarray 1 dimensional matrix containing all spike times ii_spike_times : numpy.ndarray MxNx2 dimensional matrix containing the start and end index for the spike_times array for any given epoch and channel combination metric : str Pick the specific metric by combining the metric ID with either 'xcorr' to compute it on pairwise xcorr histograms or 'times' to compute it directly on spike times. Currently available: * SPOTD_xcorr * SPOTD_xcorr_pooled * SPOTD_spikes Returns ------- distances : numpy.ndarray MxM distance matrix with numpy.nan for unknown distances """ n_epochs = ii_spike_times.shape[0] epoch_index_pairs = np.array( list(itertools.combinations(range(n_epochs), 2)), dtype=int) # SPOTDis comparing the pairwise xcorrs of channels if metric == 'SPOTD_xcorr': distances, percent_nan = xcorr_spotdis_cpu_( spike_times, ii_spike_times, epoch_index_pairs) distances = distances / (2*epoch_length) # SPOTDis comparing the xcorr of a channel with all other channels pooled elif metric == 'SPOTD_xcorr_pooled': distances, percent_nan = xcorr_pooled_spotdis_cpu_( spike_times, ii_spike_times, epoch_index_pairs) distances = distances / (2*epoch_length) # SPOTDis comparing raw spike trains elif metric == 'SPOTD_spikes': distances, percent_nan = spike_spotdis_cpu_( spike_times, ii_spike_times, epoch_index_pairs) distances = distances / epoch_length # Otherwise, raise exception else: raise NotImplementedError('Metric "{}" unavailable, check doc-string for alternatives.'.format( metric)) np.fill_diagonal(distances, 0) return distances
13,744
def test_hackerone_program_list_command_when_invalid_args_provided(client, args, expected_error): """ Test case scenario when invalid arguments are provided. Given: - invalid command arguments for list program command When - Calling `hackerone-program-list` Then: - Returns the response message of invalid input arguments """ from HackerOne import hackerone_program_list_command with pytest.raises(ValueError) as err: hackerone_program_list_command(client, args) assert str(err.value) == expected_error
13,745
def def_phygrid(nbins, outname="phys_grid.pkl", log=False, \ rootpath=os.getcwd()+'/'): """ Sub-routine that defines the physical grid. Parameters ---------- nbins: int number of the physical grids Keywords -------- outname: str name of the output .pkl file (def: "phys_grid.pkl") log: boolean whether draw the grid in logarithmic scale or not rootpath: str root path that places the output pickle file (def: current path) Return ------ No return, but the pickle file with outname is created. """ # Physical parameters defined in Ji+2006, Sec.3.4 parsec = 3.0856780e+18 mu = 1.4 u0 = 1e3 #in km/s T0 = 5e6 #in K cs0 = np.sqrt( 5*cons.k*1e7*T0 / (3*mu*cons.m_p*1e3) ) / 1e5 #in km/s Tc = T0 + mu*cons.m_p*1e3*(u0*1e5)**2 / (5*cons.k*1e7) #in K r0 = 0.3 #in pc mdot0 = 3e-5 * 1.989e+33 / cons.year #in g/s c1 = mdot0 / (4*math.pi) #constant coefficient for \rho*r**2*u nH0 = c1 / ( u0*1e5 * (r0*parsec)**2 * mu*cons.m_p*1e3 ) #in cm-3 ne0 = nH0 * (mu+1)/2 if log: Tval = 10**( -np.linspace(0,nbins,nbins+1) / (nbins/2.5) )*T0 else: Tval = np.linspace(nbins,1,nbins) / nbins * T0 nTval = len(Tval) # Derive the radius array and the other physical parameters Tr_val = r0 / ( (Tval/T0)**3 * (Tc-Tval)/(Tc-T0) )**0.25 #in pc u_app = np.sqrt( 3*cs0**2 + u0**2 - 3*cs0**2/(Tr_val/r0)**(4/3.) ) ne_app = u0/u_app * (r0/Tr_val)**2 * ne0 # Save the physical grid as pickle file outdat = {} outdat['R'] = Tr_val*parsec outdat['velo'] = u_app*1e5 outdat['dens'] = ne_app outdat['kT'] = Tval*cons.k*1e7 / (cons.eV*1e3*1e7) tmp = open(rootpath+outname,'wb') pickle.dump(outdat,tmp) tmp.close()
13,746
def format_object_translation(object_translation, typ): """ Formats the [poi/event/page]-translation as json :param object_translation: A translation object which has a title and a permalink :type object_translation: ~cms.models.events.event.Event or ~cms.models.pages.page.Page or ~cms.models.pois.poi.POI :param typ: The type of this object :type typ: str :return: A dictionary with the title, url and type of the translation object :rtype: dict """ return { "title": object_translation.title, "url": f"{WEBAPP_URL}/{object_translation.permalink}", "type": typ, }
13,747
def _FormatKeyValuePairsToLabelsMessage(labels): """Converts the list of (k, v) pairs into labels API message.""" sorted_labels = sorted(labels, key=lambda x: x[0] + x[1]) return [ api_utils.GetMessage().KeyValue(key=k, value=v) for k, v in sorted_labels ]
13,748
def gen_unique(func): """ Given a function returning a generator, return a function returning a generator of unique elements""" return lambda *args: unique(func(*args))
13,749
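A minimal sketch of how gen_unique is meant to be used, with an assumed unique() helper that yields first occurrences; the project's own unique() is not shown in this entry.

def unique(iterable):
    # assumed helper: yield each element only the first time it appears
    seen = set()
    for item in iterable:
        if item not in seen:
            seen.add(item)
            yield item

@gen_unique
def letters(word):
    return (ch for ch in word)

list(letters("banana"))  # ['b', 'a', 'n']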
def app_actualizar(): """ Update data via a form """ helper.menu() # Update section output.span(output.put_markdown("## Sección Actualizar")) output.put_markdown(f"Actualizar una fila") form_update = input.input_group("Actualizar Datos", [ input.input(label="ID", type=input.NUMBER, name="id"), input.input(label="Sepal Length (cm)", type=input.FLOAT, name="sepal_length"), input.input(label="Sepal Width (cm)", type=input.FLOAT, name="sepal_width"), input.input(label="Petal Length (cm)", type=input.FLOAT, name="petal_length"), input.input(label="Petal Width (cm)", type=input.FLOAT, name="petal_width"), input.select(label="Species:", options=["setosa", "virginica", "versicolor"], name="species"), ]) actualizar(form_update)
13,750
def admin_inventory(request): """ View to handle stocking up inventory, adding products... """ context = dict(product_form=ProductForm(), products=Product.objects.all(), categories=Category.objects.all(), transactions=request.user.account.transaction_set.all() ) return render(request, 'namubufferiapp/admin_handleinventory.html', context)
13,751
def snippet_list(request): """ List all code snippets, or create a new snippet. """ print(f'METHOD @ snippet_list= {request.method}') if request.method == 'GET': snippets = Snippet.objects.all() serializer = SnippetSerializer(snippets, many=True) return JsonResponse(serializer.data, safe=False) elif request.method == 'POST': data = JSONParser().parse(request) serializer = SnippetSerializer(data=data) if serializer.is_valid(): serializer.save() return JsonResponse(serializer.data, status=201) return JsonResponse(serializer.errors, status=400)
13,752
def generate_submission(args: ArgumentParser, submission: pd.DataFrame) -> pd.DataFrame: """Take Test Predictions for 4 classes to Generate Submission File""" image, kind = args.shared_indices df = submission.reset_index()[[image, args.labels[0]]] df.columns = ["Id", "Label"] df.set_index("Id", inplace=True) df["Label"] = 1. - df["Label"] print(f"\nSubmission Stats:\n{df.describe()}\nSubmission Head:\n{df.head()}") return df
13,753
def nearest1d(vari, yi, yo, extrap="no"): """Nearest interpolation of nD data along an axis with varying coordinates Warning ------- `nxi` must be either a multiple or a divisor of `nxo`, and multiple of `nxiy`. Parameters ---------- vari: array_like(nxi, nyi) yi: array_like(nxiy, nyi) yo: array_like(nxo, nyo) Return ------ array_like(nx, nyo): varo With `nx=max(nxi, nxo)` """ # Shapes nxi, nyi = vari.shape nxiy = yi.shape[0] nxi, nyi = vari.shape nxo, nyo = yo.shape nx = max(nxi, nxo) # Init output varo = np.full((nx, nyo), np.nan, dtype=vari.dtype) # Loop on the varying dimension for ix in numba.prange(nx): # Index along x for coordinate arrays ixi = min(nxi-1, ix % nxi) ixiy = min(nxiy-1, ix % nxiy) ixoy = min(nxo-1, ix % nxo) # Loop on input grid iyimin, iyimax = get_iminmax(yi[ixiy]) iyomin, iyomax = get_iminmax(yo[ixoy]) for iyi in range(iyimin, iyimax): # Out of bounds if yi[ixiy, iyi+1] < yo[ixoy, iyomin]: continue if yi[ixiy, iyi] > yo[ixoy, iyomax]: break # Loop on output grid for iyo in range(iyomin, iyomax+1): dy0 = yo[ixoy, iyo] - yi[ixiy, iyi] dy1 = yi[ixiy, iyi+1] - yo[ixoy, iyo] # Above if dy1 < 0: # above break # Below if dy0 < 0: iyomin = iyo + 1 # Interpolations elif dy0 <= dy1: varo[ix, iyo] = vari[ixi, iyi] else: varo[ix, iyo] = vari[ixi, iyi+1] # Extrapolation if extrap != "no": varo = extrap1d(varo, extrap) return varo
13,754
def registros(): """Records page.""" return render_template('records.html')
13,755
def cal_evar(rss, matrix_v): """ Args: rss: matrix_v: Returns: """ evar = 1 - (rss / np.sum(matrix_v ** 2)) return evar
13,756
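A small worked example: with a residual sum of squares of 2.0 and a matrix whose squared entries sum to 10.0, the explained variance is 0.8.

import numpy as np

matrix_v = np.array([[1.0, 2.0], [1.0, 2.0]])  # sum of squares = 1 + 4 + 1 + 4 = 10.0
cal_evar(2.0, matrix_v)                        # 1 - 2.0 / 10.0 == 0.8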
def split_path(path): """ public static List<String> splitPath(String path) * Converts a path expression into a list of keys, by splitting on period * and unquoting the individual path elements. A path expression is usable * with a {@link Config}, while individual path elements are usable with a * {@link ConfigObject}. * <p> * See the overview documentation for {@link Config} for more detail on path * expressions vs. keys. * * @param path * a path expression * @return the individual keys in the path * @throws ConfigException * if the path expression is invalid """ return impl_util.split_path(path)
13,757
def load_dataset_RGB(split_th = 0.8, ext='.jpg'): """ Default: 80% for training, 20% for testing """ positive_dir = '/media/himanshu/ce640fc3-0289-402c-9150-793e07e55b8c/visapp2018code/RGB/data/positive' negative_dir = '/media/himanshu/ce640fc3-0289-402c-9150-793e07e55b8c/visapp2018code/RGB/data/negative' # positive_dir = '/home/himanshu/Documents/Projects/DLbasics/visapp2018code/RGB/data/positive' # negative_dir = '/home/himanshu/Documents/Projects/DLbasics/visapp2018code/RGB/data/negative' t_files = os.listdir(path.join(positive_dir, '1')) total_pos_files = len(t_files) t_files = os.listdir(path.join(negative_dir, '1')) total_neg_files = len(t_files) print('pos files: ',total_pos_files) print('neg files: ',total_neg_files) # total_files = total_pos_files + total_neg_files total_files = 1000 X1 = numpy.zeros( (total_files,96,128,3), dtype=numpy.uint8 ) X2 = numpy.zeros( (total_files,96,128,3), dtype=numpy.uint8 ) X3 = numpy.zeros( (total_files,96,128,3), dtype=numpy.uint8 ) y = numpy.zeros( (total_files), dtype=numpy.uint8 ) pos_file_counter = 0 neg_file_counter = 0 total_counter = 0 while total_counter < total_files: show_progress(max_val=total_files, present_val=total_counter) if total_counter % 2 == 0: # case: positive im1_path = path.join(positive_dir, '1', str(pos_file_counter+1)+ext) im2_path = path.join(positive_dir, '2', str(pos_file_counter+1)+ext) im3_path = path.join(positive_dir, '3', str(pos_file_counter+1)+ext) im1 = cv2.imread(im1_path) im2 = cv2.imread(im2_path) im3 = cv2.imread(im3_path) # cv2.imshow("Image 1", im1) # cv2.imshow("Image 2", im2) # cv2.imshow("Image 3", im3) # cv2.waitKey(0) X1[total_counter,:,:,:] = cv2.resize(im1, dsize=(128, 96), interpolation=cv2.INTER_CUBIC) # Resize image X2[total_counter,:,:,:] = cv2.resize(im2, dsize=(128, 96), interpolation=cv2.INTER_CUBIC) # Resize image X3[total_counter,:,:,:] = cv2.resize(im3, dsize=(128, 96), interpolation=cv2.INTER_CUBIC) # Resize image y[total_counter] = 1 pos_file_counter += 1 else: im1_path = path.join(negative_dir, '1', str(neg_file_counter+1)+ext) im2_path = path.join(negative_dir, '2', str(neg_file_counter+1)+ext) im3_path = path.join(negative_dir, '3', str(neg_file_counter+1)+ext) im1 = cv2.imread(im1_path) im2 = cv2.imread(im2_path) im3 = cv2.imread(im3_path) # cv2.imshow("Image 1", im1) # cv2.imshow("Image 2", im2) # cv2.imshow("Image 3", im3) # cv2.waitKey(0) X1[total_counter,:,:,:] = cv2.resize(im1, dsize=(128, 96), interpolation=cv2.INTER_CUBIC) # Resize image X2[total_counter,:,:,:] = cv2.resize(im2, dsize=(128, 96), interpolation=cv2.INTER_CUBIC) # Resize image X3[total_counter,:,:,:] = cv2.resize(im3, dsize=(128, 96), interpolation=cv2.INTER_CUBIC) # Resize image y[total_counter] = 0 neg_file_counter += 1 total_counter += 1 # normalize inputs from 0-255 to 0.0-1.0 X1 = X1.astype('float32') X2 = X2.astype('float32') X3 = X3.astype('float32') X1 = X1 / 255.0 X2 = X2 / 255.0 X3 = X3 / 255.0 training_samples_limit = math.ceil( split_th * total_counter ) X1_train = X1[0:training_samples_limit,:,:,:] X2_train = X2[0:training_samples_limit,:,:,:] X3_train = X3[0:training_samples_limit,:,:,:] y_train = y[0:training_samples_limit] X1_test = X1[training_samples_limit:total_counter,:,:,:] X2_test = X2[training_samples_limit:total_counter,:,:,:] X3_test = X3[training_samples_limit:total_counter,:,:,:] y_test = y[training_samples_limit:total_counter] return [X1_train, X2_train, X3_train, y_train, X1_test, X2_test, X3_test, y_test]
13,758
def imagetransformer_base_8l_8h_big_cond_dr03_dan_dilated_d(): """Dilated hparams.""" hparams = imagetransformer_base_8l_8h_big_cond_dr03_dan_dilated() hparams.gap_sizes = [0, 16, 64, 16, 64, 128, 256, 0] return hparams
13,759
def leaveOneOut_Input_v4( leaveOut ): """ Generate observation matrix and vectors Y, F Those observations are trimed for the leave-one-out evaluation. Therefore, the leaveOut indicates the CA id to be left out, ranging from 1-77 """ des, X = generate_corina_features('ca') X = np.delete(X, leaveOut-1, 0) popul = X[:,0].reshape(X.shape[0],1) pvt = X[:,2] # poverty index of each CA # poi_cnt = getFourSquareCount(leaveOut) # poi_cnt = np.divide(poi_cnt, popul) * 10000 poi_dist = getFourSquarePOIDistribution(leaveOut) poi_dist = np.divide(poi_dist, popul) * 10000 F_dist = generate_geographical_SpatialLag_ca( leaveOut=leaveOut ) F_flow = generate_transition_SocialLag(year=2010, lehd_type=0, region='ca', leaveOut=leaveOut) F_taxi = getTaxiFlow(leaveOut = leaveOut) Y = retrieve_crime_count(year=2010, col=['total'], region='ca') Y = np.delete(Y, leaveOut-1, 0) Y = np.divide(Y, popul) * 10000 F = [] n = Y.size Yd = [] for i in range(n): for j in range(n): if i != j: wij = np.array( [F_dist[i,j], actualFlowInteraction(pvt[i], pvt[j]) * F_flow[i,j], F_taxi[i,j] ]) # fij = np.concatenate( (X[i], poi_dist[i], wij * Y[j][0]), 0) fij = np.concatenate( (X[i], wij * Y[j][0]), 0) F.append(fij) Yd.append(Y[i]) F = np.array(F) np.append(F, np.ones( (F.shape[0], 1) ), axis=1) Yd = np.array(Yd) Yd.resize( (Yd.size, 1) ) return Yd, F
13,760
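The nested i/j loop above is easier to see on synthetic data. The sketch below (my own toy example with random stand-ins, not the original Chicago feature pipeline) builds the same kind of pairwise design matrix: one row per ordered region pair, concatenating region i's features with a weighted copy of region j's response, plus a bias column.

import numpy as np

n, p = 4, 2
rng = np.random.default_rng(0)
X = rng.random((n, p))        # per-region features (stand-in for the Corina features)
W = rng.random((n, n))        # pairwise weights (stand-in for distance/flow/taxi terms)
Y = rng.random((n, 1))        # per-region response (stand-in for crime rate)

F, Yd = [], []
for i in range(n):
    for j in range(n):
        if i != j:
            # each row: features of region i plus the weighted response of region j
            F.append(np.concatenate((X[i], W[i, j] * Y[j])))
            Yd.append(Y[i])

F = np.hstack([np.array(F), np.ones((len(F), 1))])   # append the bias column
Yd = np.array(Yd).reshape(-1, 1)
print(F.shape, Yd.shape)    # (12, 4) (12, 1) for n=4 regions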
def common_inroom_auth_response(name, request, operate, op_args):
    """
    > A generic handler for operations that require the user to exist, be logged in, and be inside a Room.

    Parameters:
        - name: operation name, used for log output;
        - request: the request object passed in by Flask;
        - operate: the concrete operation function; its parameters are the values that need to be
          extracted from request.form, and it returns the response json on success;
        - op_args: a list of the parameter-name strings of the operate function.

    Returns: response json

    Notes:
        This function extracts from_uid and every value named in op_args from request.form;
        if any of them is missing, it returns unexpected;
        it then checks whether the user exists, is logged in, and is in a Room, returning
        from_not_exist, from_not_login or from_not_in_room if a check fails;
        once all checks pass, it calls the operate function, passing the parsed args to operate
        via argument unpacking.
    """
    try:
        assert request.method == 'POST', "method should be POST"
        assert isinstance(op_args, (tuple, list)), "op_args should be tuple or list"

        from_uid = None
        args = {}
        try:
            from_uid = request.form["from_uid"]
            for i in op_args:
                args[i] = request.form[i]
        except KeyError:
            raise RequestError("not enough param")

        # validate the requesting user
        if not au.byUid.exist(from_uid):
            logging.critical('<{name}>: from_not_exist. from_uid = {from_uid}'.format(name=name, from_uid=from_uid))
            return response_error(get_simple_error_content(ResponseError.from_not_exist))
        if not au.byUid.logined(from_uid):
            logging.error('<{name}>: from_not_login. from_uid = {from_uid}'.format(name=name, from_uid=from_uid))
            return response_error(get_simple_error_content(ResponseError.from_not_login))
        if not au.byUid.inroom(from_uid):
            logging.error('<{name}>: from_not_in_room. from_uid = {from_uid}'.format(name=name, from_uid=from_uid))
            return response_error(get_simple_error_content(ResponseError.from_not_in_room))

        # all checks passed; perform the operation
        return operate(**args)
    except Exception as e:
        logging.error('<{name}>: unexpected. request = {request}, request.form = {form}'.format(
            name=name, request=request, form=request.form))
        return response_unexpected(e)
13,761
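A sketch of how a Flask route might wrap itself with this helper. The route path, the operate body and response_ok are placeholders of mine, not part of the original module; only the calling convention (op_args names matching request.form keys) is taken from the code above.

# Hypothetical route guarded by common_inroom_auth_response.
@app.route('/room/say', methods=['POST'])
def room_say():
    def operate(content):
        # from_uid has already been validated by the wrapper; here we would
        # broadcast 'content' to the user's room and build a success response.
        return response_ok({'echo': content})

    return common_inroom_auth_response(
        name='room_say',
        request=request,
        operate=operate,
        op_args=['content'],
    )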
def tab_printer(args):
    """
    Function to print the logs in a nice tabular format.
    :param args: Parameters used for the model.
    """
    args = vars(args)
    t = Texttable()
    t.add_rows([["Parameter", "Value"]] + [[k.replace("_", " ").capitalize(), v] for k, v in args.items()])
    print(t.draw())
13,762
def get_pca(acts, compute_dirns=False):
    """
    Takes in neuron activations and computes their principal components.
    Returns the principal directions and the associated eigenvalues (the
    singular values of the centered activations).

    Args:
        acts: numpy array, shape=(num neurons, num datapoints)
        compute_dirns: boolean, if True also return the activations projected
                       onto the principal directions (with the means added back)
    """
    assert acts.shape[0] < acts.shape[1], ("input must be number of neurons "
                                           "by datapoints")

    # center activations
    means = np.mean(acts, axis=1, keepdims=True)
    cacts = acts - means

    # compute PCA using SVD
    U, S, V = np.linalg.svd(cacts, full_matrices=False)

    return_dict = {}
    return_dict["eigenvals"] = S
    return_dict["neuron_coefs"] = U.T
    if compute_dirns:
        return_dict["pca_dirns"] = np.dot(U.T, cacts) + means

    return return_dict
13,763
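A quick self-contained check of get_pca on random activations; only numpy is needed, and the neurons-by-datapoints orientation required by the assert is respected.

import numpy as np

np.random.seed(0)
acts = np.random.randn(10, 500)      # 10 neurons, 500 datapoints

res = get_pca(acts, compute_dirns=True)
print(res["eigenvals"].shape)        # (10,)  singular values of the centered activations
print(res["neuron_coefs"].shape)     # (10, 10)  rows are principal directions over neurons
print(res["pca_dirns"].shape)        # (10, 500) projected activations with the means added back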
def refresh_lease(lease_id, client_id, epoch, ttl):
    """
    Update the timeout on the lease if client_id is the lease owner, else fail.

    :param lease_id:
    :param client_id:
    :param ttl: number of seconds in the future to set the expiration to, can lengthen or shorten expiration depending on current value of lease.
    :param epoch:
    :return: new expiration datetime
    """

    if not lease_id:
        raise ValueError(lease_id)

    if not client_id:
        raise ValueError(client_id)

    if not epoch:
        raise ValueError(epoch)

    if not ttl:
        raise ValueError(ttl)

    retries = REFRESH_RETRIES
    logger.debug('Refreshing lease {}'.format(lease_id))
    while retries > 0:
        try:
            with session_scope() as db:
                lease = db.query(Lease).with_for_update(of=Lease, nowait=False).get((lease_id))
                if not lease:
                    raise KeyError(lease_id)

                if lease.held_by != client_id:
                    raise Exception('Lock no longer held by this id')
                else:
                    lease.set_holder(lease.held_by, duration_sec=ttl)
                    return lease.to_json()
        except KeyError:
            raise
        except Exception as e:
            if not is_lock_acquisition_error(e):
                logger.exception('Failed updating lease duration for {} due to exception'.format(lease_id))
            retries -= 1
    else:
        logger.error('Failed updating lease duration {} after all retries'.format(lease_id))
        return None
13,764
def plot_cor_centroids(axs, ctd, zms): """plots coronal centroids on a plane axes Parameters: ---------- axs: matplotlib axs ctd: list of centroids zms: the spacing of the image """ # requires v_dict = dictionary of mask labels for v in ctd[1:]: axs.add_patch(Circle((v[3]*zms[2], v[1]*zms[0]), 2, color=colors_itk[v[0]-1])) axs.text(4, v[1]*zms[0], v_dict[v[0]], fontdict={'color': cm_itk(v[0]-1), 'weight': 'bold'})
13,765
def checkLastJob(jobsFolder):
    """Count the job folders (names starting with 'job') in the given directory

    :param jobsFolder: directory with jobs
    :return: number of jobs created so far
    """
    allFolders = os.listdir(jobsFolder)
    jobsFolders = [f for f in allFolders if f.startswith('job')]
    jobsCount = len(jobsFolders)
    return jobsCount
13,766
def canny(img, low_threshold, high_threshold): """Applies the Canny transform""" #imgCopy = np.uint8(img) return cv2.Canny(img, low_threshold, high_threshold)
13,767
def pairwise_two_tables(left_table, right_table, allow_no_right=True): """ >>> pairwise_two_tables( ... [("tag1", "L1"), ("tag2", "L2"), ("tag3", "L3")], ... [("tag1", "R1"), ("tag3", "R3"), ("tag2", "R2")], ... ) [('L1', 'R1'), ('L2', 'R2'), ('L3', 'R3')] >>> pairwise_two_tables( ... [("tag1", "L1"), ("tag2", "L2")], ... [("tag1", "R1"), ("tag3", "R3"), ("tag2", "R2")], ... ) Traceback (most recent call last): vrename.NoLeftValueError: ('tag3', 'R3') >>> pairwise_two_tables( ... [("tag1", "L1"), ("tag2", "L2"), ("tag3", "L3")], ... [("tag1", "R1"), ("tag3", "R3")], ... False, ... ) Traceback (most recent call last): vrename.NoRightValueError: ('tag2', 'L2') >>> pairwise_two_tables( ... [("tag1", "L1"), ("tag2", "L2"), ("tag3", "L3")], ... [("tag1", "R1"), ("tag3", "R3")], ... ) [('L1', 'R1'), ('L2', None), ('L3', 'R3')] >>> pairwise_two_tables( ... [("tag1", "L1"), ("tag1", "L1-B")], ... [] ... ) Traceback (most recent call last): vrename.DuplicateTagError: ('tag1', ['L1', 'L1-B']) >>> pairwise_two_tables( ... [("tag1", "L1"), ("tag2", "L2"), ("tag3", "L3")], ... [("tag1", "R1"), ("tag3", "R3"), ("tag2", "R2"), ("tag1", "R1-B")], ... ) Traceback (most recent call last): vrename.MultipleRightValueError: ('tag1', 'L1', ['R1', 'R1-B']) """ pairs = [] for tag, (left, rights) in _confront_two_tables(left_table, right_table): if len(rights) > 1: raise MultipleRightValueError(tag, left, rights) if not rights: if allow_no_right: pairs.append((left, None)) else: raise NoRightValueError(tag, left) else: pairs.append((left, rights[0])) return pairs
13,768
def augment_stochastic_shifts(seq, augment_shifts): """Apply a stochastic shift augmentation. Args: seq: input sequence of size [batch_size, length, depth] augment_shifts: list of int offsets to sample from Returns: shifted and padded sequence of size [batch_size, length, depth] """ shift_index = tf.random.uniform(shape=[], minval=0, maxval=len(augment_shifts), dtype=tf.int64) shift_value = tf.gather(tf.constant(augment_shifts), shift_index) seq = tf.cond(tf.not_equal(shift_value, 0), lambda: shift_sequence(seq, shift_value), lambda: seq) return seq
13,769
def _SourceArgs(parser): """Add mutually exclusive source args.""" source_group = parser.add_mutually_exclusive_group() def AddImageHelp(): """Returns detailed help for `--image` argument.""" template = """\ An image to apply to the disks being created. When using this option, the size of the disks must be at least as large as the image size. Use ``--size'' to adjust the size of the disks. {alias_table} This flag is mutually exclusive with ``--source-snapshot''. """ indent = template.find(template.lstrip()[0]) return template.format( alias_table=image_utils.GetImageAliasTable(indent=indent)) image = source_group.add_argument( '--image', help='An image to apply to the disks being created.') image.detailed_help = AddImageHelp image_utils.AddImageProjectFlag(parser) source_group.add_argument( '--image-family', help=('The family of the image that the boot disk will be initialized ' 'with. When a family is used instead of an image, the latest ' 'non-deprecated image associated with that family is used.') ) source_snapshot = source_group.add_argument( '--source-snapshot', help='A source snapshot used to create the disks.') source_snapshot.detailed_help = """\ A source snapshot used to create the disks. It is safe to delete a snapshot after a disk has been created from the snapshot. In such cases, the disks will no longer reference the deleted snapshot. To get a list of snapshots in your current project, run `gcloud compute snapshots list`. A snapshot from an existing disk can be created using the 'gcloud compute disks snapshot' command. This flag is mutually exclusive with ``--image''. When using this option, the size of the disks must be at least as large as the snapshot size. Use ``--size'' to adjust the size of the disks. """
13,770
def get_all_species_links_on_page(url): """Get all the species list on the main page.""" data, dom = get_dom(url) table = dom.find('.tableguides.table-responsive > table a') links = [] for link in table: if link is None or link.text is None: continue links.append(dict( name=link.text.strip().lower(), url=DAVES_URL_BY_SPECIES + link.get('href') )) return links
13,771
def gen_image_name(reference: str) -> str: """ Generate the image name as a signing input, based on the docker reference. Args: reference: Docker reference for the signed content, e.g. registry.redhat.io/redhat/community-operator-index:v4.9 """ no_tag = reference.split(":")[0] image_parts = no_tag.split("/") return "/".join(image_parts[1:])
13,772
def adaptive_confidence_interval(values, max_iterations=1000, alpha=0.05, trials=5, variance_threshold=0.5): """ Compute confidence interval using as few iterations as possible """ try_iterations = 10 while True: intervals = [confidence_interval(values, try_iterations, alpha) for _ in range(trials)] band_variance = variance([upper_bound - lower_bound for lower_bound, upper_bound in intervals]) print(try_iterations, band_variance) if band_variance < variance_threshold or try_iterations > max_iterations: return intervals[np.random.randint(0, trials)], try_iterations try_iterations *= 2
13,773
def get_chat_id(update): """ Get chat ID from update. Args: update (instance): Incoming update. Returns: (int, None): Chat ID. """ # Simple messages if update.message: return update.message.chat_id # Menu callbacks if update.callback_query: return update.callback_query.message.chat_id return None
13,774
def action(fun): """Method decorator signaling to Deployster Python wrapper that this method is a resource action.""" # TODO: validate function has single 'args' argument (using 'inspect.signature(fun)') fun.action = True return fun
13,775
def chooseCommertialCity(commercial_cities): """ Parameters ---------- commercial_cities : list[dict] Returns ------- commercial_city : dict """ print(_('From which city do you want to buy resources?\n')) for i, city in enumerate(commercial_cities): print('({:d}) {}'.format(i + 1, city['name'])) selected_city_index = read(min=1, max=len(commercial_cities)) return commercial_cities[selected_city_index - 1]
13,776
def nn_layer(input_tensor, input_dim, output_dim, layer_name, act=tf.nn.relu): """Reusable code for making a simple neural net layer. It does a matrix multiply, bias add, and then uses relu to nonlinearize. It also sets up name scoping so that the resultant graph is easy to read, and adds a number of summary ops. """ # Adding a name scope ensures logical grouping of the layers in the graph. with tf.name_scope(layer_name): # This Variable will hold the state of the weights for the layer with tf.name_scope('weights'): weights = weight_variable([input_dim, output_dim]) variable_summaries(weights) with tf.name_scope('biases'): biases = bias_variable([output_dim]) variable_summaries(biases) with tf.name_scope('Wx_plus_b'): preactivate = tf.matmul(input_tensor, weights) + biases tf.summary.histogram('pre_activations', preactivate) activations = act(preactivate, name="activation") tf.summary.histogram('activations', activations) return activations
13,777
def move_right_row(row, debug=True): """move single row to right.""" if debug: print(row) row_del_0 = [] for i in row: # copy non-zero blocks if i != 0: row_del_0.append(i) #print(row_del_0) row = row_del_0 i = 0 j = len(row_del_0) - 1 while i < j: # combine blocks #print(i, j) if row[j] == row[j-1]: row[j-1] *= 2 del row[j] j -= 2 else: j -= 1 #print(i, j) #print(row_del_0) for i in range(4 - len(row_del_0)): # insert zeros row_del_0.insert(0,0) if debug: print(row) return row
13,778
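A few worked rows for the 2048-style merge above. Merging runs from the right edge inward, and a tile produced by a merge is not merged again in the same move.

print(move_right_row([2, 2, 0, 2], debug=False))   # [0, 0, 2, 4]
print(move_right_row([2, 2, 2, 2], debug=False))   # [0, 0, 4, 4]
print(move_right_row([4, 0, 2, 2], debug=False))   # [0, 0, 4, 4]  (the two 4s are not re-merged)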
def enable_debug_mode() -> None: """ Enable PyBryt's debug mode. """ global _DEBUG_MODE_ENABLED _DEBUG_MODE_ENABLED = True
13,779
def escape(s): """ Built-in javascript function to HTML escape a string. The escape() function encodes special characters, with the exception of: * @ - _ + . / Use the unescape() function to decode strings encoded with escape(). For example: >>> escape("?!=()#%&") %3F%21%3D%28%29%23%25%26 """
13,780
def get_validate_platform(cmd, platform): """Gets and validates the Platform from both flags :param str platform: The name of Platform passed by user in --platform flag """ OS, Architecture = cmd.get_models('OS', 'Architecture', operation_group='runs') # Defaults platform_os = OS.linux.value platform_arch = Architecture.amd64.value platform_variant = None if platform: platform_split = platform.split('/') platform_os = platform_split[0] platform_arch = platform_split[1] if len(platform_split) > 1 else Architecture.amd64.value platform_variant = platform_split[2] if len(platform_split) > 2 else None platform_os = platform_os.lower() platform_arch = platform_arch.lower() valid_os = get_valid_os(cmd) valid_arch = get_valid_architecture(cmd) valid_variant = get_valid_variant(cmd) if platform_os not in valid_os: raise CLIError( "'{0}' is not a valid value for OS specified in --platform. " "Valid options are {1}.".format(platform_os, ','.join(valid_os)) ) if platform_arch not in valid_arch: raise CLIError( "'{0}' is not a valid value for Architecture specified in --platform. " "Valid options are {1}.".format( platform_arch, ','.join(valid_arch)) ) if platform_variant and (platform_variant not in valid_variant): raise CLIError( "'{0}' is not a valid value for Variant specified in --platform. " "Valid options are {1}.".format( platform_variant, ','.join(valid_variant)) ) return platform_os, platform_arch, platform_variant
13,781
def get_path_cost(slice, offset, parameters): """ part of the aggregation step, finds the minimum costs in a D x M slice (where M = the number of pixels in the given direction) :param slice: M x D array from the cost volume. :param offset: ignore the pixels on the border. :param parameters: structure containing parameters of the algorithm. :return: M x D array of the minimum costs for a given slice in a given direction. """ other_dim = slice.shape[0] disparity_dim = slice.shape[1] disparities = [d for d in range(disparity_dim)] * disparity_dim disparities = np.array(disparities).reshape(disparity_dim, disparity_dim) penalties = np.zeros(shape=(disparity_dim, disparity_dim), dtype=slice.dtype) penalties[np.abs(disparities - disparities.T) == 1] = parameters.P1 penalties[np.abs(disparities - disparities.T) > 1] = parameters.P2 minimum_cost_path = np.zeros(shape=(other_dim, disparity_dim), dtype=slice.dtype) minimum_cost_path[offset - 1, :] = slice[offset - 1, :] for i in range(offset, other_dim): previous_cost = minimum_cost_path[i - 1, :] current_cost = slice[i, :] costs = np.repeat(previous_cost, repeats=disparity_dim, axis=0).reshape(disparity_dim, disparity_dim) costs = np.amin(costs + penalties, axis=0) minimum_cost_path[i, :] = current_cost + costs - np.amin(previous_cost) return minimum_cost_path
13,782
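A toy run of the aggregation step above. `parameters` is assumed to be any object exposing P1 and P2 penalty attributes (a namedtuple here), which is all the function reads from it.

import numpy as np
from collections import namedtuple

Parameters = namedtuple('Parameters', ['P1', 'P2'])
params = Parameters(P1=5, P2=70)

np.random.seed(1)
cost_slice = np.random.randint(0, 50, size=(8, 4)).astype(np.float32)  # 8 pixels, 4 disparities

aggregated = get_path_cost(cost_slice, offset=1, parameters=params)
print(aggregated.shape)                               # (8, 4), same shape as the input slice
print(np.array_equal(aggregated[0], cost_slice[0]))   # True: row offset-1 is copied unchanged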
def authorize_cache_security_group_ingress(CacheSecurityGroupName=None, EC2SecurityGroupName=None, EC2SecurityGroupOwnerId=None): """ Allows network ingress to a cache security group. Applications using ElastiCache must be running on Amazon EC2, and Amazon EC2 security groups are used as the authorization mechanism. See also: AWS API Documentation Exceptions Examples Allows network ingress to a cache security group. Applications using ElastiCache must be running on Amazon EC2. Amazon EC2 security groups are used as the authorization mechanism. Expected Output: :example: response = client.authorize_cache_security_group_ingress( CacheSecurityGroupName='string', EC2SecurityGroupName='string', EC2SecurityGroupOwnerId='string' ) :type CacheSecurityGroupName: string :param CacheSecurityGroupName: [REQUIRED]\nThe cache security group that allows network ingress.\n :type EC2SecurityGroupName: string :param EC2SecurityGroupName: [REQUIRED]\nThe Amazon EC2 security group to be authorized for ingress to the cache security group.\n :type EC2SecurityGroupOwnerId: string :param EC2SecurityGroupOwnerId: [REQUIRED]\nThe AWS account number of the Amazon EC2 security group owner. Note that this is not the same thing as an AWS access key ID - you must provide a valid AWS account number for this parameter.\n :rtype: dict ReturnsResponse Syntax { 'CacheSecurityGroup': { 'OwnerId': 'string', 'CacheSecurityGroupName': 'string', 'Description': 'string', 'EC2SecurityGroups': [ { 'Status': 'string', 'EC2SecurityGroupName': 'string', 'EC2SecurityGroupOwnerId': 'string' }, ], 'ARN': 'string' } } Response Structure (dict) -- CacheSecurityGroup (dict) -- Represents the output of one of the following operations: AuthorizeCacheSecurityGroupIngress CreateCacheSecurityGroup RevokeCacheSecurityGroupIngress OwnerId (string) -- The AWS account ID of the cache security group owner. CacheSecurityGroupName (string) -- The name of the cache security group. Description (string) -- The description of the cache security group. EC2SecurityGroups (list) -- A list of Amazon EC2 security groups that are associated with this cache security group. (dict) -- Provides ownership and status information for an Amazon EC2 security group. Status (string) -- The status of the Amazon EC2 security group. EC2SecurityGroupName (string) -- The name of the Amazon EC2 security group. EC2SecurityGroupOwnerId (string) -- The AWS account ID of the Amazon EC2 security group owner. ARN (string) -- The ARN (Amazon Resource Name) of the cache security group. Exceptions ElastiCache.Client.exceptions.CacheSecurityGroupNotFoundFault ElastiCache.Client.exceptions.InvalidCacheSecurityGroupStateFault ElastiCache.Client.exceptions.AuthorizationAlreadyExistsFault ElastiCache.Client.exceptions.InvalidParameterValueException ElastiCache.Client.exceptions.InvalidParameterCombinationException Examples Allows network ingress to a cache security group. Applications using ElastiCache must be running on Amazon EC2. Amazon EC2 security groups are used as the authorization mechanism. 
response = client.authorize_cache_security_group_ingress( CacheSecurityGroupName='my-sec-grp', EC2SecurityGroupName='my-ec2-sec-grp', EC2SecurityGroupOwnerId='1234567890', ) print(response) Expected Output: { 'ResponseMetadata': { '...': '...', }, } :return: { 'CacheSecurityGroup': { 'OwnerId': 'string', 'CacheSecurityGroupName': 'string', 'Description': 'string', 'EC2SecurityGroups': [ { 'Status': 'string', 'EC2SecurityGroupName': 'string', 'EC2SecurityGroupOwnerId': 'string' }, ], 'ARN': 'string' } } :returns: AuthorizeCacheSecurityGroupIngress CreateCacheSecurityGroup RevokeCacheSecurityGroupIngress """ pass
13,783
def delete(profile_name): """Deletes a profile and its stored password (if any).""" message = ( "\nDeleting this profile will also delete any stored passwords and checkpoints. " "Are you sure? (y/n): " ) if cliprofile.is_default_profile(profile_name): message = f"\n'{profile_name}' is currently the default profile!\n{message}" if does_user_agree(message): cliprofile.delete_profile(profile_name) echo(f"Profile '{profile_name}' has been deleted.")
13,784
def generate_cyclic_group(order, identity_name="e", elem_name="a", name=None, description=None): """Generates a cyclic group with the given order. Parameters ---------- order : int A positive integer identity_name : str The name of the group's identity element Defaults to 'e' elem_name : str Prefix for all non-identity elements Default is a1, a2, a3, ... name : str The group's name. Defaults to 'Zn', where n is the order. description : str A description of the group. Defaults to 'Autogenerated cyclic group of order n', where n is the group's order. Returns ------- Group A cyclic group of the given order """ if name: nm = name else: nm = "Z" + str(order) if description: desc = description else: desc = f"Autogenerated cyclic group of order {order}" elements = [identity_name, elem_name] + [f"{elem_name}^" + str(i) for i in range(2, order)] table = [[((a + b) % order) for b in range(order)] for a in range(order)] return Group(nm, desc, elements, table)
13,785
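What the generator above actually builds for order 4, shown without the Group wrapper (the Group class is assumed to simply store the element names and the index-valued Cayley table).

order = 4
identity_name, elem_name = "e", "a"

elements = [identity_name, elem_name] + [f"{elem_name}^" + str(i) for i in range(2, order)]
table = [[(a + b) % order for b in range(order)] for a in range(order)]

print(elements)    # ['e', 'a', 'a^2', 'a^3']
for row in table:  # addition mod 4: the Z4 Cayley table, entries are element indices
    print(row)
# [0, 1, 2, 3]
# [1, 2, 3, 0]
# [2, 3, 0, 1]
# [3, 0, 1, 2]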
def test_CSVBatchProcessor_DatasetParser(): """CSVBatchProcessor correctly identifies the localization files. """ knownDatasets = ['HeLaS_Control_IFFISH_A647_1_MMStack_Pos0_locResults.dat', 'HeLaS_Control_IFFISH_A647_2_MMStack_Pos0_locResults.dat', 'HeLaS_shTRF2_IFFISH_A647_1_MMStack_Pos0_locResults.dat', 'HeLaS_shTRF2_IFFISH_A647_2_MMStack_Pos0_locResults.dat'] assert_equal(len(bpCSV.datasetList), 4) for ds in bpCSV.datasetList: ok_(str(ds.name) in knownDatasets, 'Batch processor found a file not in the known datasets.')
13,786
def test_gf_low_TA(): """ Ensure mid lats, low res, retrieves proper file """ s1 = Swepy(os.getcwd(), ul="T", lr="T", high_res=False) s1.set_login() date = datetime.datetime(2006, 11, 4) file = s1.get_file(date, "19H") assert file == { "protocol": "http", "server": "localhost:8000", "datapool": "MEASURES", "resolution": "25km", "platform": "F15", "sensor": "SSMI", "date1": datetime.datetime(2006, 11, 4), "date2": datetime.datetime(2006, 11, 3), "channel": "19H", "grid": "T", "input": "CSU", "dataversion": "v1.3", "pass": "A", "algorithm": "GRD", }
13,787
def loadTextureBMP(filepath):
    """
    Loads the BMP file given in filepath, creates an OpenGL texture from it
    and returns the texture ID.
    """
    data = np.array(Image.open(filepath))
    # numpy arrays produced from PIL images are indexed (rows, cols), i.e. (height, width)
    height = data.shape[0]
    width = data.shape[1]

    textureID = glGenTextures(1)
    glBindTexture(GL_TEXTURE_2D, textureID)
    glTexImage2D(
        GL_TEXTURE_2D,
        0,
        GL_RGB,
        width,
        height,
        0,
        GL_BGR,
        GL_UNSIGNED_BYTE,
        data,
    )

    # default parameters for now. Can be parameterized in the future
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT)
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT)
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR)
    glGenerateMipmap(GL_TEXTURE_2D)

    return textureID
13,788
def get_pixeldata(ds: "Dataset") -> "np.ndarray": """Return a :class:`numpy.ndarray` of the pixel data. .. versionadded:: 2.1 Parameters ---------- ds : pydicom.dataset.Dataset The :class:`Dataset` containing an :dcm:`Image Pixel <part03/sect_C.7.6.3.html>` module and the *Pixel Data* to be converted. Returns ------- numpy.ndarray The contents of (7FE0,0010) *Pixel Data* as a 1D array. """ expected_len = get_expected_length(ds, 'pixels') frame_len = expected_len // getattr(ds, "NumberOfFrames", 1) # Empty destination array for our decoded pixel data arr = np.empty(expected_len, pixel_dtype(ds)) generate_offsets = range(0, expected_len, frame_len) for frame, offset in zip(generate_frames(ds, False), generate_offsets): arr[offset:offset + frame_len] = frame return arr
13,789
def timeit(method):
    """
    Timing Decorator Function
    Written by Fahim Sakri of PythonHive (https://medium.com/pthonhive)
    """
    def timed(*args, **kwargs):
        time_start = time.time()
        result = method(*args, **kwargs)
        time_end = time.time()  # capture the end time after the wrapped call, not before it
        if 'log_time' in kwargs:
            name = kwargs.get('log_name', method.__name__.upper())
            kwargs['log_time'][name] = int((time_end - time_start) * 1000)
        else:
            print('\n{} {:5f} ms'.format(method.__name__,
                                         (time_end - time_start) * 1000))
        return result
    return timed
13,790
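A minimal use of the decorator; with the end time now captured after the wrapped call, the printed duration covers the call itself.

import time

@timeit
def slow_add(a, b):
    time.sleep(0.05)       # simulate work
    return a + b

result = slow_add(1, 2)    # prints roughly: slow_add 50.1... ms
print(result)              # 3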
def allot_projects():
    """
    The primary function that allots the projects to the employees.
    It generates a maximum match for a bipartite graph of employees and projects.
    :return: A tuple having the allotments, count of employees allotted and
             total project headcount (a project where two people need to work
             will have a headcount of two).
    """
    allotments = []
    try:
        emp_data = pd.read_pickle(EMPLOYEE_PICKLE_FILE)
        project_data = pd.read_pickle(PROJECT_PICKLE_FILE)
    except IOError as e:
        print("Either employee or project data is not present. No allocation done.")
        return [], 0, 0
    employees = []
    for _, emp_row in emp_data.iterrows():
        transposed = emp_row.T
        transposed = transposed[transposed == 1]
        skills = set(transposed.index)
        employees.append(
            {
                'name': emp_row['name'],
                'value': skills
            }
        )
    projects = []
    for _, project_row in project_data.iterrows():
        n = int(project_row['emp_count'])
        for i in range(n):
            projects.append(
                {
                    'absolute_name': project_row['name'],
                    'name': project_row['name'] + str(i),
                    'value': set(project_row[['domain', 'language', 'type']].values)
                }
            )
    matrix = []
    for e in employees:
        row = []
        for p in projects:
            if len(e['value'].intersection(p['value'])) >= 2:
                row.append(1)
            else:
                row.append(0)
        matrix.append(row)
    employee_count = len(employees)
    project_count = len(projects)
    # An array to keep track of the employees assigned to projects.
    # The value of emp_project_match[i] is the employee number
    # assigned to project i.
    # If value = -1 indicates nobody is allocated that project.
    emp_project_match = [-1] * project_count

    def bipartite_matching(employee, match, seen):
        """
        A recursive solution that returns true if a project mapping
        for employee is possible.
        :param employee: The employee for whom we are searching a project.
        :param match: Stores the assigned employees to projects.
        :param seen: An array to tell the projects available to employee.
        :return: `True` if match for employee is possible else `False`.
        """
        # Try every project one by one.
        for project in range(project_count):
            # If employee is fit for the project and the project has not yet been
            # checked by the employee.
            if matrix[employee][project] and seen[project] is False:
                # Mark the project as checked by employee.
                seen[project] = True
                # If project is not assigned to anyone or previously assigned to someone else
                # (match[project]) but that employee could find an alternate project.
                # Note that since the project has been seen by the employee above, it will
                # not be available to match[project].
                if match[project] == -1 or bipartite_matching(match[project], match, seen):
                    match[project] = employee
                    return True
        return False

    emp_allotted = 0
    for emp in range(employee_count):
        # Mark all projects as not seen for next applicant.
        projects_seen = [False] * project_count
        # Find if the employee can be assigned a project
        if bipartite_matching(emp, emp_project_match, projects_seen):
            emp_allotted += 1
    for p, e in enumerate(emp_project_match):
        if e != -1:
            allotments.append((employees[e]['name'], projects[p]['absolute_name']))
    return allotments, emp_allotted, project_count
13,791
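The core of allot_projects is a Kuhn-style augmenting-path matching. This standalone sketch runs the same idea on a hand-made compatibility matrix, without the pandas bookkeeping, so the evict-and-reassign step is easy to follow.

# Rows are employees, columns are project slots; 1 means the skills overlap enough.
matrix = [
    [1, 1, 0],
    [1, 0, 0],
    [0, 1, 1],
]
employee_count, project_count = len(matrix), len(matrix[0])
match = [-1] * project_count          # match[p] = employee currently holding project p

def try_assign(employee, seen):
    for project in range(project_count):
        if matrix[employee][project] and not seen[project]:
            seen[project] = True
            # take a free project, or evict the current holder if they can move elsewhere
            if match[project] == -1 or try_assign(match[project], seen):
                match[project] = employee
                return True
    return False

assigned = sum(try_assign(e, [False] * project_count) for e in range(employee_count))
print(assigned)   # 3 -- every employee is placed
print(match)      # [1, 0, 2]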
def test_ls_name_pattern(cmds): """ Validate we can list object matching provided node name pattern.""" cmds.createNode("transform", name="transformA") actual = cmds.ls("transform*") assert actual == [u"transformA"]
13,792
def set_threshold(scad, threshold): """ Set the threshold in the .sCAD file. None value is ignored and the existing value is kept. """ if threshold: scad['eom_etree'].set('warningThreshold', threshold)
13,793
def upload_record(data, headers, rdr_project_id): """ Upload a supplied record to the research data repository """ request_url = f"https://api.figsh.com/v2/account/projects/{rdr_project_id}/articles" response = requests.post(request_url, headers=headers, json=data) return response.json()
13,794
def datetime_to_ts(str_datetime): """ Transform datetime representation to unix epoch. :return: """ if '1969-12-31' in str_datetime: # ignore default values return None else: # convert to timestamp if '.' in str_datetime: # check whether it has milliseconds or not dt = tutil.strff_to_date(str_datetime) else: dt = tutil.strf_to_date(str_datetime) ts = tutil.date_to_ts(dt) return ts
13,795
def is_codenames_player(funct): """ Decorator that ensures the method is called only by a codenames player. Args: funct (function): Function being decorated Returns: function: Decorated function which calls the original function if the user is a codenames player, and returns otherwise """ @functools.wraps(funct) def wrapper(*args, **kwargs): if not current_user.is_authenticated or current_user.codenames_player is None: return None return funct(*args, **kwargs) return wrapper
13,796
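A sketch of the decorator in use; the handler name and return payload are placeholders of mine. Callers that are not authenticated codenames players simply receive None.

# Hypothetical handler guarded by is_codenames_player.
@is_codenames_player
def handle_give_clue(clue_word, clue_count):
    return {'clue': clue_word, 'count': clue_count}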
def build_moduledocs(app): """Create per-module sources like sphinx-apidoc, but at build time and with customizations.""" srcdir = app.builder.srcdir moddir = srcdir + '/module' os.makedirs(moddir, exist_ok=True) basedir = os.path.dirname(srcdir) docs = [x[len(basedir)+1:-3].replace('/', '.').replace('.__init__', '') for x in glob35(basedir + '/aiocoap/**/*.py', recursive=True)] for x in docs: commonstart = textwrap.dedent("""\ {x} module ==================================================================================== """).format(x=x) if x in ('aiocoap.numbers', 'aiocoap.transports'): # They have explicit intros pointing out submodules and/or # describing any reexports text = commonstart + textwrap.dedent(""" .. automodule:: {x} .. toctree:: :glob: {x}.* """).format(x=x) elif x.startswith('aiocoap.cli.'): executablename = "aiocoap-" + x[len('aiocoap.cli.'):] # no ".. automodule:: {x}" because the doc string is already used # by the argparse, and thus would be repeated text = textwrap.dedent(""" {executablename} ============================== .. argparse:: :ref: {x}.build_parser :prog: {executablename} """).format(x=x, executablename=executablename) else: text = commonstart + textwrap.dedent(""" .. automodule:: {x} :members: :undoc-members: :show-inheritance: """).format(x=x) docname = "%s/%s.rst"%(moddir, x) if os.path.exists(docname) and open(docname).read() == text: continue else: with open(moddir + '/' + x + '.rst', 'w') as outfile: outfile.write(text) for f in os.listdir(moddir): if f.endswith('.rst') and f[:-4] not in docs: os.unlink(moddir + '/' + f)
13,797
def same_container_2(): """ Another reason to use `same_container=co.SameContainer.NEW` to force container sharing is when you want your commands to share a filesystem. This makes a download and analyze pipeline very easy, for example, because you simply download the data to the filesystem in one node, and the analyze node can automatically see it. There is no need to put the data in a separate data store. However, there is a downside to this `same_container` mode. When sharing a container, Exec nodes will _always run in serial_, even if the parent is a Parallel node. So, you lose the ability to parallelize. Also, when the SameContainer nodes finish, the container exits and that local filesystem is lost. To restore the container state you need to rerun all the nodes, making debugging or error resetting a little more awkward. """ dockerfile = "./docker/Dockerfile.curl" image = co.Image(dockerfile=dockerfile, context=".") with co.Parallel(image=image, doc=co.util.magic_doc()) as same_container_example: with co.Serial(name="shared_filesystem", same_container=co.SameContainer.NEW): data_url = "http://api.eia.gov/bulk/STEO.zip" co.Exec(f"curl {data_url} > /tmp/data.zip", name="download") co.Exec("unzip -pq /tmp/data.zip > /tmp/data", name="unzip") co.Exec("wc -l /tmp/data", name="analyze") with co.Parallel(name="always_serial", same_container=co.SameContainer.NEW): co.Exec("echo I cannot run in parallel", name="parallel_exec_1") co.Exec("echo even if I want to", name="parallel_exec_2") return same_container_example
13,798
def get(args) -> str: """Creates manifest in XML format. @param args: Arguments provided by the user from command line @return: Generated xml manifest string """ arguments = { 'target': args.target, 'targetType': None if args.nohddl else args.targettype, 'path': args.path, 'nohddl': args.nohddl } manifest = ('<?xml version="1.0" encoding="utf-8"?>' + '<manifest>' + '<type>config</type>' + '<config>' + '<cmd>get_element</cmd>' + '{0}' + '<configtype>' + '{1}' + '<get>' + '{2}' + '</get>' + '</configtype>' + '</config>' + '</manifest>').format( create_xml_tag(arguments, "targetType"), create_xml_tag(arguments, "target"), create_xml_tag(arguments, "path") ) print("manifest {0}".format(manifest)) return manifest
13,799