content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def getkey(value, key):
    """Return the item stored under *key* in the container *value*."""
    item = value[key]
    return item
708ee08610b97180be0e0c118646ff853bc7b2a6
700,391
def spring1s(ep, ed):
    """Compute element force in spring element (spring1e).

    :param float ep: spring stiffness or analog quantity
    :param list ed: element displacements [d0, d1]
    :return float es: element force [N]
    """
    stiffness = ep
    elongation = ed[1] - ed[0]
    return stiffness * elongation
8253fcde40ecd1b66d7db99348297f2239faa23d
700,392
import subprocess

def run_cmd(cmd):
    """Run a console command and return its stripped stdout (bytes).

    On a non-zero exit status the stderr output is printed and execution
    continues.

    NOTE(review): ``shell=True`` executes *cmd* through the shell — do
    not pass untrusted input.
    """
    process = subprocess.Popen(cmd, shell=True,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
    stdout, stderr = process.communicate()
    # != 0 also catches signal deaths (negative return codes), which the
    # previous ``> 0`` comparison silently missed.
    if process.returncode != 0:
        print("The command " + cmd + " returned with errors!")
        print("Printing stderr and continuing")
        print(stderr)
    return stdout.strip()
7e940861c95a0544105b2a4c08f83eec29d1fa11
700,393
def _validate_workflow_var_format(value: str) -> str: """Validate workflow vars Arguments: value {str} -- A '.' seperated string to be checked for workflow variable formatting. Returns: str -- A string with validation error messages """ add_info = '' parts = value.split('.') if len(parts) == 2: # workflow.id, workflow.name, item.field_name typ, attr = parts if attr not in ('name', 'id'): add_info = 'The only valid workflow variables with two segments' \ ' are workflow.name and workflow.id.' elif len(parts) == 4: # workflow.inputs.parameters.<NAME> # workflow.inputs.artifacts.<NAME> # workflow.operators.<OPERATORNAME>.image typ, attr, prop, _ = parts if attr in ('inputs', 'outputs'): if prop not in ('parameters', 'artifacts'): add_info = 'Workflow inputs and outputs variables must be ' \ '"parameters" or "artifacts".' elif attr == 'operators': if parts[-1] != 'image': add_info = 'Workflow operator variable can only access ' \ 'the image name.' else: add_info = 'Workflow variables must reference to "inputs", "outputs" ' \ 'or "operators".' else: add_info = 'Workflow variables are either 2 or 4 segments.' return add_info
01f58acc27d9b04b59e4991094e2bda7f125f54b
700,394
def _calspec_file_parse_name_(stdname): """ """ return stdname.replace("+","_").lower()
43825c1a5b7b7c55a57031e15810a7b6b711d729
700,395
import re

def remove_comments(codelines):
    """Strip '#' comments from every line in *codelines*.

    Returns a new list; the input list is not modified.
    """
    return [re.sub("#.*", "", line) for line in codelines]
50cbb10d14f111aac6ccc05fec6dd35842a272cd
700,396
import pandas

def raw_difference_frame(raw_model, mean_frame, **options):
    """Create a difference pandas.DataFrame given a raw NIST model and a
    mean pandas.DataFrame.

    For every row of ``raw_model.data`` the mean-frame row whose
    "Frequency" matches within .01 is subtracted column-wise.

    :param raw_model: object with a ``data`` attribute (list of rows
        whose first entry is the frequency) — project type.
    :param mean_frame: pandas.DataFrame with a "Frequency" column.
    :param options: may override ``column_names`` (defaults to the
        columns of *mean_frame*).
    :return: pandas.DataFrame of the differences.
    """
    difference_options = {"column_names": mean_frame.columns.tolist()}
    difference_options.update(options)
    difference_list = []
    for row in raw_model.data[:]:
        # .to_numpy() replaces DataFrame.as_matrix(), removed in pandas 1.0
        mean_row = mean_frame[
            abs(mean_frame["Frequency"] - row[0]) < abs(.01)].to_numpy()
        try:
            mean_row = mean_row[0]
            difference_row = [row[i + 2] - mean_row[i]
                              for i in range(1, len(mean_row))]
            difference_row.insert(0, row[0])
            difference_list.append(difference_row)
        except IndexError:
            # no matching frequency (or a short row): skip this row,
            # matching the old best-effort behaviour without hiding
            # unrelated exceptions behind a bare except
            pass
    return pandas.DataFrame(difference_list,
                            columns=difference_options["column_names"])
9af16e87791e23516e9ed7a3e287089716b6a98c
700,397
def diff_2nd_xx(fp, f0, fm, eps):
    """Evaluate an on-diagonal second-derivative term by central differences."""
    curvature = fp - 2.0 * f0 + fm
    return curvature / eps ** 2
8e46af3a52f75b3ad31ce93a9e12737b0a64872e
700,398
def valid_client_request_body(initialize_db):
    """Fixture producing a request body for a valid client model.

    Args:
        initialize_db (None): initializes the database and drops tables
            when the test function finishes.
    """
    body = {'username': 'Leroy Jenkins', 'avatar_url': ''}
    return body
be2655fc5f338642d5e5901304195bb7d617528c
700,399
def HexToRGB(hex_str):
    """Return a [red, green, blue] list parsed from a hex colour string.

    @param hex_str: hex string to convert to rgb; a single leading '#'
        is ignored and short strings are right-padded with '0'
    """
    hexval = hex_str[1:] if hex_str[0] == u"#" else hex_str
    # negative repetition yields '' for strings already six digits or more
    hexval = hexval + (6 - len(hexval)) * u"0"
    parts = (hexval[0:2], hexval[2:4], hexval[4:])
    return [int(part, 16) for part in parts]
8d6129c1b660a9d928584c8b2263019ca3b06865
700,400
import subprocess

def is_file_tracked(file, git="git", cwd=None):
    """Check whether *file* is tracked by a git repository.

    Args:
        file: relative path to file within a git repository
        git: the git executable to invoke
        cwd: optional path to change to before executing the command

    Returns:
        True if the given file is tracked, False otherwise.
    """
    command = [git, "ls-files", "--error-unmatch", file]
    status = subprocess.call(
        command,
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL,
        cwd=cwd,
    )
    return status == 0
9cfc47515768bf55016119880048fc5ab0311107
700,402
def getBoardStr(board):
    """Return a text-representation of the board.

    *board* maps the string keys '1'..'9' to cell marks; the digits to
    the right of each row label the cell positions.

    NOTE(review): the exact whitespace of this template was lost when
    the source was flattened — confirm against the rendered output.
    """
    return '''
{}|{}|{} 1 2 3
-+-+-
{}|{}|{} 4 5 6
-+-+-
{}|{}|{} 7 8 9'''.format(board['1'], board['2'], board['3'],
                         board['4'], board['5'], board['6'],
                         board['7'], board['8'], board['9'])
0bd05b2bf33477a7ba8115c3b5d9a7c8a4a1563c
700,403
import hashlib

def seed_from_str(s: str) -> int:
    """Obtain a deterministic 32-bit integer seed from a string.

    Uses SHA-256 instead of the builtin ``hash()``: string hashing is
    salted per process (PYTHONHASHSEED), so the previous implementation
    returned a different "seed" on every interpreter run, defeating
    reproducibility.
    """
    digest = hashlib.sha256(s.encode("utf-8")).digest()
    return int.from_bytes(digest[:4], "big") % (2 ** 32)
2fd808916f349102c15db945cc60b5d3793e50b7
700,404
import re

def cleanse_sentences(tweet_list: list) -> list:
    """Cleanse tweets: drop links, retweet tags, non-ASCII characters and
    a fixed set of special characters; tweets that end up empty are
    discarded.

    :param tweet_list: List containing tweets
    :return: Cleansed list of strings.
    """
    # single-character removals are order-independent, so one translate
    # pass replaces the long chain of .replace() calls
    drop_table = str.maketrans("", "", "\\\n\r}{[]*_/`|~")
    cleansed = []
    for raw in tweet_list:
        text = re.sub(r'http\S+', '', raw)
        text = re.sub(r'(RT|rt)( @\w*)?[: ]', '', text)
        text = text.lower()
        text = text.encode('ascii', 'ignore').decode("utf-8")
        text = text.translate(drop_table)
        text = text.replace("...", "").replace("....", "")
        text = text.strip()
        if text:
            cleansed.append(text)
    return cleansed
11d0641ea747beb8569a4600c553268ee197cbd4
700,405
def Pearson(endog, exdog):
    """Calculate the Pearson correlation coefficient.

    :param endog: The dependent variable. DataFrame
    :param exdog: The independent variable. Series
    :return: Pearson coefficient(s) as computed by ``corrwith``
    """
    return exdog.corrwith(endog)
cbb1e36c91cd969758b0d456d627fcb6798353de
700,406
def destos_to_binfmt(key):
    """
    Returns the binary format based on the unversioned platform name,
    and defaults to ``elf`` if nothing is found.

    :param key: platform name
    :type key: string
    :return: string representing the binary format
    """
    pe_platforms = ('win32', 'cygwin', 'uwin', 'msys')
    if key == 'darwin':
        return 'mac-o'
    return 'pe' if key in pe_platforms else 'elf'
89e1233f494a80ea4b0dd9ef2053352e462e3b3d
700,407
def to_int(s, default=None):
    """Attempt to convert the provided value to an integer.

    :param s: The value to convert
    :param default: Default value to return if it cannot be converted
    :returns: The integer if converted, otherwise the default value
    """
    try:
        return int(s)
    except (ValueError, TypeError):
        # TypeError covers non-convertible inputs such as None, which
        # the previous ValueError-only handler let propagate
        return default
9f8214efc65035b433af22e9872cb4fe1e4e1cf7
700,408
def get_first_key(obj, key):
    """Return the value of the first occurrence of *key* in a nested
    structure of dicts and lists.

    Recursively searches *obj*, which should contain at least one dict.
    Returns None if the key is not found. Uses ``isinstance`` so dict
    and list subclasses (e.g. OrderedDict) are searched too, which the
    previous ``type(...) is`` checks skipped.
    """
    if isinstance(obj, dict):
        if key in obj:
            return obj[key]
        for value in obj.values():
            found = get_first_key(value, key)
            if found is not None:
                return found
    elif isinstance(obj, list):
        for item in obj:
            found = get_first_key(item, key)
            if found is not None:
                return found
    return None
034069fc0a9d73a78d387d6a78dae1ceea7c87b9
700,409
def equal_dicts(d1, d2, compare_keys=None, ignore_keys=None):
    """Check whether two dicts are equal, restricted to *compare_keys*
    or excluding *ignore_keys* (at most one may be given).
    """
    assert not (compare_keys and ignore_keys)
    # ``is None`` replaces the previous ``== None`` comparisons, which
    # could misfire on arguments with a permissive __eq__
    if compare_keys is None and ignore_keys is None:
        return d1 == d2
    elif compare_keys is None and ignore_keys is not None:
        return {k: v for k, v in d1.items() if k not in ignore_keys} == \
            {k: v for k, v in d2.items() if k not in ignore_keys}
    elif compare_keys is not None and ignore_keys is None:
        return {k: v for k, v in d1.items() if k in compare_keys} == \
            {k: v for k, v in d2.items() if k in compare_keys}
    else:
        assert False
c2d4a3dace8504750ee8003db06abcecdef928b6
700,411
def _get_esquinidad(estado): """ - Récord: 2048>512 - Corre muy rápido: (13.8 us +- 613 ns) / estado - Favorece la esquina superior izquierda """ esquinidad = 0 m, n = estado.shape for i in range(m): for j in range(n - 1): if estado[i, j] < estado[i, j + 1]: break esquinidad += 1 for j in range(n): for i in range(m - 1): if estado[i, j] < estado[i + 1, j]: break esquinidad += 1 return esquinidad
1b001a7aa74e8ca9ddc2148cd8aeb06bda4d53dc
700,412
def get_partitions(num_items, buckets, prefix):
    """
    Given a number of items and a number of buckets, return all possible
    combination of divider locations.

    Result is a list of lists, where each sub-list is set of divider
    locations. Each divider is placed after the 1-based index provided
    (or, alternately, *before* the zero-based index provided).
    """
    partitions = []
    num_dividers = len(prefix)
    # The next divider must come strictly after the last one placed.
    if num_dividers:
        last_divider_loc = prefix[-1]
    else:
        last_divider_loc = 0
    # Dividers still to place; buckets - 1 are needed in total.
    remaining_dividers = (buckets - 1) - num_dividers
    # Upper bound leaves room for the dividers that still follow.
    for next_divider_loc in range(last_divider_loc + 1, num_items - (remaining_dividers - 1)):
        new_prefix = prefix.copy() + [next_divider_loc]
        if remaining_dividers == 1:
            # This was the final divider: the prefix is a full partition.
            partitions.append(new_prefix)
        else:
            # Recurse to place the remaining dividers.
            partitions.extend(get_partitions(num_items, buckets, new_prefix))
    return partitions
503d7afcb544e231c00a83ed77adfe95c4283d16
700,413
def split_cfg_comma(s):
    """Split *s* on top-level commas only; commas nested inside
    parentheses are kept. The simplest and dumbest parser."""
    pieces = [""]
    depth = 0
    for ch in s:
        if depth == 0 and ch == ",":
            pieces.append("")
            continue
        if ch == "(":
            depth += 1
        elif ch == ")":
            depth -= 1
        pieces[-1] += ch
    return [piece.lstrip() for piece in pieces]
821033e6acc10618cd5927ae645136d71894ef57
700,414
def normalize_rate(rate):
    """
    Replace any 'n/a' string values in the rate-limit dict with None.

    The dict is modified in place and also returned.

    :param rate: dictionary
    :return: dictionary
    """
    for key, value in list(rate.items()):
        if value == "n/a":
            rate[key] = None
    return rate
1539cd2059724be8b474d2b81b761164837274c8
700,415
import re

def parse_tf(constants : dict) -> set:
    """Read user configured variables in variables tf and return the entire set

    Args:
        constants: config read from config.yml

    Returns:
        all variables defined in variables.tf (minus those provided by
        the orc8r-aws outputs)
    """
    magma_root = constants['magma_root']
    tf_root = f'{magma_root}/orc8r/cloud/deploy/terraform'
    # parse input variables
    orc8r_var_fn_list = [
        f'{tf_root}/orc8r-aws/variables.tf',
        f'{tf_root}/orc8r-helm-aws/variables.tf'
    ]
    # matches lines of the form: variable "<name>" {
    var_search_re = re.compile(r'variable\s\"(?P<variable_name>\w+?)\"\s\{')
    actual_vars = set()
    for fn in orc8r_var_fn_list:
        with open(fn) as f:
            # parse variables
            for line in f.readlines():
                m = var_search_re.search(line)
                if m and m.group('variable_name'):
                    actual_vars.add(m.group('variable_name'))
    # parse output variables
    orc8r_var_fn_list = [
        f'{tf_root}/orc8r-aws/outputs.tf',
    ]
    # remove variables which are set through outputs
    output_search_re = re.compile(r'output\s\"(?P<variable_name>\w+?)\"\s\{')
    for fn in orc8r_var_fn_list:
        with open(fn) as f:
            # parse variables
            for line in f.readlines():
                m = output_search_re.search(line)
                if m and m.group('variable_name'):
                    output_var = m.group('variable_name')
                    if output_var in actual_vars:
                        actual_vars.remove(output_var)
    return actual_vars
814dcde6bb7b84c9898387f1eb8c3c7d82cf117d
700,416
def _update_sheet_with_totals(worksheet, totals_cell_list, coinbase_account) -> None: """ Update the worksheet with totals """ # Set cell values with totals totals_cell_list[0].value = coinbase_account['current_value'] totals_cell_list[1].value = coinbase_account['current_unrealized_gain'] totals_cell_list[2].value = coinbase_account['current_performance'] worksheet.update_cells(totals_cell_list) return None
70f5ec018c3fbc23e645abad7d55fffade0d6dbc
700,417
def filterdictvals(D, V):
    """
    Copy of dict D with every entry whose value equals V removed.

    filterdictvals(dict(a=1, b=2, c=1), 1) => {'b': 2}
    """
    return dict((key, val) for key, val in D.items() if val != V)
5bfafd8d5c383791b2dd3f792c48cf7a6c16a9de
700,418
def extract_ace(archive, compression, cmd, verbosity, interactive, outdir):
    """Build the command list used to extract an ACE archive."""
    target_dir = outdir if outdir.endswith('/') else outdir + '/'
    return [cmd, 'x', archive, target_dir]
f4952f9e484e5f31fbe243318bb875ca668de6b6
700,419
def solve2(wires):
    """
    Count ordered pairs (w1, w2) where w1 starts before w2 and ends
    after it. Brute-force O(N**2) is fine: at most (10**4)**2 = 10**8
    comparisons.
    """
    return sum(1 for a in wires for b in wires
               if a[0] < b[0] and a[1] > b[1])
963591c2b41c30a1e6c4817dd61dc0f106730a5b
700,420
def parse_regions(text): """Return a list of (start, end) tuples.""" _regions = [] region_pairs = text.strip().split(",") for region_pair in region_pairs: split_pair = region_pair.split("..") start = split_pair[0] end = split_pair[1] _regions.append([start, end]) return _regions
20a0de9fe4f75fbdc9c8f006d675e53f0b305d62
700,421
def handle_already_linked(
    media_list: list, offline_types: list = None
) -> list:
    """Remove items from media-list that are already linked to a proxy.

    Since re-rendering linked clips is rarely desired behaviour, we remove
    them without prompting. If we do want to re-render proxies, we can
    unlink them from Resolve and we'll be prompted with the 'unlinked
    media' warning. By default if a proxy item is marked as 'Offline' it
    won't be removed since we most likely need to re-render it.

    Args:
        media_list: list of dictionary media items to check for linked proxies.
        offline_types: list of strings of `Proxy` types to keep in returned
            `media_list` even if linked; defaults to ["Offline", "None"].

    Returns:
        media_list: refined list of dictionary media items that are not
            linked to a proxy.
    """
    # avoid the shared mutable default-argument pitfall
    if offline_types is None:
        offline_types = ["Offline", "None"]
    print(f"[cyan]Checking for source media with linked proxies.[/]")
    already_linked = [x for x in media_list if str(x["Proxy"]) not in offline_types]
    if len(already_linked) > 0:
        print(f"[yellow]Skipping {len(already_linked)} already linked.[/]")
        media_list = [x for x in media_list if x not in already_linked]
    print("\n")
    return media_list
4ccd28c924e5ffcf803c0f4de8f5cb95b9baca40
700,422
def _update_user_inputs(kwargs, default_parameters, check_parameters):
    """Update keyword parameters with user inputs if applicable.

    Returns the merged parameters dict and the list of leftover kwargs
    keys that were not recognised.
    """
    parameters = default_parameters.copy()
    # update the parameter with user input
    not_needed = []
    for key in check_parameters:
        value = kwargs.pop(key, None)  # get user input
        if value is not None:
            parameters[key] = value
        # global_signal parameter is not in default strategy, but
        # applicable to every strategy other than compcor
        # global signal strategy will only be added if user has passed a
        # recognisable value to the global_signal parameter
        if key == "global_signal":
            if isinstance(value, str):
                parameters["strategy"] += ("global_signal", )
            else:
                # remove global signal if not updated
                parameters.pop("global_signal", None)
    # collect remaining parameters in kwargs that are not needed
    not_needed = list(kwargs.keys())
    return parameters, not_needed
9e67e30cddf8529bf492cd7243bdbada40ef97fa
700,424
import re def _get_numbers_from_string(string): """ Return a list of numbers (strings) that appear in parameter string. Match integers, decimals and numbers such as +1, 2e9, +2E+09, -2.0e-9. """ numbers = re.findall(r"[-+]?(?:(?:\d+\.\d+)|(?:\d+))(?:[Ee][+-]?\d+)?", string) return numbers
970d57aaec18bdc6ab4a9c55f6445fbbc74998d7
700,426
def filter_by_response(words, guess, response):
    """
    Filter *words* using a Wordle-style *response* to *guess*:
    '.' = letter absent, uppercase = letter in this position,
    lowercase = letter present but elsewhere.
    """
    for pos, letter in enumerate(guess):
        mark = response[pos]
        if mark == ".":
            words = [w for w in words if letter not in w]
        elif mark == letter.upper():
            words = [w for w in words if w[pos] == letter]
        elif mark == letter:
            words = [w for w in words if w[pos] != letter and letter in w]
    return words
77a678f6603fb16e34a5b74a3350e560efa4f82d
700,427
def GetVersion(vm):
    """Return the version banner of the memcached server installed on *vm*."""
    command = ('memcached -help |grep -m 1 "memcached"'
               '| tr -d "\n"')
    stdout, _ = vm.RemoteCommand(command)
    return stdout
ba0a16d7e9f8f7702d6f5f41595a6a47a39e0f6b
700,428
def get_sentiment_compound(value):
    """
    Classify a compound score as "Positive", "Negative" or "Neutral".

    :param value: floating number
    :return: sentiment label for *value*
    """
    if value > 0:
        return "Positive"
    if value < 0:
        return "Negative"
    return "Neutral"
7feae8fd085efb9a2e684844a25ab1a1d9b3dad0
700,430
def run_map(generator, mapper) -> list:
    """
    Apply *mapper* to every line of every file and concatenate the results.

    :param generator: iterable of (file_key, lines) pairs
    :param mapper: map function called as mapper(file_key, line), returning
        an iterable of key/value pairs
    :return: flat list of all key/value pairs

    Unlike the previous nested-``reduce`` implementation, an empty
    *generator* (or a file with no lines) yields [] instead of raising
    TypeError from ``reduce`` with no initial value.
    """
    return [pair
            for file_key, lines in generator
            for line in lines
            for pair in mapper(file_key, line)]
468467f9274572d82b8843449ffe9691b68331cc
700,431
from typing import List
import traceback

def logger(func):
    """Decorator that logs calls and results of *func*; on failure it
    logs the traceback and returns an error payload instead of raising.
    """
    def wrapper(*args, **kwargs):
        try:
            print(f'start {func.__name__}, args: {args}, kwargs: {kwargs}')
            result = func(*args, **kwargs)
            print(f'finish {func.__name__}, result: {result}')
            return result
        except Exception:
            # narrowed from a bare ``except`` so KeyboardInterrupt and
            # SystemExit still propagate
            formatted_lines: List[str] = traceback.format_exc().splitlines()
            print(formatted_lines)
            return {
                "success": False,
                "message": '\n'.join(formatted_lines)
            }
    return wrapper
99b566c50585bf4514c43b76405fcf1dd43ff948
700,432
import os

def create_segment_mfcc_export_dirs(export_path: str, segment_parent_dirs: list) -> list:
    """
    Create export directories for mfccs for song segments and return them
    as a list of file paths.

    :param export_path: directory under which per-segment dirs are created
    :param segment_parent_dirs: list of segment parent directory paths
    :return: list of created per-segment export directory paths
    """
    # exist_ok avoids the isdir/mkdir race of the old check-then-create
    os.makedirs(export_path, exist_ok=True)
    song_segment_mfcc_export_paths = []
    for parent in segment_parent_dirs:
        seg_dir = os.path.join(export_path, os.path.basename(parent))
        os.makedirs(seg_dir, exist_ok=True)
        song_segment_mfcc_export_paths.append(seg_dir)
    return song_segment_mfcc_export_paths
501828589070a941904f71ccc6ac897e240ccd37
700,433
def put_pm_to_pandas_data(data: dict) -> dict:
    """
    Change the +- to \pm for latex display.

    Note: to have pandas render the strings verbatim, pass escape=False,
    e.g. df.to_latex(index=False, escape=False, caption=..., label=...).

    ref:
        - https://stackoverflow.com/questions/70008992/how-to-print-a-literal-backslash-to-get-pm-in-a-pandas-data-frame-to-generate-a
    """
    for column, values in data.items():
        data[column] = [v.replace('+-', ' $\pm$ ') for v in values]
    return data
50193af607b8321601f35350283386021509b1bd
700,434
def nrvocale(text):
    """Count how many (lower-case) vowels appear in *text*."""
    return sum(1 for ch in text if ch in 'aeiou')
083d0bd8e8954a795fe36abaf567472561a9e13f
700,435
import pathlib

def abst_path(path):
    """Return a PurePath for the string *path*, after asserting that it
    is an absolute (rooted) path."""
    pure = pathlib.PurePath(path)
    assert pure.is_absolute()
    return pure
b86ecd8585575e3642ddd1ff54de91eb1cd5d6d9
700,436
import mpmath
import typing

def default_val(utype):
    """ Returns a generic default value for a given type"""
    if utype is int:
        return 0
    if utype is float:
        return 0.
    if utype is mpmath.mpf:
        return mpmath.mpf("0.0")
    if utype is str:
        return ""
    if utype is bool:
        return False
    if utype is type(None):
        return None
    origin = typing.get_origin(utype)
    if origin is typing.Literal:
        # first element of the Literal
        return typing.get_args(utype)[0]
    if origin is typing.Union:
        # default of the first member of the Union
        return default_val(typing.get_args(utype)[0])
    raise NotImplementedError("No default for this subtype {}".format(
        utype))
aa2fc3cbba5db3ddee6ff20fa86a53f28d9381bc
700,437
def nvl(value, default):
    """
    Return *value* if it is truthy, otherwise *default*.

    Parameters:
        value: the value to evaluate
        default: the default value

    Returns:
        value or default
    """
    return value if value else default
67df45a6e63c107dcef99fc7bdbaa7064b695f66
700,438
def derive_aggregation(dim_cols, agg_col, agg):
    """Produce a consistent aggregation spec from optional column specs.

    This utility handles flexible chart inputs: missing dimensions,
    missing aggregation, and/or missing column to aggregate on.
    """
    if dim_cols is None or dim_cols == 'index' or agg_col == 'index':
        return None, None
    if agg_col is None:
        chosen = dim_cols[0] if isinstance(dim_cols, list) else dim_cols
        return chosen, 'count'
    return agg_col, agg
402c92a14b81d0e07ab4d36ccfd0854d0059d666
700,439
def create_list_from_file(input_file):
    """Read a file into a list of lists, each line split once on
    whitespace into [checksum, file name].

    NOTE(review): the original comment says the fields are separated by a
    double whitespace, but the separator literal reads as a single space
    after flattening — confirm against real input files.
    """
    with open(input_file) as handle:
        return [line.strip().split(' ', 1) for line in handle]
44008af32ead4ceff3597e8c479c100a12c7de15
700,440
import hashlib

def hash_password(password):
    """
    MD5 hex digest of *password*, except every even-indexed '0' digit is
    replaced by 'c'.
    """
    digest = list(hashlib.md5(password.encode("utf-8")).hexdigest())
    for i in range(0, len(digest), 2):
        if digest[i] == "0":
            digest[i] = "c"
    return "".join(digest)
c806cbe22b99cd861ef4c9d19f971e6d61216c72
700,441
def test_inside_lambda():
    """
    >>> obj = test_inside_lambda()()
    >>> next(obj)
    1
    >>> next(obj)
    2
    >>> next(obj)
    Traceback (most recent call last):
    StopIteration
    """
    # The lambda body contains ``yield``, so calling it returns a
    # generator producing 1 then 2; the outer tuple is never produced
    # as a value.
    return lambda:((yield 1), (yield 2))
f737fce5c04b1bb7a9b7b8a15edc60571732c2d8
700,442
def isnetid(s):
    """
    Returns True if s is a valid Cornell netid.

    Cornell network ids consist of 2 or 3 lower-case initials followed
    by a sequence of digits.

    Examples:
        isnetid('wmw2') returns True
        isnetid('2wmw') returns False
        isnetid('ww2345') returns True
        isnetid('w2345') returns False
        isnetid('WW345') returns False

    Parameter s: the string to check
    Precondition: s is a string
    """
    assert type(s) == str

    def _fits(prefix, suffix):
        # lower-case letters followed by digits, with no hyphen anywhere
        return (prefix.isalpha() and prefix.islower()
                and suffix.isnumeric() and ('-' not in s))

    return _fits(s[:2], s[2:]) or _fits(s[:3], s[3:])
d4ddd91a9a7a7a4e2e7de778525718ec41c42cbc
700,443
def delete_column(idf, list_of_cols, print_impact=False):
    """
    :param idf: Input Dataframe
    :param list_of_cols: List of columns to delete e.g., ["col1","col2"].
                         Alternatively, columns can be specified as a
                         pipe-delimited string, e.g. "col1|col2".
    :param print_impact: when True, print column counts before and after.
    :return: Dataframe after dropping columns
    """
    cols = list_of_cols
    if isinstance(cols, str):
        cols = [c.strip() for c in cols.split("|")]
    unique_cols = list(set(cols))
    odf = idf.drop(*unique_cols)
    if print_impact:
        print("Before: \nNo. of Columns- ", len(idf.columns))
        print(idf.columns)
        print("After: \nNo. of Columns- ", len(odf.columns))
        print(odf.columns)
    return odf
020ffa98855bdfa3de934122cbd6df79bc2c00e6
700,444
from pathlib import Path
import glob

def filter_paths(paths, excluded_paths):
    """Filter out paths matching any of the *excluded_paths* globs.

    Args:
        paths: paths to filter.
        excluded_paths: List of glob patterns of modules to exclude.

    Returns:
        The set of remaining paths to Python modules (i.e. *py files).
    """
    excluded = set()
    for pattern in excluded_paths:
        excluded.update(Path(match) for match in glob.glob(pattern, recursive=True))
    return set(paths) - excluded
8b8e9e19ebc0a47d84e1b55a497438b961af6764
700,445
def merge_runs_by_tag(runs, tags):
    """
    Collect the per-run 'step' and 'value' sequences for each tag.

    Arguments:
        runs (dict): Collection of data from all runs. Usually the output
            of `visualization.helpers.load_run`.
        tags (list): List of the tags to merge.

    Returns:
        dict: tag -> {'step': [per-run steps], 'value': [per-run values]}.
    """
    return {
        tag: {
            'step': [runs[run][tag]['step'] for run in runs],
            'value': [runs[run][tag]['value'] for run in runs],
        }
        for tag in tags
    }
90440ec8d718692978e617920eac52e0ea43bfce
700,446
import math

def compute(formula, key, amount=1):
    """Compute the amount of fuel needed to create component *key*,
    following the process chain described in *formula*."""
    # Base case: reaching FUEL just reports how much fuel is required
    if key == "FUEL":
        return amount
    needed = 0
    for reaction in formula[key]:
        # How much of the parent component is required for <amount> FUEL
        total = compute(formula, reaction[-1], amount)
        # Round the conversion up to whole reactions
        needed += int(math.ceil(total / float(reaction[1])) * reaction[0])
    return int(needed)
9c48f342f5771c8922ea19c101c613e34f884f4f
700,447
import numpy

def moment(array, substract_one_in_variance_n=True):
    """
    Calculate the first four statistical moments of a 1D array.

    :param array: input sequence
    :param substract_one_in_variance_n: use N-1 in the variance denominator
    :return: (mean, variance, skewness, kurtosis); kurtosis uses the
        Fisher definition (3 subtracted, so 0 for a Normal distribution)
    """
    data = numpy.array(array)
    mean = data.mean()
    squared = (data - mean) ** 2
    if substract_one_in_variance_n:
        variance = squared.sum() / (data.size - 1)
    else:
        variance = squared.sum() / (data.size)
    sd = numpy.sqrt(variance)
    cubed = (data - mean) ** 3
    skewness = cubed.sum() / sd ** 3 / data.size
    fourth = (data - mean) ** 4
    kurtosis = (fourth.sum() / sd ** 4 / data.size) - 3
    return mean, variance, skewness, kurtosis
9eae3d87b76ece9f6d351fe17de2bf439a447718
700,448
from typing import List
from typing import Tuple

def Gbk_presel(best_gain: List[int], cand1: int, cand2: int, gcode0: int) -> Tuple[int, int]:
    """
    Gain-codebook presearch (pass-through stub).

    # (i) [0] Q9 : unquantized pitch gain
    # (i) [1] Q2 : unquantized code gain
    # (o)        : index of best 1st stage vector
    # (o)        : index of best 2nd stage vector
    # (i) Q4     : presearch for gain codebook
    """
    selected = (cand1, cand2)
    return selected
1c88fd0c667199647ec9680071ec2ce620dea567
700,449
import struct

def getCommandString(commandCode):
    """Pack *commandCode* as a 4-byte little-endian value — the wire
    representation of a message code (returns bytes, not readable text)."""
    return struct.pack('<L', commandCode)
1e0c63a13f9a727cc4aac7a6d24eacad958b0040
700,450
def get_root_url(g, website_url):
    """Given website url, build the traversal to its root node."""
    traversal = g.V(website_url)
    traversal = traversal.hasLabel("website")
    return traversal.in_("links_to")
662eca8a6d1e6ea1e4e19d8807c3467154a8e715
700,451
def authenticated_user(client, account):
    """Create an authenticated user for a test."""
    email, password = 'test@gmail.com', 'my_password123'
    account.email = email
    account.set_password(password)
    account.save()
    client.login(email=email, password=password)
    return account
b1156f21ca94129fbf0dee8d0b0dbac834fbf59d
700,452
import torch

def prepare_values(y_true, y_pred):
    """Converts the input values to numpy.ndarray.

    Parameters
    ----------
    y_true : torch.tensor
        Either a CPU or GPU tensor.
    y_pred : torch.tensor
        Either a CPU or GPU tensor.

    Returns
    -------
    y_true, y_pred : numpy.ndarray
        Numpy.ndarray of the input tensor data.
    """
    def _to_numpy(tensor):
        # .numpy() only works on CPU tensors, so move off the GPU first
        if isinstance(tensor, torch.Tensor):
            if tensor.is_cuda:
                tensor = tensor.to(torch.device("cpu"))
            return tensor.numpy()
        return tensor

    return _to_numpy(y_true), _to_numpy(y_pred)
f5fa8a05a3d29c00d7ea310a91a09db1158524be
700,453
def get_pydot_attributes(index, dot):
    """Get the attributes of the first node of subgraph *index* in a
    pydot graph."""
    subgraph = dot.get_subgraphs()[index]
    first_node = subgraph.get_nodes()[0]
    return first_node.get_attributes()
8fa6fc43d2318f172785567127c1909a5b0e3587
700,454
def parse_taxon(file_name):
    """
    Read *file_name* and return the set of its stripped lines.

    :param file_name: path to a text file, one entry per line
    :return: set of stripped lines
    """
    with open(file_name, 'r') as handle:
        return {line.strip() for line in handle}
779626ad6c52c73d714408087f8f95ab85130735
700,455
import time

def time_convert(ens, cls_to, **kwds):
    """Convert *ens* to *cls_to*, timing and reporting the conversion."""
    start = time.time()
    converted = ens.convert_to(cls_to, **kwds)
    elapsed = time.time() - start
    print("Convert %s to %s with %i pdfs in %.2f s"
          % (type(ens.gen_obj), cls_to, ens.frozen.npdf, elapsed))
    return converted
0869d5c710f888c090a01ffd8d0ffa0108e8fa4d
700,456
import os

def install_user():
    """Return the current user from the USER environment variable.

    Raises Exception if USER is not set.
    """
    user = os.getenv('USER')
    if user is None:
        raise Exception("Unable to determine current user.")
    return user
ca83a4dfb83a3b7fe1ba9129afb39e387bfd059f
700,457
import html

def make_email_lists(items):
    """Make an HTML and a plain-text list of *items*, for use in emails.

    Returns an (html, text) pair; both are empty strings when *items*
    is empty.
    """
    if not items:
        return "", ""
    list_items = "".join(
        "<li>{}</li>".format(html.escape(item if item else "\'blank\'"))
        for item in items
    )
    htm = "<ul>{}</ul>".format(list_items)
    text = "\r\n".join(f" - {item}" for item in items)
    return htm, text
38fa2cd0abb1e95e1c8294050a5957ac48d2e7c7
700,458
def is_network_rate_error(exc):
    """
    :param exc: Exception thrown when requesting a network resource
    :return: True iff the exception indicates abused/throttled APIs
    """
    message = str(exc)
    return any(marker in message for marker in ("429", "Connection refused"))
6302dd39dcf536e0522fa801b245d21d7e3c5358
700,459
import os

def get_filenames(is_training, datadir):
    """Return (data_dir, labels): image paths prefixed with *datadir* and
    integer labels, read from the training-protocol files.

    NOTE(review): the protocol paths are absolute and user-specific —
    consider making them configurable.
    """
    assert os.path.exists(datadir), (
        'Can not find data at given directory!!')
    protocol = '/home/qnie/PycharmProjects/ntumotion/training_protocol'
    split = 'train' if is_training else 'test'

    def _read_lines(path):
        # one entry per line, stripped of surrounding whitespace;
        # ``with`` closes the handle (the old explicit close was redundant)
        with open(path) as f:
            return [line.strip() for line in f]

    # the train and test branches previously duplicated this logic verbatim
    data_dir = [datadir + line
                for line in _read_lines(f'{protocol}/fEDM_R_CS_{split}imgs.txt')]
    labels = [int(line)
              for line in _read_lines(f'{protocol}/fEDM_R_CS_{split}labels.txt')]
    return data_dir, labels
355e52ec93fc5b8b6922f68b4050524b13b8ddb7
700,460
def join_url(*sections):
    """
    Helper to build urls, with slightly different behavior from
    urllib.parse.urljoin: a leading '/' in a later section does not
    discard the path accumulated so far.

    >>> join_url('https://foo.bar', '/rest/of/url')
    'https://foo.bar/rest/of/url'
    >>> join_url('https://foo.bar/product', '/rest/of/url')
    'https://foo.bar/product/rest/of/url'
    >>> join_url('https://foo.bar/product/', '/rest/of/url')
    'https://foo.bar/product/rest/of/url'
    """
    trimmed = [section.strip('/') for section in sections]
    return '/'.join(trimmed)
ec535d3efcbf43a4fad4ed78e1b03e20e665d163
700,461
import traceback


def get_vertices(arcpyPolyline, reverse=False):
    """Return the points of a polyline feature class as an ordered list.

    Falsy points (e.g. None separators between parts) are skipped.
    On any error the traceback is printed and None is returned.

    :param arcpyPolyline: iterable of parts, each an iterable of points
    :param reverse: if True, return the points in reverse order
    :return: list of points (reversed when requested)
    """
    try:
        points = [pnt for part in arcpyPolyline for pnt in part if pnt]
        return points[::-1] if reverse else points
    except:
        print('{0}'.format(traceback.format_exc()))
94b377baeb008f07ee410955eb393b00ace1a176
700,462
def getTaggedCommit(repository, sha1):
    """Return the SHA-1 of the tagged commit.

    If the supplied SHA-1 names a commit object it is returned directly;
    if it names a tag object, the tag chain is followed until a commit is
    reached.  Any other object type yields None.
    """
    while True:
        git_object = repository.fetch(sha1)
        if git_object.type == "commit":
            return sha1
        if git_object.type != "tag":
            return None
        # First line of tag data is "object <sha1>"; take the sha1 part.
        first_line = git_object.data.split("\n", 1)[0]
        sha1 = first_line.split(" ", 1)[-1]
e9c8f9d41e22612fa4f29624c748f902e5f8db17
700,463
import re


def is_valid_hostname(hostname):
    """Check that the hostname passed in is a valid RFC-1123 hostname.

    Largely adapted from
    https://stackoverflow.com/questions/2532053/validate-a-hostname-string

    :param hostname: hostname string to validate
    :return: True if valid, False otherwise
    """
    # An empty string previously raised IndexError on hostname[-1];
    # it is simply invalid.
    if not hostname or len(hostname) > 255:
        return False
    if hostname[-1] == ".":
        hostname = hostname[:-1]  # strip exactly one trailing dot, if present
    # Each label: 1-63 alphanumeric/hyphen chars, not starting/ending with '-'.
    allowed = re.compile("(?!-)[A-Z0-9-]{1,63}(?<!-)$", re.IGNORECASE)
    return all(allowed.match(x) for x in hostname.split("."))
aacf57c7ef740c4cebfe008c67867921d4b8a958
700,464
def delta_soga_from_masso(m_orig, m_new, s_orig):
    """Infer a change in global average salinity from a change in mass.

    :param m_orig: original mass
    :param m_new: new mass
    :param s_orig: original global average salinity
    :return: inferred salinity change
    """
    mass_ratio = m_orig / m_new
    return s_orig * (mass_ratio - 1)
a0ffb18378c3d997ea8ab88286a11e34b9abbb71
700,465
def rreplace(s, old, new, occurrence=1):
    """Replace the last `occurrence` occurrence(s) of `old` in `s` with `new`."""
    pieces = s.rsplit(old, occurrence)
    return new.join(pieces)
92a3fdb0e5a2014debd6e3530c7c6c754ed45953
700,466
def get_cal_params(power_data_dict, particle_data, config_header, config_transducer):
    """Assemble per-channel calibration parameters from the unpacked file.

    Parameters come from config_header and config_transducer (both from
    the header) as well as particle_data (from the .RAW file).  One dict
    per channel in power_data_dict is returned.
    """
    cal_params = []
    ping = particle_data[0]
    for ch in range(len(power_data_dict)):
        xdcr = config_transducer[ch]
        cal_params.append({
            'soundername': config_header['sounder_name'],
            'frequency': xdcr['frequency'],
            'soundvelocity': ping['zplsc_sound_velocity'][ch],
            'sampleinterval': ping['zplsc_sample_interval'][ch],
            'absorptioncoefficient': ping['zplsc_absorption_coeff'][ch],
            'gain': xdcr['gain'],
            'equivalentbeamangle': xdcr['equiv_beam_angle'],
            'pulselengthtable': xdcr['pulse_length_table'],
            'gaintable': xdcr['gain_table'],
            'sacorrectiontable': xdcr['sa_correction_table'],
            'transmitpower': ping['zplsc_transmit_power'][ch],
            'pulselength': ping['zplsc_pulse_length'][ch],
            'anglesensitivityalongship': xdcr['angle_sensitivity_alongship'],
            'anglesensitivityathwartship': xdcr['angle_sensitivity_athwartship'],
            # key spelling below preserved from the original implementation
            'anglesoffsetalongship': xdcr['angle_offset_alongship'],
            'angleoffsetathwartship': xdcr['angle_offset_athwart'],
            'transducerdepth': ping['zplsc_transducer_depth'][ch],
        })
    return cal_params
081e3cf5d48d14056948617bc01d65205ed242aa
700,467
import re


def num_groups(aregex):
    """Return the number of capturing groups in the regexp `aregex`."""
    compiled = re.compile(aregex)
    return compiled.groups
3ce4dc9f08ec5ae2e0edfeac889c213569a3053e
700,468
import os


def normpath(path):
    """Return a normalized path in canonical form with forward slashes."""
    normalized = os.path.normpath(path)
    return normalized.replace(os.sep, '/')
90980985fe8ce8e4fbca517c8663c04ac7e518d3
700,469
def load_report(report):
    """Split the report string into a list of lists of "bits" (characters).

    Empty lines are dropped.
    """
    lines = report.split("\n")
    return [list(line) for line in lines if line]
18de97d81b174e03a760b1a051433bb23dd0fe7f
700,470
def attr_visitor_name(attr_name: str) -> str:
    """
    Return the visitor method name for `attr_name`, e.g.::

        >>> attr_visitor_name('class')
        'attr_class'
    """
    return f'attr_{attr_name}'
5fb31f9ea9d563ba05b4a80046c0b48ace35e9b5
700,471
from typing import List


def mk_string(strings: List[str], separator: str) -> str:
    """
    Creates a string from a list of strings, with a separator in between
    elements.

    :param strings: the list of strings.
    :param separator: the separator.
    :return: the string of elements separated by the separator.
    """
    # str.join is linear-time; the previous manual `+=` loop was quadratic.
    return separator.join(strings)
0230022eb3168c3dae92b6edb1cc0cdf252158d6
700,472
def Normalize(df):
    """Min-max normalize a pandas dataframe column-wise.

    Arguments
    ----------
    - df: pandas dataframe

    Return
    ----------
    - df: pandas dataframe
        The initial dataframe scaled to [0, 1] per column
    """
    col_min = df.min()
    col_max = df.max()
    return (df - col_min) / (col_max - col_min)
b16a196ea14c93d2100d8030ef1417a7112560c7
700,473
import os


def emp_img_exists(filepath):
    """Return the relative static path for an employee image, if hosted locally.

    If ``<cwd>/static/employees/<filepath>.gif`` exists, the employee image
    path is returned; otherwise the shared default image path is returned.

    NOTE(review): the previous implementation computed ``filepath.rfind('/')``
    but never used it — the fallback is always '/default.jpg'.  A
    remote-hosting variant using ``urllib.request.urlopen(STATIC_URL + ...)``
    existed in an earlier commented-out version.

    :param filepath: employee image path fragment (no extension)
    :return: static path string
    """
    if os.path.isfile(os.getcwd() + "/static/employees/" + filepath + ".gif"):
        return "employees/" + filepath + ".gif"
    return '/default.jpg'
c72748b82d21f74864478e0b84cb6d437ec11ac0
700,474
def _filter_featured_downloads(lst): """Filter out the list keeping only Featured files.""" ret = [] for item in lst: if 'Featured' in item['labels']: ret.append(item) return ret
d722fdd01966f1650575912715f8a6f07d793dda
700,475
def alphabet_index(text: str) -> str:
    """Replace each letter with its 1-based position in the alphabet.

    Non-letter characters are dropped; positions are space-separated.
    """
    positions = []
    for ch in text:
        idx = ord(ch.lower()) - 96
        if 1 <= idx < 27:
            positions.append(str(idx))
    return " ".join(positions)
1221477a924121f50abc79c6afc55fc09ebc88b9
700,476
def handle_no_range(node):
    """Return *node* with its range set to [0, 0] if not previously set.

    If either endpoint is missing (None), both are reset to 0.  The node
    is mutated in place and also returned.
    """
    # `is None` is the correct identity test (was `== None`).
    if node.range_start is None or node.range_end is None:
        node.range_start = 0
        node.range_end = 0
    return node
1baad2869cf769d6caac73e52d2799d04b1fc16d
700,477
def merge_kwargs(kwargs, defaults):
    """Helper function to merge ``kwargs`` into ``defaults``.

    Args:
        kwargs: Keyword arguments.
        defaults: Default keyword arguments (may be None).

    Returns:
        Merged keyword arguments (``kwargs`` overrides ``defaults``).
        When ``defaults`` is None, ``kwargs`` is returned unchanged.
    """
    if defaults is None:
        return kwargs
    merged = defaults.copy()
    merged.update(kwargs)
    return merged
09a887bbcefdd2e0795fee043354d4bdf8e806a8
700,478
def format_decimal(value):
    """Round value to 2 decimal places via string formatting."""
    return float(f"{value:.2f}")
694efd34d7e36d493a66ef25ab5a552f93eac087
700,479
import re


def rename_pretrained(name: str):
    """
    Matches the name of a variable saved in the pre-trained MobileNet
    networks with the name of the corresponding variable in this network.

    https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet_v1.md

    Parameters
    ----------
    name: the original name of the variable.

    Returns
    -------
    name: the new name of the variable.
    """
    # Substitutions applied in this exact order.
    substitutions = (
        (r"BatchNorm", r"batch_normalization"),
        (r"Conv2d_(\d+)_(\w+)/", r"Conv2d_\1/\2/"),
        (r"Conv2d_1c_1x1/biases", r"conv2d_final/bias"),
        (r"Conv2d_1c_1x1/weights", r"conv2d_final/kernel"),
        (r"depthwise_weights", r"weights"),
    )
    for pattern, replacement in substitutions:
        name = re.sub(pattern, replacement, name)
    return name
6913159aef0d9b4019af34dcd72984d3dd6870e9
700,480
def do_ldap_search(conn, query):
    """
    Search Yale LDAP for a particular netid; return the first entry or None.

    Command-line equivalent:

        ldapsearch \
            -H ldaps://ad.its.yale.edu:3269 \
            -D cn=s_klj39,OU=Non-Netids,OU=Users-OU,dc=yu,dc=yale,dc=edu \
            -w $LDAP_PASS "(&(objectclass=Person)(CN=klj39))"
    """
    filter_str = "(&(objectclass=Person)({0}))".format(query)
    conn.search("", filter_str, attributes="*")
    try:
        first_entry = conn.entries[0]
    except IndexError:
        return None
    return first_entry
183d126b3dda117a42d1a8a93a5a90fbd752eb92
700,481
def anagram(word1, word2):
    """Determine whether two words are anagrams of each other.

    Compares the sorted character sequences.  The previous implementation
    compared sums of ASCII values, which yields false positives for
    different letter multisets with equal ord-sums (e.g. "ad" vs "bc").
    """
    # Different lengths can never be anagrams; cheap early exit.
    if len(word1) != len(word2):
        return False
    return sorted(word1) == sorted(word2)
d90d58723986da8fdab4c01433d074cb2f0151b2
700,482
def build_table_def(table_name, keys, additional_attributes=None, global_secondary_indices=None):
    """
    Creates a minimal dynamodb definition suitable for use with localstack.

    Args:
        table_name: The full name of the test table
        keys: The key definitions to use - a list of 4-tuples
              (<name>, <key_type>, <data_type>, <index_name>).
              For example - ('cz_res_id', 'HASH', 'S', 'MyGSI')
        additional_attributes: additional attributes, beyond the keys,
              that need to be defined
        global_secondary_indices: The list of keys for which global
              secondary indices will be generated.  Must be a subset of
              'keys'.
    Returns:
        A dict containing the table def - suitable for use with boto3.
    """
    throughput = {'ReadCapacityUnits': 1, 'WriteCapacityUnits': 1}
    attributes = [{'AttributeName': name, 'AttributeType': data_type}
                  for name, _key_type, data_type, _index in keys]
    for extra in (additional_attributes or []):
        attributes.append({'AttributeName': extra[0], 'AttributeType': extra[1]})
    table_def = {
        'TableName': table_name,
        'KeySchema': [{'AttributeName': name, 'KeyType': key_type}
                      for name, key_type, _dt, _idx in keys],
        'AttributeDefinitions': attributes,
        'ProvisionedThroughput': throughput,
    }
    if global_secondary_indices:
        # GSI key attributes are (re-)appended, matching the original
        # behavior even when they duplicate entries already present.
        for name, _kt, data_type, _idx in global_secondary_indices:
            attributes.append({'AttributeName': name, 'AttributeType': data_type})
        table_def['GlobalSecondaryIndexes'] = [
            {
                'IndexName': f'{idx_name}',
                'ProvisionedThroughput': throughput,
                'KeySchema': [
                    {
                        'AttributeName': name,
                        'KeyType': key_type,
                    }
                ],
                'Projection': {'ProjectionType': 'ALL'}
            }
            for name, key_type, _dt, idx_name in global_secondary_indices
        ]
    return table_def
84b124ac0a084f05685ed4e636fb8eda78317c07
700,483
def execution_has_failures(playbook_results):
    """Return True if any result carries ansible-playbook's failure code 2.

    Accepts either a single return code or a list of return codes.
    """
    # isinstance replaces the `type(...) != list` anti-pattern and also
    # accepts list subclasses.
    if not isinstance(playbook_results, list):
        playbook_results = [playbook_results]
    return 2 in playbook_results
1c8ea2767a78ca72147f65f3ca5fa90018f8c141
700,484
def element2obs(obs, allow_null=False):
    """
    Remaps the old format to the new api Observation model for a single
    observation.  Returns a dictionary, not an Observation instance.

    Empty answers yield {} (non-empty strings are required for now;
    ``allow_null`` is currently unused).
    """
    # For now we require non-empty strings.
    if not obs['answer']:
        return {}
    _obs = {'value': obs['answer']}
    # First key of the element dict identifies the node.
    # NOTE(review): `obs.keys()[0]` was Python-2-only (dict_keys is not
    # subscriptable); next(iter(...)) is the Python 3 equivalent.
    _obs['node'] = next(iter(obs))
    _obs['concept'] = obs['concept']
    return _obs
49dd82639b5099eed9b7251ddde903429c0fff38
700,485
def parse_load_balancer_name(load_balancer_arn):
    """Extract the load balancer name from its ARN.

    Example:
        'arn:aws:elasticloadbalancing:us-east-1:881508045124:loadbalancer/app/alb-1/72074d479748b405'
        yields 'alb-1'.

    :return: load balancer name (second-to-last '/'-separated segment)
    """
    segments = load_balancer_arn.split('/')
    return segments[-2]
c64e4545d30c18bca2c05df3433958e012f0ed47
700,486
from pathlib import Path


def get_paths(dir_name, glob):
    """Return a generator of paths under ./<dir_name> matching `glob` recursively."""
    root = Path('./{}'.format(dir_name))
    return root.rglob(glob)
6803981e40397d000900dd0a8fc8ee32eddc6bc4
700,487
def inverse_relation(dst, rel):
    """
    Similar to :meth:``forwards_relation`` but selects the source nodes
    instead, given a destination node.

    :param dst: The destination node.
    :param rel: The relation (used as the table name).
    :return: (sql, params) tuple with `dst` as the single bound parameter.
    """
    query = 'SELECT src FROM {} WHERE dst = ?'.format(rel)
    return query, (dst,)
f3dd3ab848ccad4d5594710659fc342ad584e6b7
700,488
def rmflvec(self, **kwargs):
    """Writes eigenvectors of fluid nodes to a file for use in damping
    parameter extraction.

    APDL Command: RMFLVEC

    Notes
    -----
    RMFLVEC extracts the modal information from the modal results file for
    all nodes specified in a node component called 'FLUN'.  This component
    should include all nodes which are located at the fluid-structural
    interface.  Mode shapes, element normal orientation, and a scaling
    factor are computed and stored in a file Jobname.EFL.  For damping
    parameter extraction, use the DMPEXT command macro.  See Introduction
    for more information on thin film analyses.

    FLUID136 and FLUID138 are used to model the fluid interface.  Both the
    structural and fluid element types must be active.  The fluid interface
    nodes must be grouped into a component 'FLUN'.  A results file of the
    last modal analysis must be available.

    Distributed ANSYS Restriction: This command is not supported in
    Distributed ANSYS.
    """
    return self.run("RMFLVEC,", **kwargs)
cd4f63f2e9a3addcbf6f711ee80133392c6d7d2a
700,489
import pickle


def load_model(model_path):
    """Load an existing pickled model.

    Parameters:
        model_path - path to the model file
    Returns:
        loaded model
    """
    with open(model_path, 'rb') as model_file:
        model = pickle.load(model_file)
    return model
a9bf3abd5717e05a381bbaf7228a02217b8bccc5
700,490
def get_package_attribute(name):
    """Retrieve a package attribute by scanning darwin/__init__.py.

    Returns the evaluated right-hand side of the first line starting with
    *name*, or None if no such line exists.
    """
    with open("darwin/__init__.py") as init_file:
        for line in init_file:
            if not line.startswith(name):
                continue
            # eval is applied to trusted, local package source only.
            return eval(line.split("=")[-1])
33e7cdb53fffa844576e4e9387218b3ccbe6926d
700,491
import re


def isBaidu(url):
    """Return True if this url matches the pattern for Baidu searches."""
    # Example: http://www.baidu.com/s?wd=mao+is+cool&rsv_bp=0&ch=&tn=baidu&bar=&rsv_spt=3&ie=utf-8
    pattern = 'http://www.baidu.com/s\?wd=[\S+]+'
    return re.match(pattern, url) is not None
b4c06154f1f4f2bd6a18bbcb08c2ce0b4d2cbbc9
700,492
import numpy as np


def read_cnf_mols(filename, with_v=False, quaternions=False):
    """Read in a molecular configuration.

    File layout: line 1 is the molecule count, line 2 the (cubic) box
    length, followed by one uniformly formatted row per molecule:
    3 position columns, then 3 (or 4, with quaternions) orientation
    columns, then optionally 3 velocity + 3 angular velocity columns.

    Returns (n, box, r, e) or, when with_v is True, (n, box, r, e, v, w).
    """
    with open(filename, "r") as f:
        n = int(f.readline())       # Number of molecules
        box = float(f.readline())   # Simulation box length (assumed cubic)
    revw = np.loadtxt(filename, skiprows=2)  # The rest of the file is uniform
    rows, cols = revw.shape
    assert rows == n, "{:d}{}{:d}".format(rows, ' rows not equal to ', n)
    cols_re = 7 if quaternions else 6
    assert cols >= cols_re, "{:d}{}{:d}".format(cols, ' cols less than ', cols_re)
    # np.float_ was removed in NumPy 2.0; np.float64 is the identical dtype.
    r = revw[:, 0:3].astype(np.float64)        # Coordinate array
    e = revw[:, 3:cols_re].astype(np.float64)  # Orientation array
    if not with_v:
        return n, box, r, e
    assert cols >= cols_re + 6, "{:d}{}{:d}".format(cols, ' cols less than', cols_re + 6)
    v = revw[:, cols_re:cols_re + 3].astype(np.float64)      # Velocity array
    w = revw[:, cols_re + 3:cols_re + 6].astype(np.float64)  # Angular velocity/momentum array
    return n, box, r, e, v, w
f9ff9578d66d4f57740a10fae727202796b58cc6
700,493
def find_left_element(sorted_data, right, comparator):
    """!
    @brief Returns the index of the left-most element equal to
            `sorted_data[right]`.
    @details `sorted_data` must be a sorted collection.  Complexity is
              `O(log(n))` (binary search for the leftmost occurrence).

    @param[in] sorted_data: input data to find the element.
    @param[in] right: index of the right element from which search starts.
    @param[in] comparator: callable returning `True` if the first argument
                is less than the second.

    @return The left-most index whose value equals `sorted_data[right]`.

    """
    if len(sorted_data) == 0:
        raise ValueError("Input data is empty.")

    target = sorted_data[right]
    lo, hi = 0, right
    mid = (hi - lo) // 2
    while lo < hi:
        if comparator(sorted_data[mid], target):
            lo = mid + 1
        else:
            hi = mid
        mid = lo + (hi - lo) // 2
    return lo
7edc5ba49da8dbcbdc45310331b68c8ebe0c4ee1
700,494
def NamedParameterNames(fn):
    """! @brief Get names available to use as named parameters.

    Positional-only parameters are excluded; keyword-only parameters are
    included.  Objects without a ``__code__`` attribute yield ``()``.
    """
    code = getattr(fn, "__code__", None)
    if code is None:
        return ()
    start = code.co_posonlyargcount
    stop = code.co_argcount + code.co_kwonlyargcount
    return code.co_varnames[start:stop]
6d62a5d10d02483ee288b765c7e63a060871d839
700,495