content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
def view(self, view_id):
    """Return the view of this case with the given id, or None.

    Arguments:
        view_id(int): view id

    Returns:
        :class:`rips.generated.generated_classes.View`
    """
    # Scan the case's views lazily; the first id match wins.
    return next(
        (candidate for candidate in self.views() if candidate.id == view_id),
        None,
    )
3cb0bd8b0b5f77b7172defed5f3ae7fc3336a372
112,881
def get_year_list(yearstring):
    """Expand a comma/dash year specification into a list of year ints.

    Accepts forms like ``2001``, ``2001-2004``, ``2001,2003`` and any
    comma-separated combination of them, e.g. ``2001-2004,2007``.  The
    literal token ``latest`` is passed through unchanged.
    """
    years = []
    for token in yearstring.split(','):
        if token == 'latest':
            years.append('latest')
            continue
        bounds = token.split('-')
        first = int(bounds[0])
        # A single year is treated as the degenerate range first..first.
        last = int(bounds[1]) if len(bounds) > 1 else first
        years.extend(range(first, last + 1))
    return years
bc5ffd49bcdaa2e04f04a0ad1ac488981f84f630
112,882
def custom_options(option_list):
    """Decorator factory for Click: apply every option in the list to a command.

    Args:
        option_list(:obj:`list` of :obj:`click.option`): list of options

    Returns:
        function
    """
    def apply_options(func):
        decorated = func
        # Reverse so the options appear in declaration order on the command.
        for option in reversed(option_list):
            decorated = option(decorated)
        return decorated
    return apply_options
693ff0f0dfad9fddc0a61174bb597db7c075ae83
112,884
def get_minerva_weekdays(weekend = False):
    """Return Minerva's single-letter weekday codes.

    Codes: 'M'onday, 'T'uesday, 'W'ednesday, Thu'R'sday, 'F'riday,
    'S'aturday, S'U'nday.  Saturday and Sunday are only included when
    ``weekend`` is True.
    """
    weekdays = ['M', 'T', 'W', 'R', 'F']
    return weekdays + ['S', 'U'] if weekend else weekdays
52270097e342f9d263c173be9e3d66b8d487a61c
112,887
def split_data(X, y, frac = 0.1):
    """Split (X, y) into a training part and a validation part.

    The trailing ``frac`` fraction of the samples becomes the validation
    set.  Returns (X_train, y_train, X_valid, y_valid).
    """
    cut = int((1 - frac) * len(y))
    return X[:cut], y[:cut], X[cut:], y[cut:]
d1d5bd83baad3a1c3b5219de90d99db76f930cff
112,888
def extract_forward(ffs):
    """Return only the force fields flagged as forward differentiated.

    A force field qualifies when the word 'forward' appears (case
    insensitively) in its ``method`` attribute.

    Parameters
    ----------
    ffs : list of `datatypes.FF` (or subclass)
    """
    forward = []
    for ff in ffs:
        if 'forward' in ff.method.lower():
            forward.append(ff)
    return forward
6114ebbd4c837e87d336245f802d33c8a5694e0b
112,890
def serialize_for_deletion(elasticsearch_object_id):
    """Build the bulk-API action that deletes one Elasticsearch object.

    Args:
        elasticsearch_object_id (string): Elasticsearch object id

    Returns:
        dict: the object deletion data
    """
    payload = {"_op_type": "delete"}
    payload["_id"] = elasticsearch_object_id
    return payload
8b5add9eb0ed1d9a8cd39ecc7896ac963ab7a1e2
112,894
def statusError(pkt):
    """Return the status byte of an error packet, or -1.

    A status packet is identified by byte 7 being 0x55; its status is
    byte 8.  Any other packet yields -1.
    """
    return pkt[8] if pkt[7] == 0x55 else -1
03322c97e3b1563bc2dec9498317fc7672ca51c8
112,904
def verificar(rodadas, n):
    """
    Check the rounds for scheduling problems.

    Looks for the following errors:
        - repeated elements within a round
        - a pair ("dupla") repeated across rounds
        - wrong number of games in a round

    Parameters
    ----------
    rodadas: list
        List containing the rounds
    n: int
        number of elements

    Returns
    -------
    bool
        - False if no error was found.
        - True if any error was found.
    """
    error = False
    duplas = []  # every pair seen so far, across all rounds
    for rodada in rodadas:
        # Each round must contain exactly n/2 games.
        if len(rodada) != int(n/2):
            error = True
            print(f"Erro! Número de jogos na rodada.\n" f"Esperado {int(n/2)} jogos ao invés de {len(rodada)}.\n" f"Rodada: {rodada}\n")
        elementos = []  # elements already playing in this round
        for dupla in rodada:
            # Pairs are split around the "X" separator.
            # NOTE(review): the slicing assumes exactly one character on
            # each side of 'X' (e.g. "A X B") -- confirm the pair format.
            x_id = dupla.find("X")
            el_i = [dupla[:x_id-1], dupla[x_id+2:]]
            for el in el_i:
                if el not in elementos:
                    elementos.append(el)
                else:
                    error = True
                    print(f"Erro! Elemento repetido\n" f"Elemento: {el}\n" f"Rodada: {rodada}\n")
            if dupla not in duplas:
                duplas.append(dupla)
            else:
                error = True
                print(f"Erro! Dupla repetida\n" f"Dupla: {dupla}\n")
    if not error:
        print("Todas as rodadas estão corretas! :)")
        return False
    else:
        return True
4f495d5756e2025efc04d27777bb28bd076d02af
112,913
from typing import Any
from typing import Dict


def csv_io_kwargs(mode: str) -> Dict[str, Any]:
    """Return the keyword arguments for opening a CSV file in *mode*.

    Always uses UTF-8 and disables universal-newline translation, as the
    csv module requires.
    """
    kwargs: Dict[str, Any] = {"mode": mode}
    kwargs["newline"] = ""
    kwargs["encoding"] = "utf-8"
    return kwargs
beaf98025288ebe4b613e6d0fd7e864d97a7d3ee
112,918
def mappings_coincide(map1: dict, map2: dict) -> bool:
    """Return True if the two mappings agree on every key they share.

    Keys present in only one mapping are ignored; only the intersection
    of the key sets is compared.
    """
    shared = set(map1.keys()) & set(map2.keys())
    return all(map1[key] == map2[key] for key in shared)
c0ef14b7669f030ecdb675c0a36625a75fb7b7a5
112,919
from enum import Enum


def encode_enums(obj):
    """
    Encode enums in a yaml-parser-friendly data type.

    Dicts are recursed into; an Enum member becomes the list
    ``['enum', <class name>, <member name>]``; anything else is returned
    unchanged.

    :param obj: object to encode
    :return: encoded object
    """
    if isinstance(obj, dict):
        return {key: encode_enums(value) for key, value in obj.items()}
    if isinstance(obj, Enum):
        return ['enum', type(obj).__name__, obj.name]
    return obj
7f6ea967df317a27e5991253c0f15850ae055ead
112,922
import ast


def is_scope(node):
    """True if the ast node is a scope else False."""
    if not isinstance(node, ast.AST):
        raise TypeError("The input must be an AST node.")
    # Modules, function definitions and class definitions open new scopes.
    return isinstance(node, (ast.Module, ast.FunctionDef, ast.ClassDef))
a2744f3ca6e9e6b46b61eb97af2c3a3a8607c1e6
112,923
def fetch_following(api,name):
    """Return info about the users that a Twitter user follows.

    Given a tweepy API object and the screen name of the Twitter user,
    return a list of dictionaries, one per followed user (maximum 100),
    with key-value pairs:

       name: real name
       screen_name: Twitter screen name
       followers: number of followers
       created: created date (no time info)
       image: the URL of the profile's image
    """
    following = []
    for friend in api.get_friends(screen_name=name, count=100):
        following.append({
            'name': friend.name,
            'screen_name': friend.screen_name,
            'followers': friend.followers_count,
            'created': friend.created_at.strftime('%Y-%m-%d'),
            'image': friend.profile_image_url_https,
        })
    return following
3f71ccbfdafd744ed61dbec793b68c78e081274b
112,924
def data_split(data, ratio=0.2):
    """Split a data set into a train and a test part along the first axis.

    Args:
        data (ndarray): The data set stored with observations as rows.
        ratio (float): Fraction of the data left as the test set (default 0.2).

    Returns:
        tuple: (train, test) ndarrays.

    Examples:
        >>> train, test = data_split(data, 0.2)
    """
    # Same rounding as before: truncate AFTER the subtraction.
    cut = int(len(data) - len(data) * ratio)
    return (data[0:cut, ], data[cut:, ])
cfbb29cbfe6933824d9fb408f7f8ab3629059552
112,925
def _convert_bool_string(value): """ Convert a "True" or "False" string literal to corresponding boolean type. This is necessary because Python will otherwise parse the string "False" to the boolean value True, that is, `bool("False") == True`. This function raises a ValueError if a value other than "True" or "False" is passed. If the value is already a boolean, this function just returns it, to accommodate usage when the value was originally inside a stringified list. Parameters ---------- value : string {"True", "False"} the value to convert Returns ------- bool """ if value in {"True", "False"}: return value == "True" elif isinstance(value, bool): return value else: # pragma: no cover raise ValueError(f'invalid literal for boolean: "{value}"')
a36fb7df910c4f5eb6d76e2fbab542dbccf7e3b0
112,927
def substr_ind(seq, line, *, skip_spaces=True, ignore_case=True):
    """
    Return the start and end + 1 index of a substring match of seq to line.

    Returns:
        [start, end + 1] if needle found in line
        [] if needle not found in line
    """
    # Normalize case on both sides for a case-insensitive comparison.
    if ignore_case:
        seq = seq.lower()
        line = line.lower()
    # Spaces are removed from the needle only; spaces in `line` are
    # skipped one at a time inside the loop.
    if skip_spaces:
        seq = seq.replace(' ', '')
    start = None  # index in `line` where the current partial match began
    count = 0     # how many chars of `seq` have matched so far
    for ind, char in enumerate(line):
        if skip_spaces and char == ' ':
            continue
        if char == seq[count]:
            if count == 0:
                start = ind
            count += 1
        else:
            # Mismatch: restart matching from scratch.
            # NOTE(review): the current char is NOT re-tested against
            # seq[0], so overlapping candidates (e.g. seq='aab' in
            # 'aaab') are missed -- confirm this is acceptable.
            count = 0
            start = None
        if count == len(seq):
            return [start, ind + 1]
    # NOTE(review): an empty `seq` raises IndexError (seq[0]) on the
    # first non-space character of `line`.
    return []
391e09ba8c7f4bce98f2f3f97ca385a87bc8b465
112,929
import torch


def conjugate_gradient_block(A, B, x0=None, tol=1e-2, maxit=None, eps=1e-6):
    """
    Solve the linear system A X = B using conjugate gradient, where

    - A is an abstract linear operator implementing an n*n matrix
      (a callable mapping an (n, s) tensor to an (n, s) tensor)
    - B is a matrix right hand side of size n*s
    - x0 is an optional initial guess of size n*s; zeros are used when
      None (previously the default crashed, since A(None) is invalid)
    - tol is a relative tolerance on the worst of the s residuals
    - maxit optionally caps the iteration count
    - eps guards the divisions against ~zero denominators

    Essentially, this runs #s classical conjugate gradient algorithms in
    'parallel', and terminates when the worst of the #s residuals is
    below tol (relative to the mean initial squared residual).

    Returns (X, k): the solution block and the number of iterations run.
    """
    # Copy so the caller's initial guess is not clobbered by the in-place
    # updates below.
    X = torch.zeros_like(B) if x0 is None else x0.clone()
    R = B - A(X)
    # Bug fix: P must be an independent tensor.  Previously P aliased R,
    # so the in-place `R -= AP * alpha` silently overwrote the previous
    # search direction and corrupted the CG update for P.
    P = R.clone()
    Rs_old = torch.norm(R, 2., dim=0) ** 2.
    tol_scale = torch.mean(Rs_old)
    k = 0
    while True:
        k += 1
        AP = A(P)
        alpha = Rs_old / (torch.sum(P * AP, dim=0) + eps)
        X += P * alpha
        if k == maxit:
            break
        R -= AP * alpha
        Rs_new = torch.norm(R, 2., dim=0) ** 2.
        res = torch.max(Rs_new)
        if res < (tol ** 2.) * tol_scale:
            break
        # Standard CG direction update: new residual plus scaled old P.
        P = R + P * (Rs_new / (Rs_old + eps))
        Rs_old = Rs_new
    return X, k
e57134498b9c2b89d7d9a9b102eb5e3446338315
112,930
def powerTuplesToNumber(data, base):
    """Convert (power, factor) pairs into sum(factor * base**power)."""
    return sum(factor * base ** power for power, factor in data)
05441d10a04c36bec770d292c4b3344cdd55cde9
112,932
def count_total_num_hits(outlist_hits):
    """
    Counts total number of family hits

    :param outlist_hits: A dictionary in the form of {rfamseq_acc: [(s1,e1),...]
    :return: Total number of hits found in the dictionary
    """
    return sum(len(hits) for hits in outlist_hits.values())
fbf70b972f5895d241ef4e77f5611e423aebb87f
112,935
import struct
from typing import BinaryIO
from typing import cast


def read_float(stream: BinaryIO) -> float:
    """Read a 4-byte IEEE-754 float in big-endian order from the stream."""
    raw = stream.read(4)
    (value,) = struct.unpack('>f', raw)
    return cast(float, value)
b5dc100feefb7bb4aaae6a891d8be03fb76dbc09
112,944
import json
from typing import Any


def get_json(filename: str) -> Any:
    """Read and parse a JSON file.

    Args:
        filename: the JSON filename

    Returns:
        the parsed data (typically a dict or a list).  The previous
        ``-> str`` annotation was wrong: ``json.load`` returns the
        decoded object, not text.
    """
    # JSON is defined to be UTF-8; don't depend on the platform default.
    with open(filename, encoding="utf-8") as file_stream:
        return json.load(file_stream)
07c213d12c1c8c9358775c7e2adbdf04939dd1a6
112,945
import re


def convert_code(text_with_backticks):
    """Replace every back-quoted span `...` with <code>...</code> markup."""
    pattern = re.compile(r'`([^`]*)`')
    return pattern.sub(r'<code>\1</code>', text_with_backticks)
cf14ae1a3ecd4d5f02b45dfbe4669b8d6943e5eb
112,946
import re


def clean_text(text):
    """
    Function for text cleaning

    Drops apostrophes, replaces every non-letter with a space, collapses
    runs of whitespace and lowercases the result.

    :param text: text to clean
    :return: cleaned text
    """
    without_quotes = re.sub("'", "", text)
    letters_only = re.sub("[^a-zA-Z]", " ", without_quotes)
    return " ".join(letters_only.split()).lower()
d6c5522604a993e09974d6759df5f4ed2900088c
112,947
def find_skips_in_seq(numbers):
    """Find non-sequential gaps in a sequence of numbers

    :type numbers: Sequence of ints
    :param numbers: Sequence to check for gaps
    :returns: List of (start_of_gap, end_of_gap) tuples.  An empty list
        means the sequence has no gaps.
    """
    # Pair each element with its predecessor (seeded so the first
    # element can never be flagged), keeping pairs that skip a value.
    extended = [numbers[0] - 1] + list(numbers)
    return [(prev, cur) for prev, cur in zip(extended, numbers) if cur != prev + 1]
a4a31871b8a0471b24d99d9fe1311a7b99f197ac
112,949
def redact_desc(desc, redact):
    """
    Redact a bio for the final hint by replacing every word of the
    author's name (first, middle, last) with "REDACTED".

    Returns a redacted string
    """
    redacted = desc
    # Process last name first, matching the original pop()-from-the-end order.
    for name_part in reversed(redact.split()):
        redacted = redacted.replace(name_part, "REDACTED")
    return redacted
29a2485d1b237ca74042d3cde1a0b7fe4d8e39b2
112,950
def is_draft(request):
    """Check if the request is a draft (neither open nor closed)."""
    return not (request.is_open or request.is_closed)
bd6e30836a72f21426ca2430cb405cca96495c33
112,952
def totaled_total_tbr(cc, sql_time_specification):  # pragma: no cover
    """Counts the total number of commits with a TBR in a given timeframe

    Args:
        cc(cursor): a database cursor
        sql_time_specification(str): a sql command to limit the dates of the
            returned results

    Return:
        result(int): a count of all commits with a TBR
    """
    # NOTE(review): sql_time_specification is spliced into the query with
    # %-formatting, so it MUST be a trusted SQL fragment -- this is SQL
    # injection if it can ever carry user input.  (It is a clause, not a
    # value, so it cannot simply be parameterized here.)
    cc.execute("""SELECT COUNT(DISTINCT git_commit_hash) FROM commit_people INNER JOIN git_commit ON commit_people.git_commit_hash = git_commit.hash WHERE commit_people.type = 'tbr' AND %s""" % sql_time_specification)
    result = cc.fetchone()
    return int(result[0])
33b110a5627df041681291f5c0890f2fcd75c34e
112,953
def generate_experiment_histories_file_path(experiment_path_prefix):
    """ Given an 'experiment_path_prefix', append '-histories.npz'. """
    return experiment_path_prefix + "-histories.npz"
7d7c22bd6e8f94d732c8d7db9df31d9a86146877
112,954
def find_dict_with(list_of_dicts, key, value):
    """
    Find the first dictionary containing the key, value pair.

    Args:
        list_of_dicts: a list of dictionaries
        key: key to search for in the dictionaries
        value: the value that should be assigned to the key

    Returns:
        the first dictionary containing the key, value pair, or None if
        no dictionary matches.
    """
    return next(
        (entry for entry in list_of_dicts if key in entry and entry[key] == value),
        None,
    )
d1fc9ff58b2599a63f6aa06790c0630a85b352d8
112,962
def select_features(keep_cols, df_train, df_test=None):
    """Keep only the selected columns in the train/test dataframes.

    Parameters
    ----------
    keep_cols : List
        List of columns to keep
    df_train : pd.DataFrame
        Training Dataframe containing all features
    df_test : pd.DataFrame, optional
        Test Dataframe containing all features

    Returns
    -------
    Pandas Dataframe
        Copy of the train dataset restricted to keep_cols
    Pandas Dataframe or None
        Copy of the test dataset restricted to keep_cols, or None when
        no test dataframe was given
    """
    df_test_copy = None if df_test is None else df_test[keep_cols].copy()
    return df_train[keep_cols].copy(), df_test_copy
359df61b7d9ddd5391a42926967e7202cc0414cf
112,963
def update_object(obj, path, callback=None):
    """
    Traverse a data structure ensuring all nodes exist.

    obj: expected to be a dictionary
    path: string with dot-separated path components
    callback: optional callback function (described below)

    When update_object reaches the parent of the leaf node, it calls the
    optional callback function.  The arguments to the callback function are:

    - parent: dictionary containing the leaf node
    - key: string key for the leaf node in parent
    - created: boolean flag indicating whether any part of the path,
      including the leaf node needed to be created.

    If the callback function is None, update_object will still ensure that
    all components along the path exist.  If the leaf needs to be created,
    it will be created as an empty dictionary.

    Example: update_object({}, 'foo.bar') -> {'foo': {'bar': {}}}

    Return value: Returns either the return value of callback, or if
    callback is None, returns the value of the leaf node.
    """
    parts = path.split(".")
    current = obj      # node reached so far while walking the path
    parent = obj       # dictionary that contains `current`
    created = False    # becomes True once any path component is created
    for part in parts:
        if len(part) == 0:
            # Catches empty components, e.g. "a..b" or a leading/trailing dot.
            raise Exception("Path ({}) is invalid".format(path))
        if not isinstance(current, dict):
            # A non-dict value sits where the path still needs to descend.
            raise Exception("Cannot set {}, not a dictionary".format(path))
        # Create dictionaries along the way if path nodes do not exist,
        # but make note of the fact that the previous value did not exist.
        if part not in current:
            current[part] = {}
            created = True
        parent = current
        current = parent[part]
    if callback is not None:
        # At loop exit, parent holds the leaf and parts[-1] is its key.
        return callback(parent, parts[-1], created)
    else:
        return current
731997de3723a52962bd1988a5e0fe77927fc3b3
112,968
def query_labeled(table, filename=None, label=None, start=None, end=None):
    """ Query selection table for selections from certain audio files
        and/or with certain labels.

        Args:
            table: pandas DataFrame
                Selections table, which must have a 'label' column.
            filename: str or list(str)
                Filename(s)
            label: int or list(int)
                Label(s)
            start: float
                Earliest end time in seconds
            end: float
                Latest start time in seconds

        Returns:
            df: pandas DataFrame
                Selection table
    """
    df = table
    if filename is not None:
        if isinstance(filename, str):
            # A single filename absent from the index would make .loc
            # raise; return an empty frame (same columns) instead.
            if filename not in df.index:
                return df.iloc[0:0]
            else:
                # Wrap in a list so .loc keeps the DataFrame shape.
                filename = [filename]
        # NOTE(review): when a list is passed, missing filenames are not
        # checked and .loc will raise KeyError -- confirm that is intended.
        df = df.loc[filename]
    if label is not None:
        if not isinstance(label, list):
            label = [label]
        df = df[df.label.isin(label)]
    if start is not None:
        # Keep selections that end after `start` ...
        df = df[df.end > start]
    if end is not None:
        # ... and start before `end`, i.e. overlap the [start, end] window.
        df = df[df.start < end]
    return df
5e7bf303c07a59313d5c7f8332687e9c2f56f10a
112,969
def getlines(fname):
    """
    Return the content of a text file as a list of lines, stripped of
    the trailing '\n' and any trailing spaces.

    Parameters
    ----------
    fname : str
        Text file to open.

    Returns
    -------
    List of lines
    """
    with open(fname) as f:
        return [line.rstrip() for line in f]
d2ccb3d7494ef194f81059f400f69468d60f872e
112,973
def extract_line(props, image):
    """
    Extract the area of an image belonging to a feature given a bounding box

    :param props: the properties of the region (min/max row and column)
    :param image: image to extract the region from
    :returns: ndarray -- section of the image within the bounding box
    """
    rows = slice(props['min_row'], props['max_row'])
    cols = slice(props['min_col'], props['max_col'])
    return image[rows, cols]
b21d25df36d148c48f0e49465779ad91f6cb3dc6
112,976
def polyschedule(max_epochs, gamma=0.9):
    """Poly learning-rate policy popularised by DeepLab-v2:
    https://arxiv.org/abs/1606.00915

    Args:
        max_epochs (int): maximum number of epochs, at which the
            multiplier becomes zero.
        gamma (float): decay factor.

    Returns:
        Callable mapping the current epoch to a learning-rate multiplier.
    """
    def polypolicy(epoch):
        remaining = 1.0 - 1.0 * epoch / max_epochs
        return remaining ** gamma
    return polypolicy
bb2fed780f69d83ff0a96cbe38361458e62ccaa8
112,978
def all_probs_count(len_A, n):
    """Number of distinct n-grams over an alphabet of size len_A."""
    return pow(len_A, n)
95c125e02c359a3ce5ff2ff117a11676e67b3b90
112,982
def surface_area(length: int, width: int, height: int) -> int:
    """Calculate the surface area of a gift given its dimensions

    Args:
        length (int): The length of the gift
        width (int): The width of the gift
        height (int): The height of the gift

    Returns:
        int: The surface area of the gift
    """
    faces = (length * width, width * height, height * length)
    return 2 * sum(faces)
f4843aac311c984a8f67ad0977111ed967707533
112,983
import requests


def get_latest_ensembl_release() -> str:
    """Return the latest Ensembl release as provided by bioversions.

    Fetches the bioversions registry from GitHub, builds a
    prefix -> latest-version map, and returns the 'ensembl' entry.
    """
    url = "https://github.com/biopragmatics/bioversions/raw/main/src/bioversions/resources/versions.json"
    # requests has no default timeout: without one a stalled connection
    # would hang the caller forever.
    results = requests.get(url, timeout=30).json()
    versions = {
        entry["prefix"]: entry["releases"][-1]["version"]
        for entry in results["database"]
        if "prefix" in entry
    }
    ensembl_release = versions["ensembl"]
    assert isinstance(ensembl_release, str)
    return ensembl_release
6963a294fe81968a26272f8eed2b90f625997305
112,989
def segmentImgs(imgs, cap):
    """
    Segments imgs into sets of at most <cap> images.

    :param imgs: Full list of all images
    :param cap: Maximum number of images per set
    :return: List of image sets; all but the last hold exactly <cap> images.
    """
    # Iterative slicing instead of recursion; same output.
    segments = []
    remaining = imgs
    while len(remaining) > cap:
        segments.append(remaining[:cap])
        remaining = remaining[cap:]
    segments.append(remaining)
    return segments
e6122f8ac5ca7c3c83d18c5dc0ffbc03c8bc8435
112,990
import re


def fmt_cmd_template(cmd):
    """
    Format the cmd template `cmd` so that it can be used e.g. for an
    os.system call.

    Collapses every run of whitespace (including newlines from
    triple-quoted templates) into a single space.
    """
    # Raw string: the old "\s+" literal is an invalid escape sequence
    # (DeprecationWarning, a SyntaxError in future Python versions).
    return re.sub(r"\s+", " ", cmd)
4020f9ae36e5714c1c41a5d608fb719e3f125b06
112,992
import re


def finish(text, finish, n=1):
    """End a string with exactly n instances of a given value.

    Any existing trailing repetitions of *finish* are stripped first,
    then n copies are appended.
    """
    # Group the escaped suffix before applying '+': previously the
    # quantifier bound only to the LAST character of the escape (e.g.
    # 'ab+$' instead of '(?:ab)+$'), so multi-character suffixes were
    # stripped incorrectly.
    stripped = re.sub('(?:' + re.escape(finish) + ')+$', '', text)
    return stripped + finish * n
bc7534c1b04fd926b02df195e81a814eb156ad40
112,995
def has_selection(view):
    """Checks whether or not at least one selected region exists"""
    regions = view.sel()
    return not regions[0].empty()
82dc88e6cc89b881a07fe88e6ee36d837e4d60a1
112,997
def fib(n):
    """Return the nth Fibonacci number (fib(0) == 0, fib(1) == fib(2) == 1)."""
    # Iterate the (previous, current) pair n times starting from (0, 1).
    prev, curr = 0, 1
    for _ in range(n):
        prev, curr = curr, prev + curr
    return prev
be8f3dd8d842570acc0963aeb48af1fda7f40778
112,998
import re


def regexp_error_msg(pattern):
    """
    Return None if the pattern is a valid regular expression or a
    string describing why the pattern is invalid.
    """
    try:
        re.compile(pattern)
        return None
    except re.error as exc:
        return str(exc)
58285383b9e43885f10fc35cd56c36de33e55f25
113,002
def page_replace(context, value):
    """Replaces or adds the page number to form the pagination url.

    Example: on the page ``/company/list/?sorted=created&page=5``,

        <a href="/company/list/?{% page_replace 3 %}">Page 3</a>

    expands to

        <a href="/company/list/?sorted=created&page=3">Page 3</a>
    """
    # Work on a copy: the request's GET QueryDict is immutable.
    params = context['request'].GET.copy()
    params['page'] = value
    return params.urlencode()
d6376f5f93323b2d4675ee25513b43d5afa7cf95
113,003
import re


def extract_enums(text):
    """Extract enums from text

    Choices should satisfy following conditions:
        - is surrouned by curly braces,
        - able to evaluated as valid set

    Args:
        text (str): text to find choices

    Returns:
        Union[None, List]: None if choices are not found, else the List
        containing possible choices in sorted order
    """
    # Match the smallest non-nested {...} groups.
    pat = r'\{[^\{\}]+\}'
    out = None
    for m in re.finditer(pat, text):
        # curly braces expression
        cb = m.group()
        try:
            # NOTE(review): eval() on matched text -- only safe when
            # `text` is trusted; a hostile string here executes code.
            out = eval(cb)
            assert isinstance(out, set)
            out = sorted(list(out))
        except:
            # NOTE(review): if eval() succeeds but the isinstance assert
            # fails, `out` keeps the non-set value from this match (it
            # was assigned before the assert) -- looks like a bug.
            # Also: when several sets match, the LAST one wins.
            pass
    return out
8191d8ae279a86d47c5d3683b2adc4f690905e96
113,004
def getText(node):
    """ Get textual content of an xml node. """
    # Concatenate only the direct TEXT_NODE children.
    return ''.join(
        child.data for child in node.childNodes
        if child.nodeType == child.TEXT_NODE
    )
e8dda800260ba7141bd59d0c77eb3c73ac2bac0a
113,005
from typing import List


def vector_length(a: List[int]) -> float:
    """Compute the Euclidean length of a vector

    Args:
        a (List[int]): vector

    Returns:
        float: length of the vector
    """
    squared = sum(component ** 2 for component in a)
    return squared ** 0.5
f16e4b4866d7721297c585b96379139f7f6eb773
113,006
from pathlib import Path


def file_or_dir_exists(target):
    """
    Check if the file or directory exists.

    Returns 'IS_FILE' for a file, 'IS_DIR' for a directory, 'NOT_FOUND'
    when the target does not exist, and 'TARGET_INVAILD' when the target
    cannot even be interpreted as a path.
    """
    try:
        file = Path(target)
    except TypeError:
        # Path() raises TypeError for non-path-like input (e.g. an int);
        # the old bare `except` hid every other failure too.
        return 'TARGET_INVAILD'
    if file.is_file():
        return 'IS_FILE'
    elif file.is_dir():
        return 'IS_DIR'
    # Bug fix: the docstring promised 'NOT_FOUND' but the old code
    # returned False here.
    return 'NOT_FOUND'
122c031406d9d59752f15c67646167402e972ad2
113,007
def crop_img(img, leaf):
    """
    For a given image and leaf position, return the image cropped to
    the leaf's bounding box.

    :param img: image that needs to be cropped
    :param leaf: {'x': int, 'y': int, 'width': int, 'height': int}
    :return: cropped image
    """
    # Negative coordinates are clamped to the image border.
    x = max(leaf['x'], 0)
    y = max(leaf['y'], 0)
    return img[y:y + leaf['height'], x:x + leaf['width']]
557a9107a9bcc52f60d1b7024d91f724468ebba5
113,011
def lyrics_field_info(data, offset=0):
    """lyrics_field_info(data, offset=0) -> None or (bytes, size) or (None, size)

    Return information about a single field in a lyrics tag.  There must
    be at least 8 bytes of data available.

    Returns None if this isn't a valid field; otherwise, return (name, size):
      name - the 3-character field id as bytes; or None if this is the
             tag length field
      size - the size associated with this field
    """
    def _ucase(pos):
        # True when data[pos] is an ASCII uppercase letter (A-Z).
        return data[pos] > 64 and data[pos] <= 64+26

    def _val(pos, len):
        # Parse `len` ASCII-digit bytes at `pos` as a decimal integer;
        # returns None when any byte is not a digit.
        ret = 0
        for i in range(pos, pos+len):
            ch = data[i]
            if ch < 48 or ch > 57:
                return None  # not a digit
            ret = (ret * 10) + (ch - 48)
        return ret

    if _ucase(offset) and _ucase(offset+1) and _ucase(offset+2):
        # Named field: 3 uppercase letters followed by a 5-digit size.
        v = _val(offset+3, 5)
        if v is None:
            return None
        # Bug fix: array.tostring() was removed in Python 3.9;
        # tobytes() is the exact replacement (both return bytes on Py3).
        return (data[offset:offset+3].tobytes(), v)
    # Otherwise this may be the 6-digit tag length field.
    v = _val(offset, 6)
    if v is None:
        return None  # not a lyrics field
    else:
        return (None, v)
506d99bb134d18e0cbc4253cbe324172f7863740
113,014
def _get_SHA_params(SHA): """ Parameters ---------- SHA : SuccessiveHalvingSearchCV Returns ------- params : dict Dictionary to re-create a SuccessiveHalvingSearchCV without the estimator or parameters Example ------- >>> from sklearn.linear_model import SGDClassifier >>> model = SGDClassifier() >>> params = {"alpha": np.logspace(-1, 1)} >>> SHA = SuccessiveHalvingSearchCV(model, params, tol=0.1, ... patience=True, random_state=42) >>> _get_SHA_params(SHA) {'aggressiveness': 3, 'max_iter': 100, 'n_initial_iter': 9, 'n_initial_parameters': 10, 'patience': True, 'random_state': 42, 'scoring': None, 'test_size': None, 'tol': 0.1} """ return { k: v for k, v in SHA.get_params().items() if "estimator_" not in k and k != "parameters" and k != "estimator" }
ee49ab29c1a69c2a09e12f5a03fecef0cebd430f
113,016
def pathcalc_om(om1, om2, om3, omd):
    """Calculate the path to a target by anchoring waypoints on orbital markers.

    Args:
        om1 (float): Distance in km of closest OM to target.
        om2 (float): Distance in km of 2nd closest OM to target.
        om3 (float): Distance in km of 3rd closest OM to target.
        omd (float): Distance in km between adjacent OMs on planetary body.

    Returns:
        leg1 (float): Distance in km of 1st leg (OM1 -> OM2)
        leg2 (float): Distance in km of 2nd leg (OM2 -> OM3)
        leg1r (float): Distance in km to OM2 after travelling 1st leg
        leg2r (float): Distance in km to OM3 after travelling 2nd leg
    """
    # OM pathing formulae for the two leg lengths.
    leg1 = (om1 ** 2 + omd ** 2 - om2 ** 2) / (2 * omd)
    mid = (omd ** 2 + leg1 ** 2 - omd * leg1) ** 0.5
    leg2 = (omd ** 2 - omd * leg1 + om1 ** 2 - om3 ** 2) / (2 * mid)
    # Distance remaining to the next OM after each leg.
    leg1r = omd - leg1
    leg2r = mid - leg2
    # All results are rounded to one decimal place.
    return tuple(round(value, 1) for value in (leg1, leg2, leg1r, leg2r))
5f18db34e76ada3d0c6ccf116306acc4cda1bdca
113,025
def get_error_details(res):
    """
    Parses the error details retrieved from ServiceNow and outputs the
    resulting string.

    Args:
        res (dict): The data retrieved from ServiceNow.

    Returns:
        (str) The parsed error details.
    """
    error = res.get('error', {})
    return '{}: {}'.format(error.get('message'), error.get('detail'))
909abdb7096dd07d0166cef7895b81ac8d313b0d
113,026
import re


def expand_tasks_per_node(tasks_per_node):
    """Expand the tasks per node expression from SLURM

    The order is preserved so it can be matched to the hostlist.

    Input: '3(x2),2,1'
    Output: [3, 3, 2, 1]
    """
    result = []
    try:
        for part in tasks_per_node.split(','):
            # Each part is '<count>' or '<count>(x<repetitions>)'.
            m = re.match(r'(\d+)(\(x(\d+)\))?$', part)
            assert m is not None
            count = int(m.group(1))
            repetitions = int(m.group(3) or 1)
            result += [count] * repetitions
    except Exception as e:
        raise ValueError('Invalid tasks-per-node list format "%s": %s' % (tasks_per_node, e))
    return result
509a4413ea6eb2954d6b79126e9f33bd182acc21
113,032
def filter(df):
    """
    Retain only restaurant businesses that are open

    :param df: business dataframe
    :return: restaurant business dataframe
    """
    # Open businesses with a non-null categories field ...
    open_df = df[df['is_open'] == True]
    with_categories = open_df[open_df.categories.notna()]
    # ... whose categories mention restaurants or food.
    is_restaurant = with_categories['categories'].str.contains("Restaurant|Restaurants|Food") == True
    return with_categories[is_restaurant]
213fffe6bff839ae8b8cad395238232fff57e083
113,033
def _check_all_same_channel_names(instances): """Check if a collection of instances all have the same channels.""" ch_names = instances[0].info["ch_names"] for inst in instances: if ch_names != inst.info["ch_names"]: return False return True
f4677329967f67001a18f9763681b64b077169b5
113,039
def empties(item):
    """Helper that renders falsy values (None, '', 0, ...) as a dash."""
    return item if item else '-'
46c090a51b17f8c943864271c4e284ad66223b63
113,040
def dict_contains(superset, subset):
    """ Returns True if `superset` contains every key/value pair of `subset`. """
    for key, value in subset.items():
        if key not in superset or superset[key] != value:
            return False
    return True
6b8bb665e0cc2fbc33fd84f2325ea973666bbd94
113,042
def scale_mnist_data(train_images, test_images):
    """
    Scale the training and test image arrays from [0, 255] into [0, 1]
    and return them as a tuple (train_images, test_images).
    """
    scaled_train = train_images / 255.0
    scaled_test = test_images / 255.0
    return (scaled_train, scaled_test)
f4e501f6d66e547fc921451d3016677d5db0edf4
113,044
def re_wrap(p):
    """
    Wrap a regular expression if necessary, i.e., if it contains
    unescaped '|' in the outermost level.
    """
    escaped = False  # True when the previous char was an unmatched backslash
    level = 0        # current parenthesis nesting depth
    for c in p:
        if c == '\\':
            # Toggle so '\\\\' (an escaped backslash) does not escape
            # the character that follows it.
            escaped = not escaped
        elif c == '(' and not escaped:
            level += 1
        elif c == ')' and not escaped:
            level -= 1
        elif c == '|' and not escaped:
            if level == 0:
                # outmost level, must wrap
                p = '(' + p + ')'
                break
        else:
            # Any other character consumes a pending escape.
            # NOTE(review): a '|' inside a character class like [a|b] at
            # level 0 still triggers wrapping -- harmless but confirm.
            escaped = False
    return p
08732ce4a4b73985216c9b74def6ec44398ba0c8
113,059
def contains_variables(email):
    """Checks if an email contains any member variables.

    Args:
        email: The email to check.

    Returns:
        True if the email contains any member variables.
    """
    placeholders = ("{{first}}", "{{last}}", "{{email}}")
    return any(token in email.text for token in placeholders)
18e21aff57f7d640cf3fb397ac9a7230146b3c72
113,062
def map_pretty_name_to_file(map_name: str):
    """Converts an English map name to a file name"""
    # Drop spaces and apostrophes in one pass, then trim stray whitespace.
    return map_name.translate(str.maketrans('', '', " '")).strip()
c8a782f1a367be09a01edfd6f22d3e01e5e2e687
113,067
def build_full_file_url(file_field, build_full_url):
    """
    Build the full URL for a file field.

    Args:
        file_field: The file field to build the full URL for.
        build_full_url: The function used to build a full URL out of an
            absolute path.

    Returns:
        The full URL to the file contained in the given field if it
        exists.  If the field is empty, ``None`` is returned instead.
    """
    if not file_field:
        return None
    return build_full_url(file_field.url)
a6b7af6c7bece2d362757a94066dacbe4bddf6b8
113,068
def get_budget_response(budget_name, budget_limit_amount, calculated_actual_spend, calculated_forecasted_spend):
    """Returns a mocked response object for the get_budget call

    :param budget_name: (str) the budget name
    :param budget_limit_amount: (float) the budget value
    :param calculated_actual_spend: (float) the current actual spend
    :param calculated_forecasted_spend: (float) the forecasted cost
        according to AWS
    :return: the response object (shaped like the AWS Budgets
        ``get_budget`` API response; amounts are stringified as the real
        API does)
    """
    return {
        "Budget": {
            "BudgetName": budget_name,
            "BudgetLimit": {
                "Amount": str(budget_limit_amount),
                "Unit": "USD"
            },
            "CostTypes": {
                "IncludeTax": True,
                "IncludeSubscription": True,
                "UseBlended": False,
                "IncludeRefund": True,
                "IncludeCredit": True,
                "IncludeUpfront": True,
                "IncludeRecurring": True,
                "IncludeOtherSubscription": True,
                "IncludeSupport": True,
                "IncludeDiscount": True,
                "UseAmortized": False
            },
            "TimeUnit": "MONTHLY",
            # Fixed epoch timestamps matching the canned fixture.
            "TimePeriod": {
                "Start": 1556668800.0,
                "End": 3706473600.0
            },
            "CalculatedSpend": {
                "ActualSpend": {
                    "Amount": str(calculated_actual_spend),
                    "Unit": "USD"
                },
                "ForecastedSpend": {
                    "Amount": str(calculated_forecasted_spend),
                    "Unit": "USD"
                }
            },
            "BudgetType": "COST",
            "LastUpdatedTime": 1559530911.092
        }
    }
8cf6979b48f0377cc3c5282c507d5c642b616ab9
113,071
def checkSeq(x, length):
    """ Returns true if the length of the weave 'x' (sum of absolute
    step sizes) is less than or equal to 'length' """
    return sum(abs(step) for step in x) <= length
043f92cb4ec4e8719d6af713eed6663d5c360fea
113,072
import functools
import codecs


def get_file_contents(source_path: str) -> str:
    """
    Load the contents of the source file into a string for execution,
    trying multiple loading methods to handle cross-platform encoding
    edge cases.

    If every load method fails, a string containing a ``raise`` statement
    is returned so that the error surfaces when the step is run.

    :param source_path: Path of the step file to load.
    """
    openers = (
        functools.partial(codecs.open, source_path, encoding='utf-8'),
        functools.partial(open, source_path, 'r'),
    )
    for opener in openers:
        try:
            with opener() as stream:
                return stream.read()
        except Exception:
            # Fall through to the next opener on any read/decode failure.
            continue
    return (
        'raise IOError("Unable to load step file at: {}")'
        .format(source_path)
    )
39130aa8f763a0a72b33133adf5e2cdc4f3712ce
113,073
def real2binary(y, threshold=0.5, inclusive=True):
    """
    Convert real values (-inf, inf) -> binary values {0, 1}.

    Parameters
    ----------
    y: np.array (n,c) float/int (-inf,inf)
    threshold: float (-inf, inf)
        value greater than this is converted into 1, otherwise 0
    inclusive: bool
        if True, equal to threshold -> 1, else -> 0

    Returns
    -------
    np.array (n,c) int {0,1}
    """
    mask = (y >= threshold) if inclusive else (y > threshold)
    return mask.astype(int)
b6811f854e0ec22d56bcad4f19bf2d9c8e4bc6fb
113,075
from urllib.parse import urlparse


def url_host(url: str) -> str:
    """
    Parse the hostname (without any port) from a URL.

    :param url: URL
    :return: hostname, or '' when the URL has no network location
    """
    netloc = urlparse(url).netloc
    if not netloc:
        return ''
    host, _, _ = netloc.partition(':')
    return host
f4338dfff81c7f5afce2e3e026d15a30a60859a2
113,078
from typing import Any
import importlib


def instantiate_class_from_path(path: str, instance_of: Any = None) -> Any:
    """
    Given a dotted path, load the module and instantiate the named class.

    When `instance_of` is provided, the instantiated object must be an
    instance of that class, otherwise a ValueError is raised.

    :param path: dotted path used to locate the module and class
    :param instance_of: an expected class to compare isinstance on with the found class
    :return: the instance of the class
    """
    try:
        module_name, class_name = path.rsplit(".", 1)
        module = importlib.import_module(module_name)
        instance = getattr(module, class_name)()
    except (ModuleNotFoundError, AttributeError) as error:
        raise error from None
    if instance_of and not isinstance(instance, instance_of):
        raise ValueError(
            f"Unable to load a class of: {type(instance_of)} from: {path}"
        )
    return instance
a298ab8fc1c3a9504e8534419afa0311e53dfd67
113,087
def wpstyl(self, snap="", grspac="", grmin="", grmax="", wptol="", wpctyp="", grtype="", wpvis="", snapang="", **kwargs):
    """Controls the display and style of the working plane.

    APDL Command: WPSTYL

    Parameters
    ----------
    snap
        Snap increment for a locational pick (1E-6 minimum); -1 turns
        snapping off (defaults to 0.05).
    grspac
        Graphical spacing between grid points; display only, unrelated to
        snap points (defaults to 0.1).
    grmin, grmax
        Opposite corners of the square grid displayed over the working
        plane (Cartesian); for a polar system GRMAX is the outside radius
        and GRMIN is ignored. GRMIN = GRMAX suppresses the grid
        (defaults to -1.0 and 1.0).
    wptol
        Tolerance within which an entity is considered on the plane; used
        only for locational picking of vertices (defaults to 0.003).
    wpctyp
        Working plane coordinate system type: 0 Cartesian (default),
        1 polar (cylindrical tracking), 2 polar (spherical tracking).
    grtype
        Grid type: 0 grid and WP triad, 1 grid only, 2 WP triad only
        (default).
    wpvis
        Grid visibility: 0 hide GRTYPE entities (default), 1 show them.
    snapang
        Snap angle (0-180) in degrees; used only if WPCTYP = 1 or 2
        (defaults to 5 degrees).

    Notes
    -----
    Use WPSTYL,DEFA to reset the working plane to its default location
    and style, and WPSTYL,STAT to list its status. Blank fields keep the
    present settings. WPSTYL with no arguments toggles the grid on and
    off. This command is valid in any processor.
    """
    fields = (snap, grspac, grmin, grmax, wptol, wpctyp, grtype, wpvis, snapang)
    command = "WPSTYL," + ",".join(f"{field}" for field in fields)
    return self.run(command, **kwargs)
4e942a13332022b49b711a0cfbb5b8d3c5eeae3a
113,088
def build_shell_arguments(shell_args, apps_and_args=None):
    """Build the list of arguments for the shell.

    |shell_args| are the base arguments; |apps_and_args| maps each
    application to its specific arguments. Every app included will be run
    by the shell.
    """
    result = list(shell_args)
    if apps_and_args:
        for application, args in apps_and_args.items():
            result.append("--args-for=%s %s" % (application, " ".join(args)))
        result += apps_and_args.keys()
    return result
29ea269587c419d1ee4c66bd0cf13ea692e0daed
113,089
def get_track_id_from_search_query(spotify_obj, search_query, artist_name):
    """Return a track ID found via the Spotify API search (managed by the
    spotipy.Spotify object passed in) for the given search query and
    expected artist name; falls back to the first result's ID when the
    artist is not found, and None when there are no results."""
    results = spotify_obj.search(search_query)
    items = results['tracks']['items']
    try:
        for track in items:
            if track['artists'][0]['name'] == artist_name:
                return track['id']
        return items[0]['id']
    except IndexError:
        return None
4cc17b4e162116c89c14f67bbdd5f5a3edff39a1
113,095
def get_topics(topics, mode='p', top=0.5):
    """
    Return the top topics from a list of (topic, probability) tuples.

    :param topics: list of tuples (topic, double)
        List of the topics with corresponding probabilities
    :param mode: str, optional
        If 'p', keep topics until their probability sum reaches `top`;
        if 'n', keep the `top` highest-probability topics. Default 'p'.
    :param top: double, optional
        Threshold used according to `mode`. Default 0.5.
    :return: list containing the selected top topics
    """
    ranked = sorted(topics, key=lambda pair: pair[1], reverse=True)
    selected = []
    if mode == 'p':
        accumulated = 0
        for topic in ranked:
            if not accumulated < top:
                break
            selected.append(topic)
            accumulated += topic[1]
    elif mode == 'n':
        count = 0
        for topic in ranked:
            if not count < top:
                break
            selected.append(topic)
            count += 1
    return selected
d7a6bf4e747baac936a751d496ede5b6b938baac
113,097
def format_id_list(id_data_dict: dict) -> list:
    """
    Helper function to create an ID list from an ID data dictionary.

    Args:
        id_data_dict (dict): Dictionary containing full IDs data
            (iterated as a sequence of per-entry mappings).

    Returns:
        list: containing IDs only.
    """
    id_list = []
    for id_data in id_data_dict:
        # The API might return ID, Id or id as key, so normalize keys
        # to lowercase before looking the ID up.
        normalized = {key.lower(): value for key, value in id_data.items()}
        id_value = normalized.get('id')
        # Skip entries without a usable ID (keeps the original falsy
        # filter, so None/0/'' are all dropped).
        if id_value:
            id_list.append(id_value)
    return id_list
221358091fc97f1491f75f23cd4f1c130100ce80
113,103
def format_size(size):
    """Format a size of a file in bytes to be human-readable."""
    size = int(size)
    if size <= 1024:
        return '%d B' % size
    # Whole KiB plus the fractional remainder, trimmed of trailing zeros.
    kib = size // 1024 + (size % 1024) / 1024.0
    text = ('%.2f' % kib).rstrip('0').rstrip('.')
    return text + ' KiB'
ee5d8c4aa678960892f5d1e20f3b40d622004fb1
113,105
def get_capabilities(conn):
    """
    Return a string which is the XML of the hypervisor's capabilities.

    :param conn: libvirt Connection object
    :return: (str) of capabilities in XML format
    """
    capabilities_xml = conn.getCapabilities()
    return capabilities_xml
ffe9cc18d84b88183a0f0ba4658cee1f98383386
113,112
import csv


def load_database(filename):
    """Load a database from a CSV file.

    Args:
        filename: the filename of the database

    Returns:
        dictionary mapping each row's 'id' value to the row dict
    """
    with open(filename) as csvfile:
        reader = csv.DictReader(csvfile, delimiter=',', quotechar='"')
        # Later rows with a duplicate id overwrite earlier ones.
        return {row['id']: row for row in reader}
0689ef20f0ddd03aa8ec83c723ab1969267b5f7d
113,113
import pkg_resources


def parse_requirements(requirements, in_file=None):
    """
    Parse string requirements into a list of
    :class:`pkg_resources.Requirement` instances.

    :param str requirements: Requirements text to parse
    :param str in_file: File the requirements came from
    :return: List of requirements
    :raises ValueError: if failed to parse
    """
    try:
        parsed = pkg_resources.parse_requirements(requirements)
        return list(parsed)
    except Exception as e:
        in_file = ' in %s' % in_file if in_file else ''
        raise ValueError('{} {}'.format(e, in_file))
69a3392e99fa1a30b5244fa58af910fa90573c06
113,115
import re


def remove_blank_lines(text):
    """Remove whitespace-only lines from the document.

    Args:
        text (string): markdown text that is going to be processed.

    Returns:
        string: text once it is processed.
    """
    blank_line_pattern = re.compile(r'^\s+$', re.UNICODE | re.MULTILINE)
    return blank_line_pattern.sub('', text)
ccef744c35e95e96b618712de27b8ead59aef71d
113,121
import io


def encode_chunk(dataframe):
    """Return a file-like object of CSV-encoded rows.

    Args:
        dataframe (pandas.DataFrame): A chunk of a dataframe to encode
    """
    text_buffer = io.StringIO()
    dataframe.to_csv(
        text_buffer,
        index=False,
        header=False,
        encoding="utf-8",
        float_format="%.17g",
        date_format="%Y-%m-%d %H:%M:%S.%f",
    )
    # Re-wrap as a BytesIO buffer so that unicode text is properly handled.
    # See: https://github.com/pydata/pandas-gbq/issues/106
    encoded = text_buffer.getvalue().encode("utf-8")
    return io.BytesIO(encoded)
0ff743b0d7df0a442ad5647f258cd10e052044c8
113,122
from typing import Awaitable
from typing import Any
import asyncio


def run(convMethod: Awaitable) -> Any:
    """Drive an awaitable to completion on the current event loop.

    Use `pyrlottie.run(convMethod)` or
    `asyncio.get_event_loop().run_until_complete(convMethod)` in place of
    `asyncio.run(convMethod)`. See
    https://github.com/awestlake87/pyo3-asyncio/issues/19#issuecomment-846686814
    for more information.

    Run until the future (an instance of Future) has completed. If the
    argument is a coroutine object it is implicitly scheduled to run as an
    asyncio.Task.

    Args:
        convMethod (Awaitable): Awaitable to run. eg.
            convSingleLottie(gLottieFile, destFiles={"test_data/convSingleLottie.webp"})

    Returns:
        Any: the Awaitable's result or raise its exception.
    """
    event_loop = asyncio.get_event_loop()
    return event_loop.run_until_complete(convMethod)
3faf1a04d595c8fdb0a10e2a9d8f57a5f1bc271b
113,123
def calc_goals(data):
    """
    Calculate the total traditional and weighted goals for all players,
    grouped by player id.

    Author: Rasmus Säfvenberg

    Parameters
    ----------
    data : pandas.DataFrame
        A data frame as retrieved by weighted.get_data()

    Returns
    -------
    weighted_goals : pandas.DataFrame
        A data frame with total goals and weighted goals per player.
    """
    scored = data[["GoalScorerId", "reward"]].copy()
    # Each event row represents exactly one goal.
    scored["GoalsScored"] = 1

    weighted_goals = (
        scored.groupby("GoalScorerId")[["GoalsScored", "reward"]]
        .sum()
        .reset_index()
        .rename(columns={"GoalsScored": "Goals",
                         "reward": "WeightedGoals",
                         "GoalScorerId": "PlayerId"})
        .sort_values("WeightedGoals", ascending=False)
    )
    return weighted_goals
cdce79ded307849ea8caacc7fdbe4732c11f2d7a
113,127
def without_prefix(arg: str, prefix: str) -> str:
    """Remove `prefix` from the head of `arg`."""
    if arg.startswith(prefix):
        return arg[len(prefix):]
    raise ValueError(f"{arg=} does not start with {prefix=}.")
3a5c796cd548384e1cd557c6516b92020f12c040
113,128
import json


def convert_dict_to_json(data_dict: dict) -> str:
    """Serialize a dictionary to a pretty-printed JSON string with
    sorted keys."""
    return json.dumps(data_dict, sort_keys=True, indent=4)
feb427d557bec53229c438bc23b1d9673e262542
113,129
def to_format(phrase: str, param: str):
    """
    Format the 'phrase' string with the 'param' parameter.

    :param phrase: it must contain a {} that will be replaced by 'param'
    :param param: parameter
    :return: formatted string
    """
    formatted = phrase.format(param)
    return formatted
e85ff63425f9fbda909d4ed75544ee5c182daa29
113,130
def imxy2kxy(x, y, x0, y0, fx, fy):
    """
    Convert Cartesian coordinates in a binned image (x, y) to momentum
    coordinates (kx, ky).

    **Parameters**\n
    x, y: numeric, numeric
        Components of the Cartesian coordinates.
    x0, y0: numeric, numeric
        Origins of the Cartesian coordinates.
    fx, fy: numeric numeric
        Numerical scaling in the conversion.
    """
    return (fx * (x - x0), fy * (y - y0))
39ddf6fc95ebbe876f1a529cd4fdd3e4bbc539bc
113,131
import torch


def decorate_with_diffs(data, exponent, remove_raw_data=False):
    """
    Augment `data` with per-element deviations from a sample mean
    (an L2-norm-style ||x - mean|| feature).

    Purpose: concatenating the raw data with its deviation features gives
    the discriminator D more information about the target data
    distribution.

    :param data: Tensor of samples
    :param exponent: power applied to the deviations
    :param remove_raw_data: if True, return only the deviation features
    :return: torch.cat([data, diffs], dim=1); dim=0 concatenates tensors
             row-wise, dim=1 concatenates them column-wise
    """
    # dim=0 is rows, dim=1 is columns; keepdim=True preserves the number
    # of dimensions after the reduction (2-D input stays 2-D).
    mean = torch.mean(data.data, dim=1, keepdim=True)
    # Broadcast-multiply a ones tensor by a scalar taken from the mean.
    # NOTE(review): mean.tolist()[0][0] uses only the FIRST row's mean for
    # every row — presumably intended for single-row batches; confirm.
    mean_broadcast = torch.mul(torch.ones(data.size()), mean.tolist()[0][0])
    # Deviation of each element from that (first-row) mean, raised to
    # `exponent`.
    diffs = torch.pow(data - mean_broadcast, exponent)
    if remove_raw_data:
        return torch.cat([diffs], dim=1)
    else:
        # diffs measures how far the samples deviate from the sample mean
        # (raised to the given power); feed this deviation information to
        # the network together with the raw samples.
        return torch.cat([data, diffs], dim=1)
a783beb56792f11342e01d12cada567ac1215888
113,132
def _desktop_escape(s): """Escape a filepath for use in a .desktop file""" escapes = {' ': R'\s', '\n': R'\n', '\t': R'\t', '\\': R'\\'} s = str(s) for unescaped, escaped in escapes.items(): s = s.replace(unescaped, escaped) return s
5cd8093de949cf455451e0e8377a082c5d637cd9
113,133
def read_hlas(fasta_fai):
    """Get HLA alleles from the hg38 fasta fai file."""
    with open(fasta_fai) as in_handle:
        # The allele name is the first whitespace-separated field.
        return [line.split()[0] for line in in_handle
                if line.startswith("HLA")]
b3a25429c7d95db9331070c8611296f830e92007
113,135
def key_from_dict(kwargs):
    """
    Generate a key for a dictionary from a set of keyword arguments and
    their values. Used for the cache SQL lookups. A frozenset is used so
    that lookup order doesn't matter.

    :param kwargs: Arguments to generate key from
    :return: frozenset of string keys generated from arguments
    """
    parts = (f"{name}|{kwargs[name]}" for name in kwargs)
    return frozenset(parts)
6ae31e1f60f0dd95539e10a5af7faeb9c9a3604e
113,137
import typing


def empty(x: typing.Any) -> bool:
    """Return True if x is None, or if x doesn't have a length, or if
    x's length is 0.
    """
    if x is None:
        return True
    try:
        return len(x) == 0
    except TypeError:
        # len() raises TypeError for unsized objects (ints, floats, ...);
        # such objects are treated as not empty. The original bare
        # `except:` also swallowed SystemExit/KeyboardInterrupt.
        return False
ecc576d7dc167b8a01753fd6a018a75515a611f5
113,138
from typing import Counter


def get_word_counts(sentences):
    """
    Create a Counter of all words in sentences, in lowercase.

    Args:
        sentences: List of sentences, from parse_conllu; each sentence is
            a sequence of tokens whose second element is the word form.

    Returns:
        Counter with word: count.
    """
    counts = Counter()
    for sentence in sentences:
        counts.update(token[1].lower() for token in sentence)
    return counts
3b095405aea52edea7c35e97e0b5ed7f52bd5d7e
113,139
import json
import random
import time


def pick_random_quote(path='responses/class_quotes.json'):
    """
    Get a random quote from a JSON file.

    :param path: a string that indicates the location where the quotes
        are stored.
    :return: a random quote formatted as '"quote" -- quotee'
    """
    with open(path) as handle:
        responses = json.load(handle)
    # Seed from the current time so repeated calls don't repeat groups.
    random.seed(round(time.time()) % 1e5)
    chosen = random.sample(responses['responses'], 1)[0]
    return '"' + chosen["quote"] + '"' + " -- " + chosen["quotee"]
6d41f35a8316f09d30849d3d1c790e5be2065f68
113,145
def degisken_tiplerine_ayirma(data, cat_th, car_th):
    """
    Classify the columns of the given data frame into categorical,
    numerical and cardinal variables.

    Parameters
    ----------
    data: pandas.DataFrame
        Data set to analyse.
    cat_th: int
        Threshold below which a numeric column's unique count makes it
        categorical.
    car_th: int
        Threshold above which an object column's unique count makes it
        cardinal.

    Returns
    -------
    cat_deg: list
        Categorical variable names.
    num_deg: list
        Numerical variable names.
    car_deg: list
        Categorical-but-cardinal variable names.

    Examples
    -------
    df = dataset_yukle("breast_cancer")
    cat, num, car = degisken_tiplerine_ayirma(df, 10, 20)

    Notes
    -------
    cat_deg + num_deg + car_deg = total number of variables.
    """
    # Numeric columns with few distinct values behave like categories.
    numeric_but_categoric = [
        col for col in data.columns
        if data[col].dtypes != "O" and data[col].nunique() < cat_th
    ]
    # Object columns with many distinct values are cardinal.
    car_deg = [
        col for col in data.columns
        if data[col].dtypes == "O" and data[col].nunique() > car_th
    ]
    num_deg = [
        col for col in data.columns
        if data[col].dtypes != "O" and col not in numeric_but_categoric
    ]
    cat_deg = [
        col for col in data.columns
        if data[col].dtypes == "O" and col not in car_deg
    ] + numeric_but_categoric

    print(f"Dataset kolon/değişken sayısı: {data.shape[1]}")
    print(f"Dataset satır/veri sayısı: {data.shape[0]}")
    print("********************************************")
    print(f"Datasetin numeric değişken sayısı: {len(num_deg)}")
    print(f"Datasetin numeric değişkenler: {num_deg}")
    print("********************************************")
    print(f"Datasetin categoric değişken sayısı: {len(cat_deg)}")
    print(f"Datasetin categoric değişkenler: {cat_deg}")
    print("********************************************")
    print(f"Datasetin cardinal değişken sayısı: {len(car_deg)}")
    print(f"Datasetin cardinal değişkenler: {car_deg}")
    print("********************************************")
    return cat_deg, num_deg, car_deg
76e41cb9513a11e53a1c15003aea99558326daa8
113,150
def oci_compliant_name(name: str) -> str:
    """
    Convert a name to an OCI compliant name.

    The OCI Spec requires image names to be lowercase; container
    registries such as podman/buildah currently enforce lower-case
    repository names, so the name is simply lower-cased.

    Args:
        name: the name to convert

    Examples:
        >>> oci_compliant_name("foo")
        'foo'
        >>> oci_compliant_name("FoO")
        'foo'
    """
    lowered = name.lower()
    return lowered
f05523d187a0e28b97f2546282aed02ab651c3ca
113,151
import fnmatch def _expand_upstream(upstream, task_names): """ Processes a list of upstream values extracted from source (or declared in the spec's "upstream" key). Expands wildcards like "some-task-*" to all the values that match. Returns a dictionary where keys are the upstream dependencies and the corresponding value is the wildcard. If no wildcard, the value is None """ if not upstream: return None expanded = {} for up in upstream: if '*' in up: matches = fnmatch.filter(task_names, up) expanded.update({match: up for match in matches}) else: expanded[up] = None return expanded
1287353b9bb23c5dd4a8947c834d628ef34f143e
113,155
def _initialize_tableau(A_ub, b_ub, A_eq, b_eq, tableau, basis):
    """
    Initialize the `tableau` and `basis` arrays in place for Phase 1.

    Suppose that the original linear program has the following form:

    maximize::

        c @ x

    subject to::

        A_ub @ x <= b_ub
        A_eq @ x == b_eq
        x >= 0

    Let s be a vector of slack variables converting the inequality
    constraint to an equality constraint so that the problem turns to be
    the standard form:

    maximize::

        c @ x

    subject to::

        A_ub @ x + s == b_ub
        A_eq @ x == b_eq
        x, s >= 0

    Then, let (z1, z2) be a vector of artificial variables for Phase 1.
    We solve the following LP:

    maximize::

        -(1 @ z1 + 1 @ z2)

    subject to::

        A_ub @ x + s + z1 == b_ub
        A_eq @ x + z2 == b_eq
        x, s, z1, z2 >= 0

    The tableau needs to be of shape (L+1, n+m+L+1), where L=m+k.
    Column layout: x in [0, n), s in [n, n+m), z1 in [n+m, n+2m),
    z2 in [n+2m, n+2m+k), RHS in the last column.

    Parameters
    ----------
    A_ub : ndarray(float, ndim=2)
        ndarray of shape (m, n).

    b_ub : ndarray(float, ndim=1)
        ndarray of shape (m,).

    A_eq : ndarray(float, ndim=2)
        ndarray of shape (k, n).

    b_eq : ndarray(float, ndim=1)
        ndarray of shape (k,).

    tableau : ndarray(float, ndim=2)
        Empty ndarray of shape (L+1, n+m+L+1) to store the tableau.
        Modified in place.

    basis : ndarray(int, ndim=1)
        Empty ndarray of shape (L,) to store the basic variables.
        Modified in place.

    Returns
    -------
    tableau : ndarray(float, ndim=2)
        View to `tableau`.

    basis : ndarray(int, ndim=1)
        View to `basis`.

    """
    m, k = A_ub.shape[0], A_eq.shape[0]
    L = m + k
    # Recover n (number of original variables) from the tableau width.
    n = tableau.shape[1] - (m+L+1)

    # Copy inequality constraint coefficients into rows 0..m-1.
    for i in range(m):
        for j in range(n):
            tableau[i, j] = A_ub[i, j]

    # Copy equality constraint coefficients into rows m..m+k-1.
    for i in range(k):
        for j in range(n):
            tableau[m+i, j] = A_eq[i, j]

    # Zero the slack/artificial columns before placing the unit entries.
    tableau[:L, n:-1] = 0

    for i in range(m):
        tableau[i, -1] = b_ub[i]
        if tableau[i, -1] < 0:
            # Negative RHS: flip the row's sign so the RHS becomes
            # nonnegative; the slack then enters with coefficient -1
            # (i.e. it acts as a surplus variable).
            for j in range(n):
                tableau[i, j] *= -1
            tableau[i, n+i] = -1
            tableau[i, -1] *= -1
        else:
            tableau[i, n+i] = 1
        # Artificial variable z1 for inequality row i (column n+m+i).
        tableau[i, n+m+i] = 1

    for i in range(k):
        tableau[m+i, -1] = b_eq[i]
        if tableau[m+i, -1] < 0:
            # Flip equality rows with negative RHS as well.
            for j in range(n):
                tableau[m+i, j] *= -1
            tableau[m+i, -1] *= -1
        # Artificial variable z2 for equality row i (column n+2m+i).
        tableau[m+i, n+m+m+i] = 1

    # Phase 1 objective row: sum of all constraint rows over the original
    # and slack columns (equivalent to pricing out the artificial basis).
    tableau[-1, :] = 0
    for i in range(L):
        for j in range(n+m):
            tableau[-1, j] += tableau[i, j]
        tableau[-1, -1] += tableau[i, -1]

    # Start Phase 1 with the artificial variables as the basis.
    for i in range(L):
        basis[i] = n+m+i

    return tableau, basis
53e34d84978bb1604d3b5e8270b4df8f1e16037a
113,156
import tarfile


def untar(tarname, tardir):
    """
    Extract a .tar / .tar.gz archive.

    :param tarname: path of the archive to extract
    :param tardir: directory the contents are extracted into
    :return: the success message "已经解压完成" on success, or the caught
             exception object on failure (contract kept for existing
             callers that inspect the return value)
    """
    try:
        # `with` guarantees the archive is closed even if extraction
        # fails (the original leaked the open TarFile on error).
        with tarfile.open(tarname) as archive:
            # NOTE(review): extractall trusts member paths; for untrusted
            # archives consider the `filter="data"` argument (3.12+).
            archive.extractall(path=tardir)
        return "已经解压完成"
    except Exception as error:
        return error
188ead1c90388d467ebaad7e99634ebbfeeed3c1
113,161