content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def convert_map(old_map, convert_dict, out_loc):
    """Rename populations in a sample map and write the result.

    Reads a tab-separated sample map (sample<TAB>population), replaces each
    population with its value from ``convert_dict`` — e.g. club STU, ITU into
    SAS with ``{"STU": "SAS", "ITU": "SAS"}`` — writes the renamed map to
    ``out_loc`` and returns it.

    inputs:
        old_map: path to the input sample map
        convert_dict: mapping from old population name to new one
        out_loc: path where the renamed sample map is written
    returns:
        list of ``[sample_name, population]`` pairs (the renamed map)
    writes:
        the renamed sample map into out_loc
    """
    renamed = []
    with open(old_map, "r") as src:
        for line in src.readlines():
            fields = line.rstrip("\n").split("\t")
            renamed.append([fields[0], convert_dict[fields[1]]])
    with open(out_loc, "w") as dst:
        for pair in renamed:
            dst.write("{}\t{}\n".format(*pair))
    return renamed
ece286a4eeb0978891ce7a94ae8b125ee8cddaf6
27,758
def decompose_name_string(name):
    """Return every leading-word prefix of *name*.

    The result starts with the empty string and ends with the full name,
    adding one space-separated word at a time.
    """
    words = name.split(" ")
    return [" ".join(words[:count]) for count in range(len(words) + 1)]
978f332c8ac20c5363fd53572feff5b51d351a44
27,759
def inv_permutation(permutation):
    """Get the inverse of a permutation (e.g. to undo a transposition).

    Args:
        permutation (list or tuple): permutation to invert.

    Returns:
        list: inverse mapping, where value ``p`` at position ``i`` in the
        input yields ``inverse[p] == i``.
    """
    inverse = [0] * len(permutation)
    for position, value in enumerate(permutation):
        inverse[value] = position
    return inverse
ab75f150d9df12d6bbec64fbe4744d962b9de1c6
27,760
def parent_user_password(db, parent_user):
    """Give *parent_user* the fixed password 'password', persist and return it."""
    parent_user.set_password('password')
    parent_user.save()
    return parent_user
56718190793179034d5cce86e5bc6060c8d5d5a2
27,761
def _combine_indexers(indexers1, indexers2): """ Conbine index data from two indexers :param indexers1: list of indexers to combine index data :param indexers2: second list of indexers to combine index data :return: first list of indexers containing index data from both indexers in pair""" if len(indexers1) != len(indexers2): raise ValueError('Cannot reduce results with different dimensions') return [indexer_pair[0] + indexer_pair[1] for indexer_pair in zip(indexers1, indexers2)]
c94d175542ff9fd3c88e592346a1f6023e27ba4a
27,763
def _get_val_list(obj, path_list, reverse=False): """Extract values from nested objects by attribute names. Objects contain attributes which are named references to objects. This will descend down a tree of nested objects, starting at the given object, following the given path. Args: obj: object Any type of object path_list: list Attribute names reverse: bool Reverse the list of values before concatenation. Returns: list of objects """ try: y = getattr(obj, path_list[0]) except AttributeError: return [] if len(path_list) == 1: return [y] else: val_list = [x for a in y for x in _get_val_list(a, path_list[1:], reverse)] if reverse: val_list.reverse() return val_list
b66c7242db6c02340a2b8b2d92d842894990891b
27,764
def remove_all_None(a_list):
    # type: (list) -> list
    """Return *a_list* without its ``None`` entries, order preserved."""
    return list(filter(lambda item: item is not None, a_list))
15eaeb7ef0208f3cd5519534bf155c62da88d3d5
27,765
def _get_ids(records, key): """Utility method to extract list of Ids from Bulk API insert/query result. Args: records (:obj:`list`): List of records from a Bulk API insert or SOQL query. key (:obj:`str`): Key to extract - 'Id' for queries or 'id' for inserted data. Returns: (:obj:`list`) of inserted record Ids in form [{'Id':'001000000000001'},...] """ return [{'Id': record[key]} for record in records]
2fe90c06a7458af49db87d2ee01350e065920113
27,766
def diagnosis_from_description(description):
    """Return the clinical diagnosis stored in *description*.

    :raises ValueError: if the diagnosis is not one of the three known labels.
    """
    allowed = ("nevus", "melanoma", "seborrheic keratosis")
    diagnosis = description["meta"]["clinical"]["diagnosis"]
    if diagnosis in allowed:
        return diagnosis
    raise ValueError(diagnosis)
b2a46fe653648e8a5f9b662be45a2cd1f68cc339
27,768
def filter_delivery(df, tol_imps=100, tol_ctr=100):
    """Drop all rows belonging to dates whose delivery is unbalanced.

    A date is removed when the spread between its max and min impressions
    (relative to the max) or its ctr spread (relative to max + .01) exceeds
    the given tolerances, both expressed as percentage * 100.

    :param df: Dataframe with DATE, impressions and ctr columns
    :param tol_imps: tolerance impressions in percentage * 100
    :param tol_ctr: tolerance ctr in percentage * 100
    :return: Dataframe restricted to the balanced dates
    """
    unbalanced_dates = []
    for day in df.DATE.unique():
        day_imps = df.loc[df['DATE'] == day, 'impressions']
        day_ctr = df.loc[df['DATE'] == day, 'ctr']
        imps_spread = (day_imps.max() - day_imps.min()) / day_imps.max()
        # +.01 guards against a zero maximum ctr
        ctr_spread = (day_ctr.max() - day_ctr.min()) / (day_ctr.max() + .01)
        if 100 * imps_spread > tol_imps or 100 * ctr_spread > tol_ctr:
            unbalanced_dates.append(day)
    return df[~df['DATE'].isin(unbalanced_dates)]
96f5df33ec07b7c500dabb5fe2333d62fe6ca6e7
27,772
def get_params_for_component(params, component):
    """Return the params belonging to one component, with the prefix stripped.

    Params are expected in the form ``component__param``, e.g.

        >> params = {"vec__min_df": 1, "clf__probability": True}
        >> get_params_for_component(params, "vec")
        {"min_df": 1}

    Only keys starting with ``component + "__"`` are matched: the original
    ``startswith(component)`` also matched other components sharing the
    prefix (e.g. "vector" for component "vec"), and the subsequent
    ``split`` then crashed or mis-parsed those keys.
    """
    prefix = f"{component}__"
    component_params = {}
    for key, value in params.items():
        if key.startswith(prefix):
            # strip only the leading prefix so param names that happen to
            # contain "component__" again are preserved intact
            component_params[key[len(prefix):]] = value
    return component_params
aefee29c848fecab432efc74acd3d1bcaf80e539
27,773
import re


def try_include(line):
    """Return the filename from an ``#include`` line, or None otherwise.

    Handles ``#include <foo.h>``, ``#include "foo.h"`` and bare
    ``#include foo.h`` forms.  The group is non-greedy: with the original
    greedy ``(.*)`` the optional closing ``>``/``"`` matched empty and the
    delimiter leaked into the captured name (e.g. ``stdio.h>``).
    """
    match = re.match(r'^#include\s*[<"]?(.*?)[>"]?$', line)
    return match.group(1) if match else None
f30c885ffa783f78f5a71dc1906ac6e158226361
27,775
import os


def dirname(path, num=1):
    """Return the absolute path *num* directory levels above *path*."""
    result = os.path.abspath(path)
    while num > 0:
        result = os.path.dirname(result)
        num -= 1
    return result
c864fc02a81ff4242c84980b6b57a0970ed82849
27,776
from typing import Dict
from pathlib import Path
import json


def get_config() -> Dict:
    """Load ``config.json`` from disk as a python dict and return it.

    The file is expected to live in the same directory as this source file.
    """
    config_path = Path(__file__).parent / "config.json"
    with open(config_path.resolve(), "r") as handle:
        return json.load(handle)
68a0a11ddfea137b1ede61686df630e1d9735c21
27,777
import inspect


def _parse_cli_options(func):
    """Parse click-style options from a function signature.

    Inspects *func*'s parameters and builds, for each keyword-capable
    parameter, an ``(args, kwargs)`` pair suitable for a click ``option``
    decorator: ``args`` is ``(option_name, param_name)`` and ``kwargs``
    carries ``type`` / ``default`` / ``required`` / ``is_flag`` as derived
    from the annotation and default value.  Returns the list reversed so
    stacked decorators apply in declaration order.
    """
    options = []
    for param in inspect.signature(func).parameters.values():
        if param.kind not in {param.POSITIONAL_OR_KEYWORD, param.KEYWORD_ONLY}:
            # Only keyword arguments are currently supported
            continue
        # e.g. "max_size" -> "--max-size"; strip('-') guards leading/trailing
        # underscores in the parameter name
        option_name = '--' + param.name.lower().replace('_', '-').strip('-')
        kwargs = {}
        if param.annotation in {str, int, float, bool}:
            # Only basic types are currently supported
            kwargs['type'] = param.annotation
        if param.default != param.empty:
            kwargs['default'] = param.default
        else:
            # If the param doesn't have a default, then it's required
            kwargs['required'] = True
        if param.annotation == bool or isinstance(param.default, bool):
            if param.default is True:
                # If the default of a boolean option is ``True``, then add a
                # ``--no-x` off switch
                option_name += '/--no-' + option_name.lstrip('-')
            else:
                # If the default is ``False``, just make it a basic flag
                kwargs['is_flag'] = True
        args = (option_name, param.name)
        options.append((args, kwargs))
    # Reverse it so the decorators are applied in the correct order
    return options[::-1]
16876af59f791a896b398f3562573ae828faee34
27,778
def get_filename_without_extension(filename):
    """Strip the last dot-extension from *filename*.

    An extension is whatever follows the final dot: 'file.txt' -> 'file',
    'a.tar.gz' -> 'a.tar'.  A name without a dot is returned unchanged, and
    an empty input yields the literal string 'file'.

    :param filename: the filename to get the name from
    :return: the name of the given 'filename'
    """
    if not filename:
        return 'file'
    name, dot, _extension = filename.rpartition('.')
    return name if dot else filename
7359be82706b1aa041c3559293db8e8cfe49f157
27,779
def __prepare_word(word: str) -> str: """ ### Args - word: `str`, word to be processed ### Returns - `str`, word with all non-alphanumeric characters removed in lower case (spaces are removed too) ### Errors raised - None """ prepared_word = "" for letter in word: if letter.isalnum(): prepared_word += letter return prepared_word.lower()
b25aab72f1640241f6a3a1e5bb7c5adc7056bc56
27,780
def make_variable_batch_size(num_inputs, onnx_model):
    """Mark the batch dimension of the first *num_inputs* graph inputs as variable.

    Setting ``dim_param`` to the string 'batch_size' makes the dimension
    symbolic, which Tensorflow interprets as the "?" shape.  ``num_inputs``
    must be specified because ``onnx_model.graph.input`` is a list of inputs
    of all layers, not just model inputs.

    :param num_inputs: int, number of model inputs (e.g. 2 for Text and Image)
    :param onnx_model: ONNX model instance
    :return: the same ONNX model instance, modified in place
    """
    for index in range(num_inputs):
        batch_dim = onnx_model.graph.input[index].type.tensor_type.shape.dim[0]
        batch_dim.dim_param = 'batch_size'
    return onnx_model
e503ea83cac31c33fff0cee6909e7f6640acf4b5
27,781
import torch


def ratio_disc(disc, x_real, x_fake):
    """Estimate density ratios p/q for the concatenated real and fake samples.

    The discriminator's ``classify`` outputs ``p / (p + q)``; the ratio is
    recovered as ``d / (1 - d)``.

    Args:
        disc: The discriminator
        x_real: samples from the real distribution
        x_fake: samples from the fake distribution

    Returns:
        Tensor of density ratios for ``cat([x_real, x_fake])``
    """
    samples = torch.cat([x_real, x_fake])
    d = disc.classify(samples)
    return d / (1 - d)
c901a5a0b8c0201a00d34b8a01115240712f3b61
27,782
def decision_engine(p):
    """Flatten a prediction object for the outbound payload.

    :param p: prediction exposing ``label`` and three ``classProbabilities``
    :return: [label as float, prob0, prob1, prob2]
    """
    probs = p.classProbabilities
    return [float(p.label), probs[0], probs[1], probs[2]]
0c6a825f893a1436a658cdff19b1c2056a14d439
27,785
def meme(does, quality, person, be, person2):
    """Construct the stick-figure Bill meme from supplied parameters.

    Usage examples:
        axs byname be_like , meme "finds and fixes an error in Wikipedia" smart
        axs byname dont_be_like , meme 'wrote an OS that everybody hates' selfish --person2=everybody
    """
    lines = (
        "",
        f"This is {person}.",
        "",
        f"{person} {does}.",
        "",
        f"{person} is {quality}.",
        "",
        f"{be} like {person2}.",
        "",
    )
    return "\n".join(lines)
456e6eb2046274cb4c539e48c0a1a53635e76e3c
27,788
def power(x, n):
    """Compute x**n recursively via x^n = x * x^(n-1); O(|n|) multiplications.

    Extended to handle negative exponents through x^-n = 1 / x^n — the
    original recursed without bound for n < 0.

    :param x: base
    :param n: integer exponent (may be negative)
    :return: x raised to the n-th power
    """
    if n < 0:
        return 1 / power(x, -n)
    if n == 0:
        return 1
    return x * power(x, n - 1)
5772fc4ccd1e392f7e8cff4a6068b4cf21989312
27,789
import re


def splitAtDelimiter(a_string, delimeters):
    """Split *a_string* at each delimiter character, keeping the delimiters.

    assumes a_string is a string; assumes delimeters is a string consisting
    of the desired delimiter characters.  Returns the list produced by
    ``re.split`` with the delimiters captured as separate items.  Note the
    delimiter characters are inserted into the pattern verbatim (no
    escaping), matching the historical behaviour.
    """
    alternatives = "|".join(delimeters)
    return re.split("(" + alternatives + ")", a_string)
96ffe5b6a5cad53c6ab18fa3ab23d2ff8d124391
27,792
import numpy def _get_error_matrix(cost_matrix, confidence_level): """Creates error matrix (used to plot error bars). S = number of steps in permutation test B = number of bootstrap replicates :param cost_matrix: S-by-B numpy array of costs. :param confidence_level: Confidence level (in range 0...1). :return: error_matrix: 2-by-S numpy array, where the first row contains negative errors and second row contains positive errors. """ mean_costs = numpy.mean(cost_matrix, axis=-1) min_costs = numpy.percentile( cost_matrix, 50 * (1. - confidence_level), axis=-1 ) max_costs = numpy.percentile( cost_matrix, 50 * (1. + confidence_level), axis=-1 ) negative_errors = mean_costs - min_costs positive_errors = max_costs - mean_costs negative_errors = numpy.reshape(negative_errors, (1, negative_errors.size)) positive_errors = numpy.reshape(positive_errors, (1, positive_errors.size)) return numpy.vstack((negative_errors, positive_errors))
a7589363347884eb711dc84e110aba367493eaa5
27,793
def csv_diff():
    """The diff that should be reported for the CSV files."""
    parts = [
        r"""The files '\S*/file.csv' and '\S*/file.csv' are different:\n\n""",
        r"""Column 'col_a': Series are different\n\n""",
        r"""Series values are different \(33.33333 %\)\n""",
        r"""\[index\]: \[0, 1, 2\]\n""",
        r"""\[left\]: \[1, 2, 3\]\n""",
        r"""\[right\]: \[10, 2, 3\]\n\n""",
        r"""Column 'col_b': Series are different\n\n""",
        r"""Series values are different \(33.33333 %\)\n""",
        r"""\[index\]: \[0, 1, 2\]\n\[left\]: \[a, b, c\]\n\[right\]: \[a, b_new, c\]""",
    ]
    return "".join(parts)
3ec0fdcadaee978cdd0f2da74acec0da371d3abc
27,794
def comp_height_eq(self):
    """Computation of the Frame equivalent Height for the mechanical model

    Parameters
    ----------
    self : Frame
        A Frame object

    Returns
    -------
    Hfra: float
        Equivalent Height of the Frame [m]
    """
    # radial thickness: outer radius minus inner radius
    outer, inner = self.Rext, self.Rint
    return outer - inner
25da3ee420ff1ed4dad133ac7d039ae408bb9cbe
27,795
def filter_list(values, excludes):
    """Return a new list with every element of *excludes* removed from *values*.

    Arguments:
        values : list
        excludes : list

    Returns:
        list
    """
    return [item for item in values if item not in excludes]
68f25fe3afd4faebeefde7639a2b3d5885255e6a
27,796
def _process_scopes(scopes): """Parse a scopes list into a set of all scopes and a set of sufficient scope sets. scopes: A list of strings, each of which is a space-separated list of scopes. Examples: ['scope1'] ['scope1', 'scope2'] ['scope1', 'scope2 scope3'] Returns: all_scopes: a set of strings, each of which is one scope to check for sufficient_scopes: a set of sets of strings; each inner set is a set of scopes which are sufficient for access. Example: {{'scope1'}, {'scope2', 'scope3'}} """ all_scopes = set() sufficient_scopes = set() for scope_set in scopes: scope_set_scopes = frozenset(scope_set.split()) all_scopes.update(scope_set_scopes) sufficient_scopes.add(scope_set_scopes) return all_scopes, sufficient_scopes
85fa5d8f761358225343f75e1c1dfa531e661eb3
27,797
import hashlib


def tagged_hash_init(tag: str, data: bytes = b""):
    """Prepare a tagged sha256 context ready to digest extra data.

    The sha256 digest of *tag* is fed into the context twice, followed by
    *data*; the caller may keep updating the returned hash object.
    """
    tag_digest = hashlib.sha256(tag.encode()).digest()
    ctx = hashlib.sha256()
    ctx.update(tag_digest)
    ctx.update(tag_digest)
    ctx.update(data)
    return ctx
955cc9fe6082d56663b9cd3531b0bb75aa2af472
27,798
def sort_decending(num):
    """Return *num* with its digits rearranged in descending order."""
    digits = sorted(str(num), reverse=True)
    return int("".join(digits))
7cd222ab31d4df559ee9554a58fcb2dd33a34eb4
27,802
def dic_longpks(pk_dic_longpk, stem_dic, INIT, PENALTY):
    """Score pseudoknots under energy model LongPK (no shortened stems occur).

    For each pseudoknot the free energy is the two stem stacking energies
    plus a loop-length entropy term (PENALTY per loop position) plus INIT;
    only pseudoknots with negative energy are kept.

    Input:  dictionary with pseudoknots where L2 >= 7.
    Return: dictionary mapping each kept pseudoknot to
            (energy, stack_s1, stack_s2, l1, l2, l3, entropy, looplength).
    """
    scored = {}
    for pk_stem in pk_dic_longpk:
        i, j, k, l = pk_stem[2], pk_stem[3], pk_stem[5], pk_stem[6]
        stemlength1, stemlength2 = pk_stem[4], pk_stem[7]
        stem1, stem2 = (i, j), (k, l)
        stack_s1 = stem_dic[stem1][2]
        energy_s1 = stem_dic[stem1][3]  # looked up for parity; not summed
        stack_s2 = stem_dic[stem2][2]
        energy_s2 = stem_dic[stem2][3]  # looked up for parity; not summed
        # the three loop lengths between/around the two stems
        l1 = k - (i + stemlength1)
        l2 = (j - stemlength1 + 1) - (k + stemlength2)
        l3 = (l - stemlength2) - j
        looplength = l1 + l2 + l3
        entropy = PENALTY * (looplength)
        pk_energy = stack_s1 + stack_s2 + entropy + INIT
        if pk_energy < 0.0:
            scored[pk_stem] = (
                pk_energy, stack_s1, stack_s2, l1, l2, l3, entropy, looplength)
    return scored
7030a188795003fdff892236d96b54bf1f7ba446
27,803
def generate_command_list(
    tool_yml, iteration_parameters, step, local=False, file_path=None
):
    """
    Generates an AWS Batch command list from a tool YML.

    The command is assembled in three stages: a wrapper prefix (localwrap or
    s3wrap, carrying cache/input/output bookkeeping), the tool's own
    baseCommand and arguments, and finally the CWL inputs in inputBinding
    position order as ``Ref::`` placeholders.

    Parameters:
    -----------
    tool_yml : dict
        Tool YML from file
    iteration_parameters: dict
        Job parameters for a particular step.  NOTE(review): despite the
        ``[]`` defaults below, ``_saber_input`` / ``_saber_output`` are
        treated as comma-separated strings when present (``.split(",")``) —
        confirm the producer's format.
    step : dict
        Step from CWL.  Used to make sure that the input is enabled in the
        workflow.
    file_path : str or None
        Path to store intermediate files (local or s3).

    Returns:
    --------
    list of str:
        Command list, where each string is a seperate string.  Could be used
        as input to a docker RUN cmd.
    """
    # Command list generation
    try:
        use_cache = step["hints"]["saber"]["use_cache"]
    except KeyError:
        # cache flag is passed to the wrapper as a string, hence "False"
        use_cache = "False"
    # Prepend to copy data from S3 (if any of the tool inputs are Files)
    if local:
        command_list = [
            "python3",
            "/app/localwrap",
            "--wf",
            "Ref::_saber_stepname",
            "--use_cache",
            str(use_cache),
        ]
        # Only care about file inputs
        seperator = ","
        input_files = iteration_parameters.get("_saber_input", [])
        if len(input_files) > 0:
            input_files = input_files.split(",")
            command_list.append("--input")
            command_list.append(seperator.join(input_files))
        # Append the data outputs to S3
        output_files = iteration_parameters.get("_saber_output", [])
        if len(output_files) > 0:
            output_files = output_files.split(",")
            command_list.append("--output")
            command_list.append(seperator.join(output_files))
    else:
        if file_path is not None:
            # bucket/directory/wf_id/ -> source is everything above the leaf
            source = "/".join(file_path.split("/")[:-1])
            command_list = [
                "python3",
                "/app/s3wrap",
                "--to",
                file_path,
                "--fr",
                source,
                "--use_cache",
                str(use_cache),
            ]
        else:
            # no explicit path: defer resolution to Ref:: placeholders
            command_list = [
                "python3",
                "/app/s3wrap",
                "--to",
                "Ref::_saber_stepname",
                "--fr",
                "Ref::_saber_home",
                "--use_cache",
                str(use_cache),
            ]
        # Only care about file inputs
        input_files = iteration_parameters.get("_saber_input", [])
        if len(input_files) > 0:
            command_list.append("--download")
            command_list.append("Ref::_saber_input")
        # Append the data outputs to S3
        output_files = iteration_parameters.get("_saber_output", [])
        if len(output_files) > 0:
            command_list.append("--upload")
            command_list.append("Ref::_saber_output")
    # Not really necessary to split but I dont see a use case where one would want a space in their command...
    command_list.extend(tool_yml["baseCommand"].split())
    command_list.extend([arg for arg in tool_yml["arguments"]])
    # Create sorted input list to respect CWL input binding
    sorted_inps = [(inpn, inp) for inpn, inp in tool_yml["inputs"].items()]
    sorted_inps.sort(key=lambda x: x[1]["inputBinding"]["position"])
    # Add to the command_list; inputs not wired in the step are skipped
    for inpn, inp in sorted_inps:
        if inpn in step["in"]:
            command_list.append(inp["inputBinding"]["prefix"])
            command_list.append("Ref::{}".format(inpn))
    return command_list
bf9f541361aaf40c24f81578b168211436e73770
27,805
def new_dict(num_dict):
    """Rotate the values of keys ``n1..nN`` left by one slot into a new dict.

    ``new['n<i>'] = old['n<i+1>']`` for i < N, and the first value wraps
    around to ``n<N>``.
    """
    size = len(num_dict)
    rotated = {f'n{index}': num_dict[f'n{index + 1}'] for index in range(1, size)}
    rotated[f'n{size}'] = num_dict['n1']
    return rotated
51614c295d4bb1763a4c8281091eb66734781583
27,807
import math


def GsoAzimuth(fss_lat, fss_lon, sat_lon):
    """Compute the azimuth angle from an earth station toward a GSO satellite.

    Based on Appendix D of FCC 05-56.

    Inputs:
        fss_lat: Latitude of earth station (degrees)
        fss_lon: Longitude of earth station (degrees)
        sat_lon: Longitude of satellite (degrees)
    Returns:
        Azimuth of the pointing arc from earth station to GSO satellite,
        normalized to [0, 360) degrees.
    """
    lat = math.radians(fss_lat)
    lon_delta = math.radians(sat_lon) - math.radians(fss_lon)
    if lat > 0:
        azimuth = math.pi - math.atan2(math.tan(lon_delta), math.sin(lat))
    else:
        azimuth = math.atan2(math.tan(lon_delta), math.sin(-lat))
    if azimuth < 0:
        azimuth += 2 * math.pi
    return math.degrees(azimuth)
f8761e3529f75a02d90369b5f8aa353f22bbc599
27,808
from typing import Callable
from typing import Tuple
import inspect


def _make_decorator_stackable(
    wrapper_func: Callable,
    base_func: Callable,
    exclude_parameters: Tuple[str],
) -> Callable:
    """
    Attach meta info to the decorator function objects so multiple instances
    of these deprecation decorators are stackable while the parameter check
    before runtime stays intact.

    This also facilitates blocking of deprecation warnings in the call
    hierarchy below another annotated function, preventing warnings
    triggered by kartothek-internal calls.

    Parameters
    ----------
    wrapper_func
        The deprecation decorator's wraps func; the first deprecation
        decorator in the stacked structure is attached to it as attribute
        ``outermost_stacked_kartothek_deprecator``.
    base_func
        The function decorated by this deprecation decorator.  Note that
        this can either be the decorated function itself or another nested
        decorator.
    exclude_parameters
        Tuple of parameter names that have already been handled by other
        deprecation decorators.

    Returns
    -------
    Callable
        *wrapper_func*, annotated with the stacking metadata and carrying
        *base_func*'s signature.
    """
    # Propagate the not-yet-handled parameter names down the stack: reuse the
    # inner decorator's remaining list when present, otherwise start from the
    # full signature of the decorated function.
    if hasattr(base_func, "kartothek_deprecation_decorator_params"):
        wrapper_func.kartothek_deprecation_decorator_params = tuple(  # type: ignore
            param
            for param in base_func.kartothek_deprecation_decorator_params  # type: ignore
            if param not in exclude_parameters
        )
    else:
        wrapper_func.kartothek_deprecation_decorator_params = tuple(  # type: ignore
            param
            for param in inspect.signature(base_func).parameters.keys()
            if param not in exclude_parameters
        )
    # Facilitate detection of outermost deprecation warning in order to limit
    # warning reporting of Kartothek internal calls.
    if not hasattr(wrapper_func, "outermost_stacked_kartothek_deprecator"):
        wrapper_func.outermost_stacked_kartothek_deprecator = wrapper_func  # type: ignore
    base_func.outermost_stacked_kartothek_deprecator = (  # type: ignore
        wrapper_func.outermost_stacked_kartothek_deprecator  # type: ignore
    )
    # explicitly preserve signature, facilitating compatibility with other decorators.
    wrapper_func.__signature__ = inspect.signature(base_func)  # type: ignore
    return wrapper_func
7648021d83ff5a44e7a86e777edba7dd8794010e
27,809
def is_garbage(raw_text, precision):
    """Check whether a tweet consists primarily of hashtags, mentions or urls.

    Compares the length of the text with '#'/'@'/'http' tokens removed
    against the original length; the tweet is garbage when the removed share
    reaches *precision*.

    Args:
        raw_text (str): tweet text (assumed non-empty).
        precision (float): minimum garbage share (0..1) to flag the tweet.
    """
    kept_tokens = [
        token for token in raw_text.split()
        if not token.startswith(("#", "@", "http"))
    ]
    kept_text = " ".join(kept_tokens)
    garbage_share = (len(raw_text) - len(kept_text)) / len(raw_text)
    return garbage_share >= precision
a508921e08673e686eccc43b8dab97424fff7726
27,810
def read_file(path):
    """Return the full text content of the file at *path*."""
    with open(path) as handle:
        content = handle.read()
    return content
bed1e255478c6d43d84240e1c1969aa3c1bc21f3
27,813
def constrain(val, min_val, max_val):
    """Constrain *val* to the range [min_val, max_val].

    Keyword arguments:
    val -- The unconstrained value
    min_val -- The lowest allowed value
    max_val -- The highest allowed value
    """
    # clamp from below first, then from above (max_val wins on conflict)
    lower_bounded = max(min_val, val)
    return min(max_val, lower_bounded)
655cc16ad425b6ca308d3edbd2881a3923ef195e
27,814
def part2(data):
    """Check on other criteria.

    Placeholder: currently passes *data* through unchanged.
    """
    return data
b13d7913ba2b30376b8a867ac8205e9b068a6d38
27,816
from typing import Counter


def small_class(raw_data, labels, threshold=20):
    """Drop samples whose class occurs fewer than *threshold* times.

    Returns the filtered ``(data, labels)`` pair with original order
    preserved.
    """
    frequency = Counter(labels)
    keep = [idx for idx, label in enumerate(labels) if frequency[label] >= threshold]
    return [raw_data[i] for i in keep], [labels[i] for i in keep]
cf80bd67ccc3d69baf0b71f226c8b56ef5b80e7c
27,818
def rescale_size(size, scale, return_scale=False):
    """Compute the new size to be rescaled to.

    Args:
        size (tuple[int]): The original size as ``(width, height)``.
        scale (int | tuple[int]): The scaling factor or the maximum size.
            A number rescales by that factor.  A 2-tuple rescales the image
            as large as possible within the scale; ``-1`` means infinity.
        return_scale (bool, optional): Whether to return the scaling factor.
            Default: ``False``.

    Returns:
        tuple | (tuple, float): The new size (and scaling factor).
    """
    w, h = size
    if isinstance(scale, (float, int)):
        scale_factor = scale
    elif isinstance(scale, tuple):
        if -1 in scale:
            # one edge unbounded: fit the short edge only
            scale_factor = max(scale) / min(h, w)
        else:
            long_edge, short_edge = max(scale), min(scale)
            scale_factor = min(long_edge / max(h, w), short_edge / min(h, w))
    else:
        raise TypeError(
            "'scale must be a number or tuple of int, but got '{}'".format(
                type(scale)))
    # round half up on both dimensions
    new_size = int(w * scale_factor + 0.5), int(h * scale_factor + 0.5)
    return (new_size, scale_factor) if return_scale else new_size
4baa26011ab191c4adca963c5ad7b6e63941b740
27,820
import subprocess


def _LoadEnvFromBat(args):
    """Run a bat command and return the environment variables it sets.

    ``&& set`` is appended so the shell dumps its environment after the
    command succeeds; the decoded stdout (stderr merged in) is returned.

    Raises:
        Exception: if the command exits non-zero.
    """
    command = args[:]
    command.extend(('&&', 'set'))
    popen = subprocess.Popen(
        command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    variables, _ = popen.communicate()
    if popen.returncode != 0:
        raise Exception('"%s" failed with error %d' % (command, popen.returncode))
    return variables.decode(errors='ignore')
26971669ab52afb5e59b2128fe824dc42f69d550
27,821
def compose(f, g):
    """Function composition. ``compose(f, g) -> f . g``

    >>> add_2 = lambda a: a + 2
    >>> mul_5 = lambda a: a * 5
    >>> compose(add_2, mul_5)(1)
    7
    >>> compose(mul_5, add_2)(1)
    15
    """
    def composed(*args, **kwargs):
        return f(g(*args, **kwargs))
    return composed
053c1c6db1517a10ef0580268abb709441a71333
27,822
from pathlib import Path


def get_rockets_frames():
    """Init rocket animation frames from text files next to this module."""
    base_dir = Path(__file__).resolve().parent
    frame_names = ('rocket_frame_1.txt', 'rocket_frame_2.txt')
    return tuple((base_dir / name).read_text() for name in frame_names)
318c514d2328ad177bf5a2ee4db46e87b9df25d0
27,824
def keynat(string): """ A natural sort helper function for sort() and sorted() without using regular expressions or exceptions. >>> items = ('Z', 'a', '10th', '1st', '9') >>> sorted(items) ['10th', '1st', '9', 'Z', 'a'] >>> sorted(items, key=keynat) ['1st', '9', '10th', 'a', 'Z'] :type string: string :param string: String to compare :rtype: int :return: Position """ it = type(1) r = [] for c in string: if c.isdigit(): d = int(c) if r and type( r[-1] ) == it: r[-1] = r[-1] * 10 + d else: r.append(d) else: r.append(c.lower()) return r
fa8a1e52ae97ff78cecab0afe1050142fd12d18a
27,826
from typing import List


def intersection(lst1, lst2) -> List:
    """Calculate the intersection of two lists, ordered by the first list.

    Membership is tested against a set built from ``lst2`` when its elements
    are hashable — O(len(lst1) + len(lst2)) instead of the original
    O(n*m) scan.  Unhashable elements fall back to the linear scan, keeping
    the original behaviour.
    """
    try:
        lookup = set(lst2)
    except TypeError:
        # unhashable elements (e.g. lists): keep the O(n*m) fallback
        lookup = lst2
    return [value for value in lst1 if value in lookup]
09f3447a79e995ad7dc7d34756f0cf832638228c
27,828
def add_ining(new_data, data):
    """Copy inning scores into *new_data* under ``i_1`` .. ``i_18`` keys.

    Innings missing from *data* are padded with "-" so every record carries
    the maximum of 18 innings; keys are renamed from "<n>" to "i_<n>".
    """
    for inning in range(1, 19):
        source_key = str(inning)
        new_data["i_" + source_key] = data.get(source_key, "-")
    return new_data
c6d6dcf4aab7ef5d5cad200685eaeb57689a549a
27,829
def coords2polygon(coord_list):
    """Format a list of 2D points as a PostgreSQL POLYGON literal."""
    pairs = ["{} {}".format(x, y) for x, y in coord_list]
    return "POLYGON((" + ",".join(pairs) + "))"
bc1d27fcdc01142497dde18da83f4584a9453971
27,830
def format_address(address):
    """Strip characters from a restaurant address, keeping only
    alphanumerics, whitespace and commas."""
    kept = (ch for ch in address if ch.isalnum() or ch.isspace() or ch == ",")
    return ''.join(kept)
6cb191b6672744dfedb570fa1e85f85876fa2895
27,832
import pickle


def load_pickled_data(path):
    """Load and return the object stored in a pickle file.

    Args
    ----
    path (str) : path to the file to read

    Returns
    -------
    the deserialized data object
    """
    with open(path, "rb") as handle:
        return pickle.load(handle)
18a4c352d14762c4b52dc205336a49a2c88cfbc1
27,833
import ast


def output_yolo(output):
    """Parse the textual output of a yolo model into bounding boxes.

    Args:
        output (str): yolo model output, one whitespace-separated box per
            line.

    Returns:
        bboxes (list): list of table boundaries for the pdf page, in
        top-left / right-bottom format.

    Fields are parsed with ``ast.literal_eval`` instead of ``eval`` so the
    model output can never execute arbitrary code, and empty fields from
    repeated or trailing spaces are filtered out instead of relying on a
    single ``list.remove("")`` (which crashed when no empty field existed).
    """
    bboxes = []
    for line in output.split("\n"):
        fields = [field for field in line.split(" ") if field]
        if not fields:
            continue
        bboxes.append([ast.literal_eval(field) for field in fields])
    return bboxes
f802d952bc2622d5dc20264b2ec858c1de9858bc
27,834
def spec_is_empty(specification):
    """Return True when the specification has no values.

    Args:
        specification: List of specification values
    """
    return len(specification) == 0
929bf64b50a8a74fef0efa15ec7577a1d1fd39ac
27,835
import sys


def extract_house_income(loaded_df):
    """
    Extract the household income bracket of each row.

    The coded output values are:
        -1 - Invalid value
         1 - Less than 25k
         2 - 25k to 49,999
         3 - 50k to 74,999
         4 - 75k to 99,999
         5 - 100k and over

    The source column is detected among "HHInc", "HHINC" and "HHINCP" (in
    that priority order); the program exits when none is present.
    NOTE(review): the raw values are survey-specific income codes, not
    dollar amounts, and the bucket cut-offs intentionally differ per column
    (e.g. the top bucket is ``< 99`` for HHINCP but ``<= 28`` for HHINC) —
    presumably matching each survey's codebook; confirm before changing.

    :param loaded_df: The dataframe loaded from the file
    :return: A list containing the new coded values of the household income
    """
    hous_incs = []
    columns = loaded_df.columns
    col_name = ""
    print('Extracting Household Income')
    if "HHInc" in columns:
        col_name += "HHInc"
    elif "HHINC" in columns:
        col_name += "HHINC"
    elif "HHINCP" in columns:
        col_name += "HHINCP"
    else:
        print("No Household Income Column Detected")
        sys.exit(0)
    for hous_inc in loaded_df[col_name].tolist():
        if col_name == "HHINCP":
            if hous_inc < 10:  # less than 25k
                hous_incs.append(1)
            elif hous_inc < 15:  # 25k to 49,999
                hous_incs.append(2)
            elif hous_inc < 20:  # 50k to 74,999
                hous_incs.append(3)
            elif hous_inc < 22:  # 75k to 99,999
                hous_incs.append(4)
            elif hous_inc < 99:  # over 100k
                hous_incs.append(5)
            else:  # invalid value
                hous_incs.append(-1)
        elif col_name == "HHINC":
            if hous_inc < 10:  # less than 25k
                hous_incs.append(1)
            elif hous_inc < 15:  # 25k to 49,999
                hous_incs.append(2)
            elif hous_inc < 20:  # 50k to 74,999
                hous_incs.append(3)
            elif hous_inc < 23:  # 75k to 99,999
                hous_incs.append(4)
            elif hous_inc <= 28:  # over 100k
                hous_incs.append(5)
            else:  # invalid value
                hous_incs.append(-1)
        else:
            if hous_inc < 10:  # less than 25k
                hous_incs.append(1)
            elif hous_inc < 15:  # 25k to 49,999
                hous_incs.append(2)
            elif hous_inc < 20:  # 50k to 74,999
                hous_incs.append(3)
            elif hous_inc < 25:  # 75k to 99,999
                hous_incs.append(4)
            elif hous_inc <= 33:  # over 100k
                hous_incs.append(5)
            else:  # invalid value
                hous_incs.append(-1)
    print('Household Income extracted')
    return hous_incs
a6dd1a86de331433267e5d0e282be345970031cc
27,836
def get_outputs(lst, uses, seen):
    """Return the nodes whose values are required beyond this segment.

    Arguments:
        lst: list of nodes (the segment)
        uses: dict mapping each node to its uses (globally)
        seen: set of nodes that are part of the segment
    """
    def used_externally(node):
        # a node escapes when any of its users lies outside the segment
        return any(user[0] not in seen for user in uses[node])

    return [node for node in lst if node.is_apply() and used_externally(node)]
03d6c859bb70aa5ce868b9c71ff7ce6092d52604
27,837
import re


def regexp_quote(text):
    """Return a regexp matching *text* without its surrounding space, maybe
    followed by spaces.

    Returns the empty regexp for ``None`` and ``' *'`` when *text* contains
    only spaces; otherwise the escaped, stripped text is nested within a
    regexp parenthetical group.
    """
    if text is None:
        return ''
    if not text.strip(' '):
        return ' *'
    return '({}) *'.format(re.escape(text.strip()))
88da7ed8918fa8f91909eda1486b227dd7f1c41d
27,839
def make_fortran_symbols(module, name):
    """Return the gcc and intel mangled symbol names for a Fortran module member.

    :return: string of the form ``("<gcc symbol>", "<intel symbol>")``
    """
    module_lower, name_lower = module.lower(), name.lower()
    gcc_symbol = f"__{module_lower}_MOD_{name_lower}"
    intel_symbol = f"{module_lower}_mp_{name_lower}_"
    return f'("{gcc_symbol}", "{intel_symbol}")'
9ddac30bded9f62bf165f8f34b224f50c24849d5
27,843
def get_matches_metadata(infile):
    """Read match IDs and metadata from a typed CSV-like file.

    The first line holds column names, the second their types (int / float /
    str); every remaining non-empty line is one data row.

    Args:
        infile: Filename where match IDs and metadata are stored (string).

    Returns:
        List of dicts with IDs and metadata for each match.

    Raises:
        ValueError: on an unsupported column type.
    """
    with open(infile, "r") as handle:
        lines = handle.read().split("\n")
    # First two lines are column names and types.
    header = lines[0].split(",")
    types = lines[1].split(",")
    casters = {"int": int, "float": float, "str": str}
    rows = []
    for line in lines[2:]:
        if not line:
            continue
        values = line.split(",")
        row = {}
        for col in range(len(values)):
            if types[col] not in casters:
                raise ValueError("Unsupported column type: {}".format(types[col]))
            row[header[col]] = casters[types[col]](values[col])
        rows.append(row)
    return rows
c9b70b40ea0c1ada0af0b6b9c6fbb7e3d95d83e5
27,844
def by_type(objects, t):
    """Filter the given objects by their ``type`` attribute.

    Used for filtering ProducOption objects.

    Returns a list rather than a lazy ``filter`` iterator, so the result can
    be iterated more than once (matching the Python 2 behaviour this helper
    was written against).
    """
    return [obj for obj in objects if obj.type == t]
8fef943468a649a199a646af2155917e787a7c52
27,845
def _get_init_or_call_arg(class_name, arg_name, init_value, call_value): """Returns unified value for arg that can be set at init or call time.""" if call_value is None: if init_value is None: raise ValueError( f"{class_name} requires {arg_name} to be set at init or call time") return init_value else: if init_value not in [None, call_value]: raise ValueError( f"{class_name}(..., {arg_name}={init_value})" f"was called with contradictory value {arg_name}={call_value}") return call_value
c955bd865da0436b6b88ccb938bda1f178b83aff
27,846
def lmParamToPoint(a, c):
    """
    Return the coordinates of a landmark from its line parameters.

    The wall line y = a*x + c is represented by the foot of the
    perpendicular dropped from the origin (0, 0) onto the line.
    """
    denom = 1 + a ** 2
    return [float(-c * a / denom), float(c / denom)]
6b98613216f1287ed9b25f1345ea0a18aa0fc90b
27,847
from typing import Any
from typing import Callable


def pipe(in_: Any, *args: Callable[[Any], Any]) -> Any:
    """Thread a value through a sequence of single-argument functions.

    Example usage:
    >>> pipe(
    ...     [True, False, 1, 3],
    ...     all,
    ...     lambda x: "It's true" if x else "They lie"
    ... )
    'They lie'
    """
    result = in_
    for step in args:
        result = step(result)
    return result
8eff195886ec9daf8391532cb21dc61182462c34
27,848
def remove_crs(mystring):
    """Replace newlines with spaces and drop carriage returns."""
    return mystring.translate(str.maketrans({"\n": " ", "\r": None}))
300d3a527912b4c60f3c5493067d1bb99756961a
27,852
import textwrap def _process_line(line, width, indent): """Process a line in the CLI help""" line = textwrap.fill( line, width, initial_indent=indent, subsequent_indent=indent, replace_whitespace=False, ) return line.strip()
99b5bc3c4885478a542fba24f6aee6667bdf7e3d
27,854
def label_data(frame, model):
    """Predict a cluster label for each tract and re-index the frame by it.

    :param frame: pandas.DataFrame of tract features (must contain the
        geoid/state_abbr/logrecno/geo_label columns)
    :param model: fitted clustering estimator exposing ``predict``
    :return: the frame re-indexed by identifier columns plus ``cluster``
    """
    # Bug fix: the original called model.predict(X) where X was `re.X`
    # (a regex flag pulled in by a stray `from re import X`), not the data.
    # NOTE(review): predicting on the full frame assumes all its columns are
    # model features — confirm against the caller's feature-engineering step.
    frame["cluster"] = model.predict(frame)
    ix = ["geoid", "state_abbr", "logrecno", "geo_label", "cluster"]
    return frame.reset_index().set_index(ix)
7a27a0722394b90aba237a821be3d2a5730403c0
27,856
import platform
import sys


def get_platform() -> str:
    """Get FMU binary platform folder name (e.g. ``linux64`` or ``win32``)."""
    system = platform.system()
    is_64bits = sys.maxsize > 2 ** 32
    platforms = {"Windows": "win", "Linux": "linux", "Darwin": "darwin"}
    bits = "64" if is_64bits else "32"
    # Bug fix: the original conditional expression bound to the whole
    # concatenation (`a + b if c else d`), so 32-bit interpreters got a
    # bare "32" with the platform name dropped.
    return platforms.get(system, "unknown") + bits
06e4dd0f3296f531988d53da23cb31ee260ed4a4
27,857
def rel_2_pil(rel_coords, w, h):
    """Scale relative coords (x1, x2, y1, y2) up to pixel [x1, y1, x2, y2].

    Note the input unpacking order differs from the output ordering.
    """
    x1, x2, y1, y2 = rel_coords
    scaled = (x1 * w, y1 * h, x2 * w, y2 * h)
    return [int(value) for value in scaled]
f619f4a0920db503401abdd0cfd86b61116c4992
27,858
import time


def datetime_format(epoch):
    """Render a unix epoch as an ISO-8601 style UTC timestamp string."""
    utc_struct = time.gmtime(epoch)
    return time.strftime('%Y-%m-%dT%H:%M:%SZ', utc_struct)
e45f7874bebdbe99a1e17e5eb41c5c92e15a96b3
27,859
def count_genes_in_pathway(pathways_gene_sets, genes):
    """Calculate how many of the genes are associated to each pathway gene set.

    :param dict pathways_gene_sets: pathways and their gene sets
    :param set genes: genes queried
    :rtype: dict
    """
    counts = {}
    for pathway, gene_set in pathways_gene_sets.items():
        counts[pathway] = len(gene_set & genes)
    return counts
bb3859c9a6b8c17448a6cbcc3a85fc315abbab31
27,861
import numpy


def compute_mutation_frequency(x, threshold):
    """Compute per-position mutation frequency where coverage exceeds threshold.

    Positions whose coverage is at or below the threshold yield NaN.

    :param x: object with parallel ``coverage`` and ``errors`` sequences
    :param threshold: minimum coverage required to report a fraction
    :return: list of floats (errors/coverage), NaN for low-coverage sites
    """
    # Bug fix: numpy.NaN was removed in NumPy 2.0; numpy.nan is the
    # supported spelling on all NumPy versions.
    return [
        err / cov if cov > threshold else numpy.nan
        for cov, err in zip(x.coverage, x.errors)
    ]
e53c29d7547e9814c4596d358db39e332c302eca
27,862
import unittest
import inspect


def AbstractTestCase(name, cls):
    """Support tests for abstract base classes.

    Use as a base class when defining test cases for abstract class
    implementations.  `cls` is bound to the attribute `name` on the
    returned base class, so subclasses can access either the abstract
    base class or its concretization through that attribute.
    """
    class BaseTestCase(unittest.TestCase):
        """TestCase that is skipped if the tested class is abstract."""

        def run(self, *args, **opts):
            """Run the test case only for non-abstract test classes."""
            target = getattr(self, name)
            if inspect.isabstract(target):
                # Abstract class under test: silently skip execution.
                return None
            return super(BaseTestCase, self).run(*args, **opts)

    setattr(BaseTestCase, name, cls)
    return BaseTestCase
66641e3c9d805946880ac8dfc41827f51986f6aa
27,863
def get_time_slices(time_range, interval):
    """Split a [start, end] time range into consecutive interval-sized slices.

    Args:
        time_range (list): [start, end] of the diagnosed period
        interval (int): diagnose time interval (slice width)

    Returns:
        list of [start, end] pairs covering the range; the final slice is
        truncated at the range end when the interval does not divide evenly.
    """
    start, end = time_range
    points = list(range(start, end, interval))
    points.append(end)
    return [[lo, hi] for lo, hi in zip(points, points[1:])]
d22dfb72cdaa5f399171ed1380d9ad2b8bb6f8b4
27,864
import itertools def _update_with_replacement(lhs_dict, rhs_dict): """Delete nodes that equate to duplicate keys Since an astroid node doesn't 'equal' another node with the same value, this function uses the as_string method to make sure duplicate keys don't get through Note that both the key and the value are astroid nodes Fixes issue with DictUnpack causing duplicte keys in inferred Dict items :param dict(nodes.NodeNG, nodes.NodeNG) lhs_dict: Dictionary to 'merge' nodes into :param dict(nodes.NodeNG, nodes.NodeNG) rhs_dict: Dictionary with nodes to pull from :return dict(nodes.NodeNG, nodes.NodeNG): merged dictionary of nodes """ combined_dict = itertools.chain(lhs_dict.items(), rhs_dict.items()) # Overwrite keys which have the same string values string_map = {key.as_string(): (key, value) for key, value in combined_dict} # Return to dictionary return dict(string_map.values())
6b13c197af2da654b29547bcffd5b6af8a3e6607
27,865
def _normalize(x, axis): """Normalize, preserving floating point precision of x.""" x_sum = x.sum(axis=axis, keepdims=True) if x.dtype.kind == 'f': x /= x_sum else: x = x / x_sum return x
3adc63499fab32453d53a339364c20b9f209f6eb
27,866
import subprocess


def run(*args):
    """Run a command, capturing text output and raising on a nonzero exit."""
    return subprocess.run(
        args,
        capture_output=True,
        check=True,
        text=True,
    )
d99d69d279424448f29eebe7cec8ee96a41b4fcd
27,867
import itertools


def flat_map(visitor, collection):
    """Flat map operation where returned iterables are flattened.

    Args:
        visitor: Function returning an iterable for each item.
        collection: The collection over which to apply the function.

    Returns:
        A lazy iterator over the flattened results.
    """
    mapped = map(visitor, collection)
    return itertools.chain.from_iterable(mapped)
5501e4adc18ca8b45081df4158bcd47491743f29
27,868
def hex_sans_prefix(number):
    """Generates a hexadecimal string from a base-10 number without the
    standard '0x' prefix."""
    prefixed = hex(number)
    # Strip exactly the two prefix characters produced by hex().
    return prefixed[2:]
6faaec36b2b3d419e48b39f36c1593297710a0a4
27,869
def curly_bracket_to_img_link(cb):
    """Translate '{X}' mana notation into the matching <img> HTML tag.

    Slashes and spaces in the symbol become underscores; Phyrexian
    symbols and the C/E symbols use .png, everything else .gif.
    """
    name = cb[1:-1]
    for old, new in (('/', '_'), (' ', '_')):
        name = name.replace(old, new)
    uses_png = 'Phyrexian' in name or name in ('C', 'E')
    ext = 'png' if uses_png else 'gif'
    return f"<img src=\"/images/mana/{name}.{ext}\">"
99a1a7ebf6318d2fbc9c2c24035e5115829b6feb
27,872
import os


def is_executable(path):
    """Return True if the given path is executable"""
    return bool(os.access(path, os.X_OK))
2fc40d31d0146a28e80911c110646a00b6f86198
27,873
import textwrap


def textjoin(text):
    """Dedent `text`, join its lines with spaces, and strip the ends."""
    dedented = textwrap.dedent(text)
    return dedented.replace('\n', ' ').strip()
bef921764388857881741f4a8c516ca723a42fd9
27,874
def name_options(options, base_name):
    """Construct a dictionary that has a name entry if options has
    name_postfix; otherwise return an empty dict."""
    postfix = options.get("name_postfix")
    if postfix is None:
        return {}
    return {"name": "{}{}".format(base_name, postfix)}
c5db1619fa951298743e28c78b8f62165b5d09de
27,876
def is_mandatory(err: str, words: list) -> bool:
    """
    Report whether no word contains the error string.

    :param err: error string
    :param words: list of words
    :return: whether the correction can be specified as mandatory
    """
    return all(err not in word for word in words)
c44dd54733e0ede4072795e42d616533efb69de5
27,878
def first_index(keys, key_part):
    """Find first item in iterable containing part of the string

    Parameters
    ----------
    keys : Iterable[str]
        Iterable with strings to search through
    key_part : str
        String to look for

    Returns
    -------
    int
        Index of the first element in keys containing key_part, 0 if
        not found.
    """
    return next(
        (i for i, key in enumerate(keys) if key_part in key),
        0,
    )
45b41954e795ee5f110a30096aa74ea91f8e6399
27,879
def _calc_shape(original_shape, stride, kernel_size): """ Helper function that calculate image height and width after convolution. """ shape = [(original_shape[0] - kernel_size) // stride + 1, (original_shape[1] - kernel_size) // stride + 1] return shape
46a40efec8c7163ead92425f9a884981e6a4a8bc
27,880
def needs_column_encoding(mode):
    """
    Returns True, if an encoding mode needs a column word embedding vector,
    otherwise False
    """
    column_modes = {
        "one-hot-column-centroid",
        "unary-column-centroid",
        "unary-column-partial",
        "unary-random-dim",
    }
    return mode in column_modes
d5642d03628357508be87e227c5a9edf8e65da2d
27,881
def intify(i):
    """If i is a long, cast to an int while preserving the bits"""
    if i & 0x80000000:
        # High bit set: mask down to 32 bits.
        return int(i & 0xFFFFFFFF)
    return i
eeca1d312d7ca4b5b196a20c0d1c09beac5bc2e6
27,882
import json


def decode_frame(frame, tags=None):
    """
    Extract tag values from frame
    :param frame: bytes, or an object exposing a ``bytes`` attribute
    :param tags: specific tag (str) or tags (iterable) to extract
    :return: dictionary of values
    """
    raw = frame if isinstance(frame, bytes) else frame.bytes
    # Drop a trailing NUL terminator before decoding, if present.
    if raw[-1] == 0:
        raw = raw[:-1]
    framedict = json.loads(raw.decode('utf-8'))
    if not tags:
        return framedict
    if isinstance(tags, str):
        tags = [tags]
    return {key: framedict[key] for key in tags if key in framedict}
1e239c380c7050ff536aa7bfc1cd0b0a01959f39
27,884
import sys


def is_ammend():
    """
    If the commit is an amend, its SHA-1 is passed in sys.argv[3], hence
    the argument count is 4.
    """
    argc = len(sys.argv)
    return argc == 4
614771b205ef5e6d877f3642d70e29cb8d00cf21
27,888
import textwrap


def construct_using_clause(metarels, join_hint, index_hint, node_property="identifier"):
    """
    Create a Cypher query clause that gives the planner hints to speed up the query

    Parameters
    ----------
    metarels : a metarels or MetaPath object
        the metapath to create the clause for
    join_hint : 'midpoint', bool, or int
        whether to add a join hint to tell neo4j to traverse from both ends
        of the path and join at a specific index. `'midpoint'` or `True`
        specifies joining at the middle node in the path (rounded down if an
        even number of nodes). `False` specifies not to add a join hint.
        An int specifies the node to join on.
    index_hint : bool
        whether to add index hints which specify the node property used to
        look up the source and target nodes. Enabling both `index_hint` and
        `join_hint` can cause the query to fail.
    node_property : str
        node property used by the USING INDEX lookup hints.
        NOTE(review): defaults to "identifier" — confirm this matches the
        property your graph's indexes are built on.
    """
    using_query = ""
    # Specify index hint for node lookup
    if index_hint:
        using_query = "\n" + textwrap.dedent(
            """\
            USING INDEX n0:{source_label}({property})
            USING INDEX n{length}:{target_label}({property})
            """
        ).rstrip().format(
            # Bug fix: the original passed the `property` builtin here,
            # interpolating "<class 'property'>" into the emitted Cypher.
            property=node_property,
            source_label=metarels[0][0],
            target_label=metarels[-1][1],
            length=len(metarels),
        )
    # Specify join hint with node to join on
    if join_hint is not False:
        if join_hint is True or join_hint == "midpoint":
            join_hint = len(metarels) // 2
        join_hint = int(join_hint)
        assert 0 <= join_hint <= len(metarels)
        using_query += f"\nUSING JOIN ON n{join_hint}"
    return using_query
61c4dc58782aeb1bc31affb7ec2c74361eac8089
27,889
import sys


def mpa2seq(mpa, char_gap="-"):  # {{{
    """Convert an mpa record to a plain sequence string.

    Tuple entries (start, end) in ``mpa['data']`` become runs of
    ``char_gap``; string entries are copied verbatim.  Returns "" (and
    warns on stderr) when the record has no 'data' key.
    """
    try:
        parts = []
        for item in mpa['data']:
            if type(item) is tuple:
                parts.append(char_gap * (item[1] - item[0]))
            else:
                parts.append(item)
        return "".join(parts)
    except KeyError:
        print("mpa empty", file=sys.stderr)
        return ""
e834f5ac77d798e5dbc3087bdfa219b99fad22d9
27,890
from typing import List
import torch


def import_smallsemi_format(lines: List[str]) -> torch.Tensor:
    """
    imports lines in a format used by ``smallsemi`` `GAP package`.

    Format description:

    * filename is of a form ``data[n].gl``, :math:`1<=n<=7`
    * lines are separated by a pair of symbols ``\\r\\n``
    * there are exactly :math:`n^2` lines in a file
    * the first line is a header starting with '#' symbol
    * each line is a string of :math:`N` digits from :math:`0` to :math:`n-1`
    * :math:`N` is the number of semigroups in the database
    * each column represents a serialised Cayley table
    * the database contains only cells starting from the second
    * the first cell of each Cayley table is assumed to be filled with ``0``

    :param lines: lines read from a file of `smallsemi` format
    :returns: a list of Cayley tables

    .. _GAP package:
        https://www.gap-system.org/Manuals/pkg/smallsemi-0.6.12/doc/chap0.html
    """
    # Skip the header, drop the trailing line separator, and turn every
    # remaining line into a row of single-digit ints.
    digit_rows = [[int(char) for char in line[:-1]] for line in lines[1:]]
    # Transpose so each row holds the serialised cells of one Cayley table.
    raw_tables = torch.tensor(digit_rows).transpose(0, 1)
    # The first cell of every table is implicit and always zero.
    zeros = torch.zeros([raw_tables.shape[0], 1], dtype=torch.long)
    tables = torch.cat([zeros, raw_tables], dim=-1)
    cardinality = int(tables.max()) + 1
    return tables.reshape(tables.shape[0], cardinality, cardinality)
2ca9708944379633162f6ef9b4df3357bca77e80
27,891
def assign_format_str(string, *args, **kwargs):
    """
    Format string and save it to variable.

    {% assign_format_str 'contacts_{lang}.html' lang=LANGUAGE_CODE as tpl_name %}
    {% include tpl_name %}
    """
    formatted = string.format(*args, **kwargs)
    return formatted
db28016f0cf722fdf5c6f17c2a746639a1c04779
27,892
from typing import Optional
from typing import Dict
import re


def parse_git_uri(uri) -> Optional[Dict[str, str]]:
    """Parse a git URI.

    Args:
        uri str: of the form ``git@host:group/project.git`` or
            ``http(s)://host/group/project``

    Returns:
        dict|None {host: "", group: "", project: ""} with a trailing
        ``.git`` stripped from the project, or None when the URI does not
        parse or a component contains a space.
    """
    def _parse(host: str, group: str, project: str):
        if ' ' in host or ' ' in group or ' ' in project:
            return None
        if project.endswith('.git'):
            project = project[:-4]
        return {'host': host, 'group': group, 'project': project}

    p = '([^\\/]+)'
    if uri[:4] == 'git@':
        matches = re.match(f"^git@{p}:{p}\\/{p}$", uri)
        if matches:
            return _parse(*matches.groups())
        return None
    # Bug fix: removed a leftover debug print of the http(s) pattern.
    matches = re.match(f"^http[s]?://{p}/{p}/{p}$", uri)
    if matches:
        return _parse(*matches.groups())
    return None
a71a991c21d5339edfa28db41ac947b02d19d46a
27,895
import os


def exists(path):
    """Determine if a file exists.

    Args:
        path (str): Full path to file

    Returns:
        bool: True if the file exists, False if not
    """
    if not os.path.exists(path):
        return False
    return os.path.isfile(path)
816781213afd43d6dc2b13d39e03ca69ffdf6546
27,896
import re


def load_tolerances(fname):
    """
    Load a dictionary with custom RMS limits.
    Dict keys are file (base)names, values are RMS limits to compare.
    Lines that do not match the ``<name>.png <tolerance>`` shape are skipped.
    """
    pattern = re.compile(r'(?P<name>\w+\.png)\s+(?P<tol>[0-9\.]+)')
    tolerances = {}
    with open(fname, 'r') as fh:
        for line in fh:
            match = pattern.match(line)
            if match:
                tolerances[match.group('name')] = float(match.group('tol'))
    return tolerances
60af52ec49cadfdb5d0f23b6fa5618e7cc64b4c2
27,897
def is_list(input_check):
    """Return True when the given parameter is a list (or list subclass)."""
    return isinstance(input_check, list)
9ff5767c862a110d58587cccb641a04532c1a1a5
27,899
def virtual(func):
    """
    Mark a method as "virtual" on the base Middleware class.

    The flag is picked up at runtime, and flagged methods are skipped
    during ravel request processing (via Actions).
    """
    setattr(func, 'is_virtual', True)
    return func
953e36a060793cfec95888ab1eb6b34722689e58
27,900
import re


def is_valid_hostname(hostname):
    """
    Check if the parameter is a valid hostname.

    :type hostname: str or bytearray
    :param hostname: string to check
    :rtype: boolean
    """
    if not isinstance(hostname, str):
        try:
            hostname = hostname.decode('ascii', 'strict')
        except UnicodeDecodeError:
            return False
    # A single trailing dot (explicit root label) is allowed; remove it.
    if hostname[-1] == ".":
        hostname = hostname[:-1]
    # DNS names are at most 255 bytes in wire format; accounting for the
    # length bytes and the dropped root dot, the dotted form may not
    # exceed 253 characters (RFC 1035).
    if len(hostname) > 253:
        return False
    # All-numeric names would be ambiguous with IP addresses.
    if re.match(r"[\d.]+$", hostname):
        return False
    # Each label: 1-63 chars, letters/digits/hyphens, no leading or
    # trailing hyphen.
    label = re.compile(r"(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE)
    return all(label.match(part) for part in hostname.split("."))
31f16d1c648a230de3eb0f3158be42e0841db5a4
27,901
def _mgmtalgomac(rack, chassis, slot, idx, prefix=2): """ Returns the string representation of an algorithmic mac address """ return "%02x:%02x:%02x:%02x:%02x:%02x" % (prefix, rack >> 8, rack & 0xFF, chassis, slot, idx << 4)
ea90898d50d5946abb6e0d6c678e876aa8b5f8cf
27,902
def check_phase(w_no_board, b_no_board, w_board, b_board, player):
    """
    Determine the current game phase from the board state.

    Phase 3: all pieces placed and the current player has exactly three on
    the board; phase 2: all pieces placed; phase 1: pieces still to place.

    :param w_no_board: white pieces not yet placed
    :param b_no_board: black pieces not yet placed
    :param w_board: white pieces on the board
    :param b_board: black pieces on the board
    :param player: "W" or "B", the player to move
    :return: phase number (1, 2 or 3)
    """
    placing_done = w_no_board == 0 and b_no_board == 0
    if not placing_done:
        return 1
    if player == "W" and w_board == 3:
        return 3
    if player == "B" and b_board == 3:
        return 3
    return 2
657938fdba7305f9725beec9f062adc93e133c23
27,904
def _model_insert_new_function_name(model): """Returns the name of the function to insert a new model object into the database""" return '{}_insert_new'.format(model.get_table_name())
bd3079813b266a4e792ea323ab59eb3ef377159e
27,905