def cube_vertices(position, n):
    """ Return the vertices of the cube at position with size 2*n.

    Note that in `pyglet.window.Window`, the x-z plane is the ground plane.
    So here we unpack the position as `(x, z, y)` instead of `(x, y, z)`.
    """
    x, z, y = position
    return [
        # 4 vertices on top face
        x-n, y+n, z-n, x-n, y+n, z+n, x+n, y+n, z+n, x+n, y+n, z-n,
        # on bottom face
        x-n, y-n, z-n, x+n, y-n, z-n, x+n, y-n, z+n, x-n, y-n, z+n,
        # on left face
        x-n, y-n, z-n, x-n, y-n, z+n, x-n, y+n, z+n, x-n, y+n, z-n,
        # on right face
        x+n, y-n, z+n, x+n, y-n, z-n, x+n, y+n, z-n, x+n, y+n, z+n,
        # on front face
        x-n, y-n, z+n, x+n, y-n, z+n, x+n, y+n, z+n, x-n, y+n, z+n,
        # on back face
        x+n, y-n, z-n, x-n, y-n, z-n, x-n, y+n, z-n, x+n, y+n, z-n,
    ]
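A quick sanity check (a hypothetical usage sketch, not from the source): six faces of four vertices each give 72 coordinates.

# Hypothetical usage: a unit cube centered at the origin.
verts = cube_vertices((0, 0, 0), 0.5)
assert len(verts) == 72                 # 6 faces * 4 vertices * 3 coordinates
assert verts[:3] == [-0.5, 0.5, -0.5]   # first vertex of the top face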
c45fdf29c8f9ec37bc8269fc80c4bd4ea7e5cfe9
85,943
def make_sharded_tf_record_path(base_path, num_shards):
    """Makes a sharded tf.Record path with the given number of shards.

    Args:
      base_path: a path like "/path/to/tf.record"
      num_shards: the desired number of shards

    Returns:
      a sharded path like "/path/to/tf.record-????-of-1000"
    """
    num_shards_str = str(num_shards)
    num_digits = len(num_shards_str)
    return base_path + "-" + "?" * num_digits + "-of-" + num_shards_str
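A hedged usage sketch (not from the source): the number of '?' wildcards tracks the digit count of num_shards.

# Hypothetical usage:
assert make_sharded_tf_record_path("/path/to/tf.record", 1000) == "/path/to/tf.record-????-of-1000"
assert make_sharded_tf_record_path("/path/to/tf.record", 25) == "/path/to/tf.record-??-of-25"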
17c766aa982c5335a78d4227d5ea90cf4eea7fce
85,944
def stations_highest_rel_level(stations, N):
    """For a list of MonitoringStation objects (stations), returns a list of
    the N stations at which the water level, relative to the typical range,
    is highest."""
    # Filter the list so it excludes stations without a relative water level
    new_stations = list(filter(lambda station: station.relative_water_level() is not None, stations))
    # Sort stations in descending order of relative water level
    new_stations.sort(key=lambda station: station.relative_water_level(), reverse=True)
    # Return the first N stations (the N stations with the highest water level)
    return new_stations[:N]
61b9a2282678e13cce238b665009368f4e13043b
85,945
def idx2xy(idx):
    """convert an index to an x-y-coordinate of a Cell"""
    return [idx % 9 + 1, idx // 9 + 1]
4cbee7ff92e8ddf864af3308f55d5b41ad39bba7
85,946
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    """
    Returns a list of all the ways the string (target) can be constructed from
    the given list of substrings (word_bank)
    >>> all_construct("hello", ["he", "l", "o"])
    [['he', 'l', 'l', 'o']]
    >>> all_construct("purple",["purp","p","ur","le","purpl"])
    [['purp', 'le'], ['p', 'ur', 'p', 'le']]
    """
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1

    table: list[list[list[str]]] = []
    for i in range(table_size):
        table.append([])

    # seed value
    table[0] = [[]]  # because empty string has empty combination

    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word] + way for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now, push that combination to table[i + len(word)]
                    table[i + len(word)] += new_combinations

    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()

    return table[len(target)]
80076bdad043158b1fbe958606499694bb53590c
85,947
def get_fieldset(fieldsets, name):
    """Return a fieldset."""
    for it in fieldsets:
        if it.name == name:
            return it
    raise RuntimeError(
        f"Could not find fieldset {name}. This is not supposed to happen."
    )
1e9d95ee06218c3935f90ea486eee8aa6b2701d8
85,948
def orientation(p, q, r):
    """Return positive if p-q-r are clockwise, negative if counterclockwise,
    zero if collinear."""
    return (q[1] - p[1]) * (r[0] - p[0]) - (q[0] - p[0]) * (r[1] - p[1])
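A hypothetical worked example of the sign convention, assuming a standard y-up coordinate frame (not from the source):

# Hypothetical usage:
assert orientation((0, 0), (1, 0), (1, -1)) > 0   # right turn -> clockwise
assert orientation((0, 0), (1, 0), (1, 1)) < 0    # left turn -> counterclockwise
assert orientation((0, 0), (1, 0), (2, 0)) == 0   # collinear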
b27fef9b1aeb569d0c26cb5ace004f6bd70b3698
85,951
from typing import List
from typing import Dict


def entity_to_feature_row_dict(entities: List, prefix: str = ''):
    """Relates entities to their row number in the feature matrix.

    Parameters
    ----------
    entities : List[str]
        A list of entities, named arbitrarily. Usually serve as IDs to a
        different data structure.
    prefix : str
        Prefix used in the dictionary keys. Helpful if the dict is merged
        with another dict.

    Returns
    -------
    entity_to_feature_row_dict: Dict[str, int]
        Relates an entity to the corresponding feature row
    feature_row_to_entity_dict: Dict[int, str]
        Relates a feature row to the corresponding entity
    i: int
        Total number of feature rows
    """
    entity_to_feature_row_dict = {}
    feature_row_to_entity_dict: Dict = {}
    i = 0
    for entity in entities:
        if prefix == '':
            entity = '{}'.format(entity)
        else:
            entity = '{}_{}'.format(prefix, entity)
        feature_row_to_entity_dict[i] = entity
        entity_to_feature_row_dict[entity] = i
        i = i + 1
    return entity_to_feature_row_dict, feature_row_to_entity_dict, i
a4889a3076da6cf230366f4a4c774c2f59a95a59
85,960
import torch


def make_directed(nbr_list):
    """
    Check if a neighbor list is directed, and make it directed if it isn't.

    Args:
        nbr_list (torch.LongTensor): neighbor list
    Returns:
        new_nbrs (torch.LongTensor): directed neighbor list
        directed (bool): whether the old one was directed or not
    """
    # A directed list contains both (i, j) and (j, i) pairs, so some first
    # indices exceed their second index and vice versa.
    gtr_ij = (nbr_list[:, 0] > nbr_list[:, 1]).any().item()
    gtr_ji = (nbr_list[:, 1] > nbr_list[:, 0]).any().item()
    directed = gtr_ij and gtr_ji

    if directed:
        return nbr_list, directed

    new_nbrs = torch.cat([nbr_list, nbr_list.flip(1)], dim=0)
    return new_nbrs, directed
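A hedged usage sketch (not from the source): an undirected list, sorted low-to-high in each pair, gets its flipped pairs appended.

# Hypothetical usage:
nbrs = torch.LongTensor([[0, 1], [0, 2]])
new_nbrs, was_directed = make_directed(nbrs)
assert not was_directed
assert new_nbrs.tolist() == [[0, 1], [0, 2], [1, 0], [2, 0]]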
596b42c5fbe62b51ed68489c6d739026e740da3f
85,964
def ZeroIndex(runs, dia, runindex):
    """Accumulate indexes.

    Args:
        dia: diagonal indexes returned from the function DiagonalZero
        runs: start and stop indexes
    Returns:
        indexlst: list of indexes
    """
    start = runs[0]
    stop = runs[1]
    indexlst = []
    for i in range(start, stop):
        indexlst += [dia[i]]
    return indexlst
0d803b8c28ef48fb2e4f89bb0c71d9c9b91e0078
85,971
def is_pair_sum(pl: list, target: int) -> bool:
    """Returns True iff target can be made by summing 2 different integers in pl."""
    for i in range(len(pl)):
        for j in range(len(pl)):
            if i != j:
                if pl[i] + pl[j] == target:
                    return True
    return False
1ad3b2f2779e8f3fb6b6f639c001faf50fa3b0e4
85,972
def merge_string_with_overlap(string1, string2):
    """ Merge two strings that share a common overlapping substring. """
    idx = 0
    while not string2.startswith(string1[idx:]):
        idx += 1
    return string1[:idx] + string2
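A hypothetical usage sketch (not from the source): the overlapping suffix/prefix is kept only once; with no overlap, string2 is simply appended.

# Hypothetical usage:
assert merge_string_with_overlap("abcdef", "defgh") == "abcdefgh"
assert merge_string_with_overlap("abc", "xyz") == "abcxyz"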
9b87d13c3e523fbc8282ad324c65107085c3bc4f
85,984
import torch


def merge(*embeddings) -> torch.Tensor:
    """ Merge N 3-dimensional embeddings. All embeddings should be the same shape. """
    # Interleave embeddings.
    merge_embeddings = torch.stack(embeddings, dim=embeddings[0].dim())
    # Reshape: [108, 49, 1024, 3] -> [108, 49, 3072], then return the tensor.
    return torch.flatten(merge_embeddings, start_dim=(embeddings[0].dim() - 1))
f9372b12c95a1d5de3623750c5b4624fc34a76a8
85,985
def clean_data(df):
    """
    Function will be used to clean the dataset.

    Parameters
    ----------
    df: DataFrame
        A dataframe containing the heart disease data.

    Returns
    -------
    df: DataFrame
        A dataframe containing the cleansed heart disease data.
    """
    # Use .replace() mappings rather than chained indexing
    # (df['col'][mask] = value), which triggers pandas' SettingWithCopyWarning
    # and may silently fail to write through.
    df['sex'] = df['sex'].replace({0: 'female', 1: 'male'})
    df['chest_pain_type'] = df['chest_pain_type'].replace({
        0: 'typical angina',
        1: 'atypical angina',
        2: 'non-anginal pain',
        3: 'asymptomatic',
    })
    df['fasting_blood_sugar'] = df['fasting_blood_sugar'].replace({
        0: 'lower than 120mg/ml',
        1: 'greater than 120mg/ml',
    })
    df['rest_ecg'] = df['rest_ecg'].replace({
        0: 'normal',
        1: 'ST-T wave abnormality',
        2: 'left ventricular hypertrophy',
    })
    df['exercise_induced_angina'] = df['exercise_induced_angina'].replace({0: 'no', 1: 'yes'})
    df['st_slope'] = df['st_slope'].replace({0: 'upsloping', 1: 'flat', 2: 'downsloping'})
    df['thalassemia'] = df['thalassemia'].replace({
        0: 'normal',
        1: 'fixed defect',
        2: 'reversable defect',
        3: 'reversable defect',
    })
    return df
d7ec149025fc54c41ef37530e5c27c4eff978ce7
85,986
def get_colour_code(colour):
    """ This function returns the integer associated with the input string for the ARC problems. """
    colour_mapping = {'black': 0, 'blue': 1, 'red': 2, 'green': 3, 'yellow': 4,
                      'grey': 5, 'pink': 6, 'orange': 7, 'babyblue': 8, 'maroon': 9}
    return colour_mapping[colour]
dcf3fe48026fe205c10bb66ee2ad8a486140e385
85,987
import pickle


def read_result(path):
    """Returns params, result saved in given path."""
    # Use a context manager so the file handle is closed promptly.
    with open(path, 'rb') as f:
        params, result = pickle.load(f)
    return params, result
59aa7ac856e7a3a21d895d7164493ced369a6792
85,990
from typing import List


def read_input(path: str) -> List[str]:
    """
    Reads game board file from path.
    Returns list of str.
    """
    with open(path) as file:
        field = file.readlines()
    list_of_rows = [row[:-1] if '\n' in row else row for row in field]
    return list_of_rows
131e2c3d3251ae8976fa786396bad3c71b937996
85,994
def remove_image(client, image_name):
    """Remove the Docker image"""
    try:
        client.remove_image(image_name)
        return True
    except Exception:
        # Failure to remove the image is not classified as terrible.. for now
        return False
f7276c0328efab45849224ca4e0814eb4d3ea859
85,995
import re


def slugify(text):
    """
    Return a version of the text suitable for use in a URL without
    escaping (only ASCII, no parentheses, etc.)
    """
    # slug = unidecode.unidecode(text)  # we only deal with ASCII, so skip this
    slug = text.lower()
    slug = re.sub(r'\W+', '-', slug)   # replace non-letters with -
    slug = re.sub(r'[-]+', '-', slug)  # compress multiple sequential -
    return slug
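A hedged usage sketch (not from the source); note that trailing punctuation becomes a trailing dash, since the function never strips dashes at the ends.

# Hypothetical usage:
assert slugify("Hello, World") == "hello-world"
assert slugify("Hello, World!") == "hello-world-"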
7301582125d607cf1f034f628ba7b3dcfb197946
86,000
def idfn(val):
    """Generates names for the parametrized tests."""
    if str(val).endswith(".txt"):
        return val.stem  # val is expected to be a pathlib.Path here
    return ""
b8c10167d4fada2058defae4a71ef7a14d561263
86,003
def name_sources(meta, mode):
    """
    From the meta data and the mode, create a filename fragment
    that names the sources used.
    """
    land_source = ''
    ocean_source = ''
    if mode in ('land', 'mixed'):
        land_source = '.GHCN'
    if mode in ('ocean', 'mixed'):
        ocean_source = '.' + meta.ocean_source.upper()
    return ocean_source + land_source
eac20e44493af751e7fbc54f8a42275a9f601cc9
86,006
def get_module_name(module):
    """Returns a module's name or None if one cannot be found.

    Relevant PEP: https://www.python.org/dev/peps/pep-0451/
    """
    # __spec__ can exist but be None (e.g. for __main__), so guard both cases.
    if getattr(module, '__spec__', None) is not None:
        return module.__spec__.name
    return getattr(module, '__name__', None)
92e345db99cdb03f97657801852af6f3f9e7ce5a
86,007
from collections import OrderedDict


def edges_to_dict(edges, dct=None):
    """Take an iterator of edges and return an ordered dict of sources
    mapped to lists of destinations.
    """
    # Import OrderedDict from collections, not typing: the typing name is a
    # generic alias meant for annotations, not instantiation.
    if dct is None:
        dct = OrderedDict()
    for u, v in edges:
        dct.setdefault(u, []).append(v)
    return dct
9c7d54f12b2b85ab62fa6d98e62f9c7454c82fdd
86,009
def is_1D_graph(g):
    """
    Check a graph to see if it is 1D, or a chain of nodes
    with one or zero parents and children.

    Parameters
    ------------
    g : networkx Graph

    Returns
    ------------
    is_1D : bool
        Is graph 1D or not
    """
    # check degree of successors
    for v in g.succ.values():
        if len(v) not in [0, 1]:
            return False

    # check degree of predecessors
    for v in g.pred.values():
        if len(v) not in [0, 1]:
            return False

    # made it through all checks
    return True
6967ca4e7bceb68841b8a2275f16bc0911d675d4
86,015
def uniquefy(name, existing):
    """
    Generate a unique name given the requested name and a list of existing names

    :param name: initial guess of name
    :param existing: list of existing names
    :return: a unique name based on the suggested name
    """
    root = (u'%s' % name).replace('-', '').replace(' ', '').strip().lower()
    choices = [root] + ['{}{}'.format(root, i) for i in range(1, 20)]
    candidates = sorted(set(choices) - set(existing))
    return candidates[0]
3aecedcc0dee9f13bf52f601a906d307d2ee08cd
86,019
def next_card(prize_card, hand, max_card):
    """
    Next-card strategy just picks the next card in the hand
    (effectively random).
    """
    return hand.cards[0]
509f61b70c38b978453b8646b863f18871523e60
86,025
def get_default_import_string(spider):
    """the default import string for a spider is [spider.name].items"""
    return f"{spider.__name__}.items"
a455050d1900ec089dd0c03e5d36e1f8ac67747d
86,027
def mro_lookup(cls, attr, stop=None, monkey_patched=None):
    """Return the first node by MRO order that defines an attribute.

    Arguments:
        cls (Any): Child class to traverse.
        attr (str): Name of attribute to find.
        stop (Set[Any]): A set of types that if reached will stop
            the search.
        monkey_patched (Sequence): Use one of the stop classes
            if the attribute's module origin isn't in this list.
            Used to detect monkey patched attributes.

    Returns:
        Any: The attribute value, or :const:`None` if not found.
    """
    stop = set() if not stop else stop
    monkey_patched = [] if not monkey_patched else monkey_patched
    for node in cls.mro():
        if node in stop:
            try:
                value = node.__dict__[attr]
                module_origin = value.__module__
            except (AttributeError, KeyError):
                pass
            else:
                if module_origin not in monkey_patched:
                    return node
            return
        if attr in node.__dict__:
            return node
d62247939aa9db593d5b7c8f1a002595a1743d71
86,029
import inspect


def change_run_signature(run, run_inputs):
    """Modifies the signature of the run method of a neural_msi_model.

    Parameters
    ----------
    run: ``callable``
        Function that delegates all the parameters to the run method
        of a skneuromsi.Config class.
    run_inputs: ``set``
        Set containing the class attributes labeled as run inputs.

    Returns
    ----------
    run: ``callable``
        Run method with a new signature including the run_input parameters.
    """
    signature = inspect.signature(run)
    self_param = signature.parameters["self"]

    new_params = [self_param] + [
        inspect.Parameter(name, inspect.Parameter.KEYWORD_ONLY)
        for name in run_inputs
    ]

    new_signature = signature.replace(parameters=new_params)
    run.__signature__ = new_signature

    return run
45ed2629c3a1143ee7b3c9316e66dc169bfa94a7
86,037
def process_parens(taxon):
    """subgenera are parenthesized; if this occurs in the name of a species
    or lower ranked taxon, strip out the subgenus reference; if the taxon is
    the subgenus, assume the parenthesized portion is the subgenus name, so
    strip out the genus name that precedes it and the parens. If it doesn't
    fit the pattern, fail."""
    if 'rank' not in taxon:
        # unfortunate edge case
        return taxon
    name = taxon['name']
    if taxon['rank'] == 'subgenus':
        if name.find('(') > -1:
            open_paren = name.find('(')
            close_paren = name.find(')')
            if close_paren > open_paren:
                name = name[0:open_paren - 1]
                taxon['name'] = name
    elif name.find('(') > -1:
        open_paren = name.find('(')
        close_paren = name.find(')')
        if close_paren > open_paren:
            name = name[0:open_paren] + name[close_paren + 2:]
            taxon['name'] = name
    return taxon
141afe9583396fc82222760229fecb31a334f452
86,042
from typing import Callable
from typing import List

import inspect


def get_kwarg_names(function: Callable) -> List[str]:
    """Retrieve the names of the keyword arguments.

    Retrieve the names of the keyword arguments accepted by `function`
    as a list of strings.

    Parameters
    ----------
    function
        The function to retrieve keyword argument names from.

    Returns
    -------
    List[str]
        The accepted keyword arguments as a list of strings.
    """
    try:
        argspec = inspect.getfullargspec(function)
    except TypeError:
        return []
    if argspec.varargs:
        return argspec.kwonlyargs or []
    else:
        return argspec.args or []
dc3b24e9bfaef12172b14510ba626bddb3a3e9c4
86,045
def ewma_crossovers(dataframe_name):
    """Creates up and down signals for exponential weighted moving averages

    Args:
        dataframe_name (df): Dataframe containing ewma fast and slow indicator data

    Returns:
        A dataframe of:
            original data passed to function,
            ewma_crossover_signal (flt): Combined signal column
                (1.0 = cross up, -1.0 = cross down, 0.0 = no cross);
            the intermediate ewma_cross_up / ewma_cross_down columns are
            dropped before returning
    """
    # Create signal for crossover band (cross in the up direction)
    time_crossed_up = dataframe_name.loc[
        ((dataframe_name['ewma_fast'] > dataframe_name['ewma_slow'])
         & (dataframe_name['ewma_fast'].shift(1) < dataframe_name['ewma_slow'].shift(1))), :].index
    dataframe_name['ewma_cross_up'] = 0.0
    dataframe_name.loc[time_crossed_up, 'ewma_cross_up'] = 1.0

    # Create signal for crossover band (cross in the down direction)
    time_crossed_down = dataframe_name.loc[
        ((dataframe_name['ewma_fast'] < dataframe_name['ewma_slow'])
         & (dataframe_name['ewma_fast'].shift(1) > dataframe_name['ewma_slow'].shift(1))), :].index
    dataframe_name['ewma_cross_down'] = 0.0
    dataframe_name.loc[time_crossed_down, 'ewma_cross_down'] = -1.0

    # Create the crossover combined signal (cross up and down directions)
    dataframe_name['ewma_crossover_signal'] = (
        dataframe_name['ewma_cross_up'] + dataframe_name['ewma_cross_down'])

    # Drop cross up and down columns to clean up dataframe
    dataframe_name.drop(columns=['ewma_cross_up', 'ewma_cross_down'], inplace=True)

    return dataframe_name
12140d8412f4b8398fe4dac49e3d4e906335fbf9
86,046
def attributify(numeral: str) -> str:
    """Returns the numeral string argument as a valid attribute name."""
    return numeral.replace(" ", "").replace("-", "")
e5337f8e1bdcaa66f4810fe6af0c889f0ba71b00
86,047
def build_offline_user_data_job_operations(client, customer_data):
    """Builds the schema of user data as defined in the API.

    Args:
        client: The Google Ads client.
        customer_data: Processed customer data to be uploaded.

    Returns:
        A list containing the operations.
    """
    customer_data_operations = []
    for data_type in customer_data:
        for item in customer_data[data_type]:
            # Create one user-data operation per item, keyed off its type.
            user_data_operation = client.get_type('OfflineUserDataJobOperation')
            user_data = user_data_operation.create
            user_identifier = client.get_type('UserIdentifier')
            if data_type == 'emails':
                user_identifier.hashed_email = item['hashed_email']
            elif data_type == 'phones':
                user_identifier.hashed_phone_number = item['hashed_phone_number']
            elif data_type == 'mobile_ids':
                user_identifier.mobile_id = item['mobile_id']
            elif data_type == 'user_ids':
                user_identifier.third_party_user_id = item['third_party_user_id']
            elif data_type == 'addresses':
                user_identifier.address_info.hashed_first_name = item['hashed_first_name']
                user_identifier.address_info.hashed_last_name = item['hashed_last_name']
                user_identifier.address_info.country_code = item['country_code']
                user_identifier.address_info.postal_code = item['postal_code']
            user_data.user_identifiers.append(user_identifier)
            customer_data_operations.append(user_data_operation)
    return customer_data_operations
3ed4faadcb4ed31b7cd58a62084df884e39967df
86,048
def prime_factor(obj):
    """ Get the primary factor of the `obj`; returns None if there is none. """
    f = getattr(obj, '_e_factors', None)
    if f:
        return f[0], getattr(obj, f[0], None)
ef89988c02845e790fc51df2ff4d44baf6f912fb
86,049
def _calculate_precision(value: str) -> int:
    """Calculate the precision of given value as a string."""
    no_decimal_point = value.replace(".", "")
    no_leading_or_trailing_zeros = no_decimal_point.strip("0")
    return len(no_leading_or_trailing_zeros)
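A quick sketch (hypothetical usage, not from the source): leading and trailing zeros do not count toward the precision.

# Hypothetical usage:
assert _calculate_precision("0.00120") == 2   # "000120" -> "12"
assert _calculate_precision("123.45") == 5    # "12345"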
be97a9ab0484e052d80033d0d26cd1259a9f6237
86,051
def dice_roll(arg: str):
    """ Dice roll as number of rolls (eg 6) or as num and sides (2x6) """
    num, sides = 1, 6
    if arg.count("x") > 1:
        return None
    if "x" in arg:
        num, sides = arg.split("x")
    else:
        num = arg
    try:
        num = int(num)
        sides = int(sides)
    except ValueError:
        return None
    if num < 1 or sides < 1:
        return None
    return num, sides
34cfde1a3de9e31d0e85ab6d6bdcd4cc502b05db
86,052
def flatten_param(param):
    """
    Turn a parameter that looks like this
        param[name_one][name_two][name_three]
    into this
        param_name_one_name_two_name_three
    """
    param = param.replace(']', '').replace('[', '_').replace('<', '').replace('>', '')
    if param.startswith('_'):
        param = param.replace('_', '', 1)
    return param
f9f1ee1ab8a65d04c50a8002e568e2946b971e40
86,053
def delete_empty_value_dict(raw_dict: dict):
    """
    This function filters out all items of raw_dict that have an empty value
    (e.g. null/None/''...)

    :param raw_dict: the dict to be filtered
    """
    parsed_dict = {key: value for key, value in raw_dict.items() if value}
    return parsed_dict if parsed_dict else None
80162e55733c0891f43997eccc11cfe84136b72b
86,054
import csv


def read_csv_as_dicts(filename):
    """
    Returns a list of dicts, each containing the contents of a row of
    the given csv file. The CSV file is assumed to have a header row with
    the field names.
    """
    rows = []
    with open(filename) as csvfile:
        reader = csv.DictReader(csvfile)
        for row in reader:
            rows.append(row)
    return rows
f204cd2d230181fe025e398828aa58100e21d866
86,055
def _local(tag):
    """Extract the local tag from a namespaced tag name (PRIVATE)."""
    if tag[0] == '{':
        return tag[tag.index('}') + 1:]
    return tag
00895cb03f968a565de3224caad2f05d72557cdd
86,056
def unlist(d: dict) -> dict:
    """Find every appearance of a single-element list in a dictionary and unlist it.

    :param: d: a python dictionary to be unlisted
    """
    if isinstance(d, list):
        if len(d) == 1:
            return d[0]
        return d
    if isinstance(d, dict):
        for key, val in d.items():
            if isinstance(val, list):
                if len(val) == 1:
                    d[key] = unlist(val[0])
            elif isinstance(val, dict):
                unlist(val)  # nested dicts are modified in place
        return d
    return d
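A hedged usage sketch (not from the source): only single-element lists are unwrapped, recursively; longer lists are left alone.

# Hypothetical usage:
data = {"a": [1], "b": [1, 2], "c": {"d": [3]}}
assert unlist(data) == {"a": 1, "b": [1, 2], "c": {"d": 3}}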
01e33c65c28ba29a20e0fb59da7b00d3f26d6bd8
86,061
def bit_in_string(string):
    """
    Check whether the string contains a binary digit ('0' or '1')

    :param string: arbitrary string
    :return: boolean
    """
    return ('0' in string) or ('1' in string)
b0017304ee7ac82b9889a5d9c7a8cdf058b992bc
86,062
def global_values(flg):
    """ Change global variables' values depending on the flags given. """
    global GAME_NAME
    global MAX_STEPS
    global MAX_GENERATIONS
    global POPULATION_COUNT
    global MUTATION_RATE
    global FILE_NAME

    if flg.getFlagName() is not None:
        GAME_NAME = flg.getFlagName()
    if flg.getFlagMove() != 0:
        MAX_STEPS = flg.getFlagMove()
    if flg.getFlagGen() != 0:
        MAX_GENERATIONS = flg.getFlagGen()
    if flg.getFlagPop() != 0:
        POPULATION_COUNT = flg.getFlagPop()
    if flg.getFlagRate() != 0.0:
        MUTATION_RATE = flg.getFlagRate()
    if flg.getFlagSave() is not None:
        FILE_NAME = flg.getFlagSave()
    elif flg.getFlagLoad() is not None:
        FILE_NAME = flg.getFlagLoad()
    return None
7d76f2fc4d66bbd19e78d4691667438e967290a0
86,063
def _mock_random_weighted_choice(items, weight_attribute='weight'):  # pylint: disable=unused-argument
    """Mock random_weighted_choice."""
    # Always select the first element rather than a random one for the sake of
    # determinism.
    return items[0]
21717a467670c6f24d04e6ebc57483ce25cd081f
86,066
def lrelu(features, leak=0.2):
    """Leaky rectifier.

    Parameters
    ----------
    features : tf.Tensor
        Input to apply leaky rectifier to.
    leak : float, optional
        Percentage of leak.

    Returns
    -------
    op : tf.Tensor
        Resulting output of applying leaky rectifier activation.
    """
    f1 = 0.5 * (1 + leak)
    f2 = 0.5 * (1 - leak)
    return f1 * features + f2 * abs(features)
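A quick check of the algebraic trick (hypothetical usage, not from the source): f1*x + f2*|x| equals x for x >= 0 and leak*x for x < 0, so the same expression works on scalars too.

# Hypothetical usage:
import math
assert math.isclose(lrelu(2.0), 2.0)
assert math.isclose(lrelu(-2.0), -0.4)  # 0.2 * -2.0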
1984a9f8a214e0fc89bb79f96ad42f788c4ad739
86,068
def playagain() -> bool:
    """Ask the user to play again."""
    return input("Would you like to play again (Yes/No)? ").lower().startswith("y")
84d24db06be40540784ba8648e43ea0275223fa8
86,077
def _stripAndRemove(string, remove=None):
    """Strips whitespace and optional chars from both sides of the target string.

    Args:
        string : (str) The string to strip leading and trailing whitespace from
        remove=None : (str) The substring to remove from the string
    Raises:
        N/A
    Returns:
        (str) The target string after being stripped on either end.
    """
    stringStripped = string.strip()
    # Guard against the default: only remove a substring when one was given,
    # otherwise str.replace(None, '') would raise a TypeError.
    if remove is not None:
        stringStripped = stringStripped.replace(remove, '')
    return stringStripped.strip()
b570bafa2f4d9d461da2a3132921d7f7f48f37ca
86,081
def get_best_hands_generic(hand_strength_function, board, hands):
    """ get the index of the best omaha hand given a board

    :param hand_strength_function: (function) inputs (board, hand),
        outputs (tuple)
    :param board: ([str]) list of 5 cards
    :param hands: ([set(str)]) list of sets of 4 cards
    :return: ([[int]]) indices of `hands` that make the strongest omaha hand
        --> this is a list of lists because it is possible to "chop" with
        every hand rank except straight flushes, quads and flushes
    """
    hand_strengths = {}
    for ii, hand in enumerate(hands):
        hand_strength = hand_strength_function(board, hand)
        if hand_strength not in hand_strengths:
            hand_strengths[hand_strength] = []
        hand_strengths[hand_strength].append(ii)

    return [
        hand_strengths[hs]
        for hs in sorted(hand_strengths, reverse=True)
    ]
c14602019c2feb1ad035bc6d05dd148cdf775abc
86,088
def maybe_quote_ws(value):
    """Surrounds a value with single quotes if it contains whitespace."""
    if value is None:
        return value
    if any(x == ' ' or x == '\t' for x in value):
        return "'" + value + "'"
    return value
c71e77634021692dbb6166952ee79ef1b339329b
86,091
from typing import Mapping
import uuid


def predict_block_by_id(block_id: str, neural_model_id: str) -> Mapping[str, float]:
    """
    Predicts block by known block.

    :param block_id: UUID of known block to predict with
    :type block_id: str (format: UUID)
    :param neural_model_id: UUID of neural model to predict with
        (if not specified will train on non-CV NN found on
        KnownBlock.well_id.deposit_id)
    :type neural_model_id: str (format: UUID)
    :return: Predicted dictionary (keys = rocks UUID, values = probability)
    :rtype: Mapping[str, float]
    """
    return {str(uuid.uuid4()): 0.1, str(uuid.uuid4()): 0.3, str(uuid.uuid4()): 0.6}
f3da28b59c20afbca3b05d9fbc9e4b0c847bc047
86,098
def all_equal(iterable):
    """ Check all elements of an iterable (e.g., list) are identical """
    return len(set(iterable)) <= 1
751d45318cf70a667568e1f7d4360e2528c0ead2
86,099
def leave_name_in(key, val, dd):
    """
    Under certain conditions, we will remove 'key' and its value
    from the dictionary we pass to the dropq package. This function
    will test those conditions and return a Bool.

    Parameters:
    -----------
    key: a field name to potentially pass to the dropq package

    dd: the default dictionary of data in taxcalc Parameters

    Returns:
    --------
    Bool: True if we allow this field to get passed on. False
          if it should be removed.
    """
    if key in dd:
        return True
    elif key in ["elastic_gdp"]:
        return True
    else:
        print("Don't have this pair: ", key, val)
        underscore_name_in_defaults = "_" + key in dd
        is_cpi_name = key.endswith("_cpi")
        is_array_name = (key.endswith("_0") or key.endswith("_1") or
                         key.endswith("_2") or key.endswith("_3"))
        if underscore_name_in_defaults or is_cpi_name or is_array_name:
            return True
        else:
            return False
63355e8bfe2d5f09c9b6f2b5bf31ac1967c8c03c
86,100
def fmt_pairs(obj, indent=4, sort_key=None):
    """Format and sort a list of pairs, usually for printing.

    If sort_key is provided, the value will be passed as the 'key' keyword
    argument of the sorted() function when sorting the items. This allows
    for the input such as

        [('A', 3), ('B', 5), ('Z', 1)]

    to be sorted by the ints but formatted like so:

        l = [('A', 3), ('B', 5), ('Z', 1)]
        print(fmt_pairs(l, sort_key=lambda x: x[1]))
            Z 1
            A 3
            B 5

    where the default behavior would be:

        print(fmt_pairs(l))
            A 3
            B 5
            Z 1
    """
    lengths = [len(x[0]) for x in obj]
    if not lengths:
        return ''
    longest = max(lengths)
    obj = sorted(obj, key=sort_key)
    formatter = '%s{: <%d} {}' % (' ' * indent, longest)
    string = '\n'.join([formatter.format(k, v) for k, v in obj])
    return string
6dc1391d65b10df394e94426fdbde828bf4ae698
86,101
def read_config_file(filename):
    """
    Read the parameters from the configuration file.

    :param filename: The location of the configuration file.
    :type filename: str or path
    :return: The parameters from the configuration file stored in a dictionary.
    :rtype: dict
    """
    config = dict()
    with open(filename, 'r') as f:
        args = f.readlines()
    for arg in args:
        if arg == '\n' or arg[0] == '#':
            continue
        else:
            key, value = arg.split("=")
            config[key.strip()] = value.strip()
    return config
bac08516c357489c87fc08e3a0a3dc91ab61c52e
86,105
def upperfirst(value):
    """upper first word of string, and does not change the rest case,
    such as: foobAr => FoobAr
    """
    # Guard against the empty string, which would otherwise raise IndexError.
    if not value:
        return value
    return value[0].upper() + value[1:]
5060fb7f9ede20d2be203f79ddfb30e8a7361105
86,107
from typing import Optional


def parse_dict_value_to_float(data: dict, key: str) -> Optional[float]:
    """Parse value to float or None if value is None."""
    try:
        parsed_float = float(data.get(key))  # type: ignore
        return parsed_float
    except ValueError:
        raise Exception("Could not parse value to float.")
    except TypeError:
        return None
6cb965d8d21ed20edf66066c315f0d54de46d659
86,110
def get_optimal_knapsack_value(W, items):
    """
    Gets the highest value the knapsack can carry given items of the form
    [(weight, value)] up to a weight W

    Complexity: O(n * W)
    """
    n = len(items)
    knapsacks = dict()
    for i in range(n, -1, -1):
        for j in range(W + 1):
            if i == n:
                knapsacks[(i, j)] = 0
                continue
            if j == 0:
                knapsacks[(i, j)] = 0
                continue
            weight, value = items[i]
            if weight <= j:
                knapsacks[(i, j)] = max(
                    knapsacks[(i + 1, j)],
                    knapsacks[(i + 1, j - weight)] + value)
            else:
                knapsacks[(i, j)] = knapsacks[(i + 1, j)]
    return knapsacks[(0, W)]
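A hypothetical worked example (not from the source, optimum verified by enumeration):

# Hypothetical usage with a capacity of 10:
items = [(5, 10), (4, 40), (6, 30), (3, 50)]
assert get_optimal_knapsack_value(10, items) == 90  # picks (4, 40) and (3, 50)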
164babcc97e04b01551bdf3d8b52f0cb7ae3054c
86,111
def base_amount(faan):
    """ Return the base amount for a given number of faan.

    Under one-n-two bucks (一二文) and half-spicy increase (半辣上):

        Faan    Amount
        0       1
        1       2
        2       4
        3       8
        4       16
        5       24
        6       32
        7       48
        8       64
        9       96
        10      128
        etc.

    (i.e. doubling up to 4 faan, then midpoint insertions for odd faan)
    """
    if faan <= 4:
        return 2 ** faan
    elif faan % 2 == 1:
        return 24 * 2 ** ((faan - 5) // 2)
    else:
        return 32 * 2 ** ((faan - 6) // 2)
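A quick check against the table in the docstring (hypothetical usage, not from the source):

# Hypothetical usage:
assert [base_amount(f) for f in range(11)] == [1, 2, 4, 8, 16, 24, 32, 48, 64, 96, 128]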
e9aa7269ce704ba77d18767dd53db83581d08147
86,113
def compose_slice_query(search_type, search_term):
    """Extract a filter query given a form search term and search type

    Args:
        search_type(str): example -> "case:"
        search_term(str): example -> "17867"

    Returns:
        slice_query(str): example case:17867
    """
    slice_query = None
    if search_term and search_type:
        slice_query = "".join([search_type, search_term])

    return slice_query
e24b4a05fbfbe44e73c903375a22b51cae91b895
86,137
import pwd


def get_uid_home(uid):
    """Get defined uid home path"""
    # pw_dir is field 5 of the passwd entry; the named attribute is clearer.
    return pwd.getpwuid(uid).pw_dir
50984ad4e891b993fef2d27c4dae0d391dd332e8
86,141
import string


def clean_up_punctuation_digits(text):
    """
    Clean up the text from punctuation and digits contained in it.

    Args:
        text: (str) The raw text.

    Returns:
        The text without the punctuation and the digits.
    """
    # str.strip() would only trim these characters from the ends of the
    # string; translate removes them everywhere, as the docstring intends.
    to_remove = string.punctuation + string.digits
    return text.translate(str.maketrans('', '', to_remove))
acad6da3e637477f0bb0c79c2fb63401e239204b
86,145
import re


def parse_stylesheet_header(css):
    """Get WordPress theme data from CSS file contents via the comment header.

    :param css: The contents of the CSS file
    :type css: string
    :return: Theme info. See https://codex.wordpress.org/File_Header
    :rtype: dict
    """
    headers = [
        'Theme Name', 'Theme URI', 'Description', 'Author', 'Author URI',
        'Version', 'Template', 'Template Version', 'Status', 'Tags',
        'Text Domain', 'Domain Path'
    ]

    result = {}
    for header in headers:
        regex = re.escape(header + ':') + r'(.*)'
        match = re.search(regex, css, flags=re.IGNORECASE)
        if match:
            result[header.lower().replace(' ', '_')] = match.group(1).strip()
    return result
115a3bd734ee122dfc6e4b5f76cfad0d655138a3
86,148
def isEventClassMatchingName(eventClass, className):
    """ Check if the `eventClass` name, or that of any of its parent
    classes, equals `className` """
    if eventClass.__name__ == className:
        return True
    return any(
        map(lambda c: isEventClassMatchingName(c, className),
            eventClass.__bases__))
7aebd413167311c22cffdbecb050f5094aa585e0
86,156
def GetPropertyClassKeywordAttribute(propertyClass, attribute):
    """ Get the keyword attribute from the property class """
    if hasattr(propertyClass, 'keywords'):
        # starting with blender 2.93, use keywords
        return propertyClass.keywords[attribute]
    else:
        # no longer supported as of blender 2.93
        return propertyClass[1][attribute]
a01b22dcecf9debbab80a333534beace9758d0b3
86,157
def resize_image(image, ratio=(775 / 512)):
    """Helper function to resize an image with a specific ratio"""
    new_size = (int(round(image.size[0] / ratio)),
                int(round(image.size[1] / ratio)))
    image = image.resize(new_size)
    return image
0dcbdcffad093a285e6fb9be1ac370345961939d
86,158
def _is_close(d1, d2, atolerance=0, rtolerance=0):
    """Determines whether two adjacency matrices are within
    a provided tolerance.

    d1 : dict (Adjacency dictionary)
    d2 : dict (Adjacency dictionary)
    atolerance : Some scalar tolerance value to determine closeness
    rtolerance : A scalar tolerance value that will be some proportion
        of ``d2``'s value

    Returns : boolean, close or not
    """
    # Pre-condition: d1 and d2 have the same keys at each level if they
    # are dictionaries.
    if not isinstance(d1, dict) and not isinstance(d2, dict):
        return abs(d1 - d2) <= atolerance + rtolerance * abs(d2)
    # Pass the tolerances down the recursion; the original call dropped them,
    # silently falling back to zero tolerance at the leaves.
    return all(all(_is_close(d1[u][v], d2[u][v], atolerance, rtolerance)
                   for v in d1[u]) for u in d1)
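A hedged usage sketch (not from the source, and relying on the tolerance propagation fix above):

# Hypothetical usage with a tiny adjacency structure:
d1 = {'a': {'b': 1.0}}
d2 = {'a': {'b': 1.0005}}
assert _is_close(d1, d2, atolerance=1e-2)
assert not _is_close(d1, d2, atolerance=1e-5)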
5242cb24ac42cf58e9e60edf438bf1f846377f4f
86,160
def load_dist_config(distid):
    """Creates a configuration with differently distributed decision values
    for positives and negatives, which will result in differently shaped
    performance curves.

    distid = 1: high initial precision (near-horizontal ROC curve)
    distid = 2: low initial precision (near-vertical ROC curve)
    distid = 3: standard performance (circle-segment-like ROC curve)
    """
    if distid == 1:
        dist_config = {'mean_pos': 1.0, 'sigma_pos': 0.3,
                       'mean_neg': 0.0, 'sigma_neg': 1.0}
    elif distid == 2:
        dist_config = {'mean_pos': 2.0, 'sigma_pos': 2.0,
                       'mean_neg': 0.0, 'sigma_neg': 1.0}
    else:
        dist_config = {'mean_pos': 1.0, 'sigma_pos': 1.0,
                       'mean_neg': 0.0, 'sigma_neg': 1.0}
    return dist_config
53b178389956e1d876ec6235e3eaeafbf998cf87
86,161
def split_path(path, abs_path=True):
    """Splits a URI-encoded path into path segments

    path
        A character string containing the path component of a URI. If path
        is None we treat it as for an empty string.

    abs_path
        A flag (defaults to True) indicating whether or not the path is
        relative or absolute. This flag only affects the handling of the
        empty path. An empty absolute path is treated as if it were '/' and
        returns a list containing a single empty path segment, whereas an
        empty relative path returns a list with no path segments, in other
        words, an empty list.

    The return result is always a list of character strings split from
    *path*. It will only end in an empty path segment if the path ends
    with a slash."""
    if path:
        if abs_path:
            if ord(path[0]) != 0x2F:
                raise ValueError("Abs path must be empty or start with /")
            return path.split("/")[1:]
        else:
            return path.split("/")
    elif abs_path:
        # an empty absolute path is treated as '/': one empty segment
        # (the original code had these two branches swapped, contradicting
        # the docstring)
        return ['']
    else:
        # an empty relative path has no segments
        return []
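A hypothetical usage sketch (not from the source), exercising only the non-empty cases, which behave the same either way:

# Hypothetical usage:
assert split_path("/a/b/c") == ['a', 'b', 'c']
assert split_path("/a/b/") == ['a', 'b', '']   # trailing slash -> empty segment
assert split_path("a/b", abs_path=False) == ['a', 'b']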
798afac75b787a693a446ba15fb571f0e1592ab4
86,169
def get_extent(ds):
    """ Returns the bounding box of a tiff image. """
    geo_t = ds.GetGeoTransform()
    x_size, y_size = ds.RasterXSize, ds.RasterYSize
    xmin = min(geo_t[0], geo_t[0] + x_size * geo_t[1])
    xmax = max(geo_t[0], geo_t[0] + x_size * geo_t[1])
    ymin = min(geo_t[3], geo_t[3] + y_size * geo_t[5])
    ymax = max(geo_t[3], geo_t[3] + y_size * geo_t[5])
    return xmin, xmax, ymin, ymax
48b05d42e826ea79cc8afc1a884ad329e1b8dbb5
86,170
def listdirs(directory):
    """List all the subdirectories of a directory"""
    # `directory // "*"` assumes a path object whose floor-division operator
    # performs a glob (e.g. plumbum's LocalPath), not a pathlib.Path.
    return [path for path in directory // "*" if path.is_dir()]
3543a4cc9f8d8e19f914c4c811025115fd3eef6e
86,176
import re


def approx_field_predicate(field, value, args):
    """Return a function that does an approximate match of a string."""
    flags = re.I if args.ignore_case else 0

    def _approx_match(entry):
        field_value = getattr(entry, field, None)
        if field_value is None:
            return False
        else:
            return re.search(value, field_value, flags)

    return _approx_match
2cd55c8ebedbe39e231a57a9fb66f1e4c4107bb6
86,178
import time
import socket
import struct
import zlib


def composeMsg(addr_from: tuple, addr_to: tuple, msg_type: int, data: bytes,
               fmap: dict, timestamp=None):
    """
    Compose a message according to protocol,
    see CARIBIC@350_CommunicationSpecs_Rev0_20210319.
    """
    if timestamp is None:
        timestamp = int(time.time() * 1e9)

    # pack message content to binary string
    packet = (socket.inet_aton(addr_from[0]) +
              struct.pack(fmap['addr_from_port'], addr_from[1]) +
              socket.inet_aton(addr_to[0]) +
              struct.pack(fmap['addr_to_port'], addr_to[1]) +
              b'\x00\x00' +  # placeholder bytes for packet length
              struct.pack(fmap['ts'], timestamp) +
              struct.pack(fmap['type'], msg_type) +
              data)

    # insert packet length, including checksum bytes
    packet = packet[:12] + struct.pack(fmap['len'], len(packet) + 4) + packet[14:]

    # add checksum
    packet += struct.pack(fmap['cs'], zlib.crc32(packet))  # zlib.adler32(packet)

    return packet
d31add68bcb786b2f0d39af97159eca396381cbf
86,185
def translate_num_days_to_plot_title(num_days):
    """Translate a date range to a plot title.

    Args:
        num_days: An integer specifying how many days back to look.

    Returns:
        A string to be used for the pie plot title.
    """
    if num_days == 7:
        return "last week's jobs"

    # Get the number of weeks
    num_weeks = num_days / 7
    if num_weeks.is_integer():
        num_weeks_str = str(int(num_weeks))
    else:
        num_weeks_str = "{0:.1f}".format(num_weeks)
    return "last %s weeks' jobs" % num_weeks_str
9ccdc8e8025a95eda2b78fbc3e679463bf2ede6d
86,187
from typing import List

import configparser


def kafka_topics(config_path: str) -> List:
    """Read configuration from the config.ini file and return a list of
    kafka topics to be consumed.

    Args:
        config_path (str): The path for the config.ini file.

    Returns:
        List: of Kafka topics
    """
    config = configparser.ConfigParser()
    config.read(config_path)
    topics = []
    for c in config.sections():
        if c.startswith("website_"):
            topic = c.replace("website_", "")
            topics.append(topic)
    return topics
23e604b593f85c86a94512c7ee2b0a890719faf3
86,188
def ResolveSubnetURI(project, region, subnet, resource_parser):
    """Resolves the URI of a subnet."""
    if project and region and subnet and resource_parser:
        return str(
            resource_parser.Parse(
                subnet,
                collection='compute.subnetworks',
                params={
                    'project': project,
                    'region': region
                }))
    return None
14e9594b73d3ba4d2f89222423827136c1c93d7a
86,191
def get_newly_added_slot_name_cmd(mts, lpar_id, slotnum):
    """
    Run cfgdev for the newly configured virtual slot and return the VIOS
    device name for that slot.

    :param mts: PowerVM MTS string for virtual adapter
    :param lpar_id: LPAR id
    :param slotnum: virtual slot number
    :returns: A VIOS command that returns the device name based on physloc
    """
    return ('ioscli cfgdev -dev vio0 && ioscli lsdev -plc '
            '%(mts)s-V%(lparid)s-C%(slot)s-T1 -fmt : '
            ' -field name' %
            {'mts': mts, 'lparid': lpar_id, 'slot': slotnum})
67e7ee5311deebf2ec68e7d47617c2f694e15da8
86,193
def show_attrs(dataset):
    """Return formatted string of attributes for hdf object"""
    out = '%s with %d attrs\n' % (dataset, len(dataset.attrs))
    out += '%s\n' % dataset.name
    for key, value in dataset.attrs.items():
        out += '%30s : %s\n' % (key, value)
    return out
5fa58a7a22beb9ce65d01ece36a4c0388efd260b
86,196
def twos_comp(val, bits):
    """compute the 2's complement of val for the given bit width"""
    if val & (1 << (bits - 1)):  # sign bit set
        val = val - (1 << bits)
    return val
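A quick sketch (hypothetical usage, not from the source) with 8-bit values:

# Hypothetical usage:
assert twos_comp(0xFF, 8) == -1
assert twos_comp(0x7F, 8) == 127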
e75365486aa73d45abc1abe7b4e5eda796dffe36
86,197
from typing import Any


def validate_beta_scale(beta_scale: Any) -> float:
    """
    Args:
        beta_scale (Any): scaling factor for beta

    Raises:
        ValueError: If beta_scale is smaller than 0

    Returns:
        float: scaling factor for beta
    """
    if beta_scale < 0:
        raise ValueError("The beta_scale values must be positive")
    return beta_scale
c6dfb496c9b41cc7ea93b6baeb18b1575c699e4d
86,203
def equiv_referenced_ids(ann):
    """
    Given a line with an Equiv annotation, returns a collection
    containing the IDs referred to.
    """
    fields = ann.split("\t")
    if len(fields) < 2:
        return []
    args = fields[1].split(" ")
    return args[1:]
104ca349e9356480edb217326b06202b559caa89
86,205
def col_to_num(col):
    """ Converts an excel-like column name (e.g. AA) into a number (e.g. 27) """
    num = 0
    for c in col:
        num = num * 26 + (ord(c.upper()) - ord('A')) + 1
    return int(num)
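A quick check (hypothetical usage, not from the source): this is a base-26 interpretation with A = 1.

# Hypothetical usage:
assert col_to_num("A") == 1
assert col_to_num("Z") == 26
assert col_to_num("AA") == 27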
512a89c1d1e4d61f1afe860a51ed1810cec5255c
86,206
from typing import List


def collection_as_dict(collection: list) -> List[dict]:
    """
    Convert from list of Spark Rows to list of dicts.

    Parameters
    ----------
    collection : list
        List of Spark Rows

    Returns
    -------
    map
        An iterable list of dicts
    """
    return [item.asDict() for item in collection]
3c3714558805a6c7c0f36b21491a01afdb010dcf
86,212
def directory_fmt(directory):
    """Ensure that directories end with '/'.

    Frequently we need to ensure that directory paths end with a forward
    slash. Python's dirname and split functions in the path library treat
    this inconsistently, creating this requirement. This function is simple
    but was written to centralize documentation of an often used (and often
    explained) requirement in this codebase.

    >>> os.path.dirname('gs://bucket/folder/file.txt')
    'gs://bucket/folder'
    >>> directory_fmt(os.path.dirname('gs://bucket/folder/file.txt'))
    'gs://bucket/folder/'
    >>> os.path.dirname('/newfile')
    '/'
    >>> directory_fmt(os.path.dirname('/newfile'))
    '/'

    Specifically we need this since copy commands must know whether the
    destination is a directory to function properly. See the following shell
    interaction for an example of the inconsistency. Notice that text files
    are copied as expected but the bam is copied over the directory name.

    Multiple file copy works as intended in all cases:

        $ touch a.txt b.txt
        $ gsutil cp ./*.txt gs://mybucket/text_dest
        $ gsutil ls gs://mybucket/text_dest/
        0  2017-07-19T21:44:36Z  gs://mybucket/text_dest/a.txt
        0  2017-07-19T21:44:36Z  gs://mybucket/text_dest/b.txt
        TOTAL: 2 objects, 0 bytes (0 B)

    Single file copy fails to copy into a directory:

        $ touch 1.bam
        $ gsutil cp ./*.bam gs://mybucket/bad_dest
        $ gsutil ls gs://mybucket/bad_dest
        0  2017-07-19T21:46:16Z  gs://mybucket/bad_dest
        TOTAL: 1 objects, 0 bytes (0 B)

    Adding a trailing forward slash fixes this:

        $ touch my.sam
        $ gsutil cp ./*.sam gs://mybucket/good_folder/
        $ gsutil ls gs://mybucket/good_folder
        0  2017-07-19T21:46:16Z  gs://mybucket/good_folder/my.sam
        TOTAL: 1 objects, 0 bytes (0 B)

    Args:
        directory (str): a uri without a blob or file basename.

    Returns:
        the directory with a trailing slash.
    """
    return directory.rstrip('/') + '/'
9e3bb8c6dfab5a1f3eed2aabe821a1aeced6123f
86,217
def rtable_q_dest(route_entry):
    """
    Args:
        route_entry: (dict)
    Returns:
        string: returns route table destination
    """
    if route_entry.get('DestinationCidrBlock'):
        return route_entry.get('DestinationCidrBlock')
    elif route_entry.get('DestinationPrefixListId'):
        return route_entry.get('DestinationPrefixListId')
    else:
        return 'no-record'
8c57cc07e1196098afb79120f49412e96b4fc52f
86,223
def strip_leading_underscores(attribute_name):
    """
    Strip leading underscores from *attribute_name*.

    Used by default by the ``init_aliaser`` argument of :class:`Attribute`.

    :param attribute_name: The original attribute name to mangle.
    :type attribute_name: str

    :rtype: str
    """
    return attribute_name.lstrip("_")
b9e3462cca69a8b1528c97e86cfdbc5df748007c
86,224
def alpha_from_k_half_mode(khalf, beta, gamma):
    """Calculates alpha, given the half-mode wavenumber"""
    return 1. / khalf * (2. ** (1. / gamma) - 1.) ** (1. / beta)
4eee2694ca05b5a6ebe424791e465e6f954a9f25
86,226
def lambda2(field, method=None):
    """
    Computes the lambda2 criterion of the field.

    Parameters
    ----------
    field : Field2D
    method : str, optional
        A method for calculation of the lambda2 criterion.
        Valid options are: ...

    Returns
    -------
    Field2D

    Author(s)
    ---------
    Jia Cheng Hu
    """
    ddx = field.ddx(method)
    ddy = field.ddy(method)
    return (ddx.u(0) + ddy.u(1))**2 - 4*(ddx.u(0)*ddy.u(1) - ddy.u(0)*ddx.u(1))
814ba99a496a029626c6f1823c027106493b6155
86,227
def timesince(start_time, end_time, default="1天"):
    """
    Returns a string representing "time since",
    e.g. 3 days ago, 5 hours ago, etc.
    """
    diff = end_time - start_time

    if end_time > start_time:
        periods = (
            (diff.days / 365, "年"),    # years
            (diff.days / 30, "个月"),   # months
            # (diff.days / 7, "周"),    # weeks
            (diff.days, "天"),          # days
            # (diff.seconds / 3600, "小时"),  # hours
            # (diff.seconds / 60, "分钟"),    # minutes
            # (diff.seconds, "秒"),           # seconds
        )
        for period, unit in periods:
            if period:
                return "%d%s" % (period, unit)
    return default
c23cf076a72e9cb1521bb7a8e5f648b75ae54fe1
86,229
def format_bytes_size(val):
    """
    Take a number of bytes and convert it to a human readable number.

    :param int val: The number of bytes to format.
    :return: The size in a human readable format.
    :rtype: str
    """
    if not val:
        return '0 bytes'
    for sz_name in ['bytes', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB']:
        if val < 1024.0:
            return "{0:.2f} {1}".format(val, sz_name)
        val /= 1024.0
    raise OverflowError()
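A hedged usage sketch (not from the source): the value is divided by 1024 until it fits a unit.

# Hypothetical usage:
assert format_bytes_size(0) == '0 bytes'
assert format_bytes_size(512) == '512.00 bytes'
assert format_bytes_size(2048) == '2.00 KB'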
71f559d083043454886f91974b86645ef4ee1132
86,230
def _get_spice_ref(part):
    """Return a SPICE reference ID for the part."""
    if part.ref.startswith(part.ref_prefix):
        return part.ref[len(part.ref_prefix):]
    return part.ref
36f881e80c8557951d85b591a47c22953dac0782
86,235
def get_product_from_prokka_fasta_header(fasta_header: str) -> str:
    """ Grabs the gene product portion of a .ffn or .faa fasta header """
    contig, delim, product = fasta_header.partition(" ")
    return product
599cddcb449403880f6b1c2345043a909ff8a250
86,238
def ret_earnings(bre, net_income, dividend):
    """
    Computes ending retained earnings.

    Parameters
    ----------
    bre : int or float
        Beginning retained earnings (at the beginning of the period)
    net_income : int or float
        Net income
    dividend : int or float
        Dividend payment to shareholders

    Returns
    -------
    out : int or float
        Ending retained earnings (at the end of the period)
    """
    return (bre + net_income) - dividend
133a2f6968c79a799b8e5c7ab271732a6e9dc00d
86,247
def filter(s):
    """ Filters a plain text and makes it acceptable for docbook """
    if s is None:
        return ""
    # NOTE: ampersands are not escaped here, so a literal '&' in the input
    # passes through unmodified.
    s = s.replace(">", "&gt;")
    s = s.replace("<", "&lt;")
    return s
ba31296b64a9d0ca4c327531600ce35cbcec630a
86,248
def bert_qa_inputs(ids_name, mask_name, segment_ids_name):
    """Creates the input tensor names of a Bert model in order.

    The names correspond to `Tensor.name` in the TFLite schema. It helps to
    determine the tensor order when populating the metadata.

    Args:
        ids_name: name of the ids tensor, which represents the tokenized ids
            of input text as concatenated query and passage.
        mask_name: name of the mask tensor, which represents the mask with 1
            for real tokens and 0 for padding tokens.
        segment_ids_name: name of the segment ids tensor, where 0 is for query
            and 1 is for passage tokens.

    Returns:
        The input name list.
    """
    return [ids_name, mask_name, segment_ids_name]
b547321f87545c3def95fa8be262f4ea5ed220b8
86,250
def _index_of_end(str, part):
    """If part is in str, return the index of the first character after part.
    Return -1 if part is not in str."""
    index = str.find(part)
    if index >= 0:
        return index + len(part)
    return -1
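A quick sketch (hypothetical usage, not from the source):

# Hypothetical usage:
assert _index_of_end("hello world", "hello") == 5
assert _index_of_end("hello world", "xyz") == -1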
c5da2392124959e2c298602fe0ee88563c2fd599
86,251
import importlib


def module_installed(module: str) -> bool:
    """
    Determines if a given module string can be resolved.

    Determine if the module referenced by string can be imported by trying
    an import in two ways:

    - direct import of the module
    - import of the module minus the last part, then see if the last part
      is an attribute of the module.

    Parts, of course, are separated by dots.

    :param module: module reference
    :return: True if importable, False otherwise
    """
    have_module = False
    try:
        # import_module is the documented equivalent of the original
        # low-level importlib.__import__ call for this purpose.
        importlib.import_module(module)
    except ModuleNotFoundError:
        mod_path, dot, cls = module.rpartition('.')
        if not mod_path:
            return False
        try:
            mod = importlib.import_module(mod_path)
        except ModuleNotFoundError:
            return False
        else:
            if hasattr(mod, cls):
                have_module = True
    else:
        have_module = True
    return have_module
475d9aeb9638bac4c3bad5e310fd3e8ce4e37217
86,253
def subvector(y, head, tail):
    """Construct a vector with the elements y[head], ..., y[tail-1]."""
    result = []
    for i in range(head, tail, 1):
        result.append(y[i])
    return result
d36baa37f43c4f2922849e5a6afe5729769b722e
86,254
import requests


def is_remote_netcdf(ds_str):
    """
    Check a remote path points to a NetCDF resource.

    Parameters
    ----------
    ds_str (str): remote path to a dataset

    Returns
    -------
    bool
    """
    # Some datasets do not support HEAD requests! The vast majority will,
    # however, support GET requests
    try:
        head_req = requests.head(ds_str, allow_redirects=True, timeout=10)
        head_req.raise_for_status()
    except requests.RequestException:
        content_type = None
    else:
        content_type = head_req.headers.get("content-type")

    # if the Content-Type header returned was "application/x-netcdf",
    # or a netCDF file (not OPeNDAP) we can open this into a Dataset
    return content_type == "application/x-netcdf"
64a36676fff5eb28e4a07e75a93f4b23f2752245
86,261
from pathlib import Path


def is_leaf_path(p: Path) -> bool:
    """Tests whether a path corresponds to a file or empty folder, i.e.
    some leaf item in a file-system tree structure
    """
    return p.is_file() or (p.is_dir() and not any(p.glob("*")))
aab1eacb2936713fdfa87af7c0dee8cfb6f9d5a4
86,274