content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
import os
def tmp_webhook(tmp_path, monkeypatch):
    """Provide a temporary file path for a GitHub Actions webhook event.

    Points the GITHUB_EVENT_PATH environment variable at a file named
    ``event.json`` inside *tmp_path* and returns that path.
    """
    event_file = tmp_path / "event.json"
    monkeypatch.setenv("GITHUB_EVENT_PATH", os.fspath(event_file))
    return event_file
a1733b18a831d1a168b1bde8f4cbc616aa511064
24,418
def test_image(request, dials_regression): """Fixture to allow tests to be parametrized for every test image""" return request.param
e46a57821f0773e8c2451fdd69e9e120b0099654
24,419
def filtro_demografico(x, col, genero, cantidad):
    """Return a ranked suggestion table using the IMDB weighted-rating formula.

    x: dataframe with the movie information
    col: column used to filter the dataframe (e.g. genre)
    genero: value the column must match (e.g. 'Comedy')
    cantidad: number of suggestions requested
    :return: dataframe with the suggested movies.
    """
    filtrado = x[x[col] == genero]
    votos = filtrado['vote_count']
    notas = filtrado['vote_average']
    media_global = filtrado['vote_average'].mean()
    corte_votos = filtrado['vote_count'].quantile(0.9)
    # IMDB weighted rating: blend of the movie's own average and the
    # global mean, weighted by vote count against the 90th percentile.
    puntaje = (votos / (votos + corte_votos) * notas) + \
        (corte_votos / (corte_votos + votos) * media_global)
    sugeridas = filtrado.copy().loc[filtrado['vote_count'] >= corte_votos]
    sugeridas['score'] = puntaje
    sugeridas = sugeridas.sort_values('score', ascending=False)
    return sugeridas[['title', 'vote_count', 'vote_average', 'score']].head(cantidad)
914dfdce2f06649409a208d460b07206872f6707
24,420
import logging
def mageckcount_checklists(args):
    """Read predefined sgRNAs and their associated sequences and genes.

    File format (whitespace-separated, or comma-separated for *.csv):
        sgRNAid  seq  geneid

    :param args: namespace with a ``list_seq`` attribute naming the file.
    :return: dict mapping sgRNA id -> (upper-cased sequence, gene id).
        Records with missing fields, duplicated ids or duplicated
        sequences are skipped with a warning.
    """
    genedict = {}
    seqdict = {}  # upper-cased sequence -> sgRNA id, for duplicate detection
    hascsv = args.list_seq.upper().endswith('CSV')
    n = 0
    with open(args.list_seq) as fh:  # 'with' guarantees the handle is closed
        for line in fh:
            field = line.strip().split(',') if hascsv else line.strip().split()
            n += 1
            # Check the field count FIRST so the id/sequence lookups below
            # cannot raise IndexError on short or blank lines.
            if len(field) < 3:
                logging.warning('Not enough field in line '+str(n)+'. Skip this record.')
                continue
            if field[0] in genedict:
                logging.warning('Duplicated sgRNA label '+field[0]+' in line '+str(n)+'. Skip this record.')
                continue
            seq = field[1].upper()
            if seq in seqdict:
                logging.warning('Duplicated sgRNA sequence '+field[1]+' in line '+str(n)+'. Skip this record.')
                continue
            # Bug fix: the original never filled seqdict, so the
            # duplicate-sequence check above could never fire.
            seqdict[seq] = field[0]
            genedict[field[0]] = (seq, field[2])
    logging.info('Loading '+str(len(genedict))+' predefined sgRNAs.')
    return genedict
6bdf988585c6462a22efabc142c6b4fa319e369f
24,421
def _num_matured(card_reviews): """Count the number of times the card matured over the length of time. This can be greater than one because the card may be forgotten and mature again.""" tot = 0 for review in card_reviews.reviews: if review.lastIvl < 21 and review.ivl >= 21: tot += 1 return tot
fe955d7a1322b995b12d030337008b99da9f8c1f
24,422
import re
import fnmatch
def build_pattern(pattern_algorithm: str, pattern: str, case_insensitive: bool) -> re.Pattern[str]:
    """Compile *pattern* into a regex object according to the chosen algorithm.

    :param pattern_algorithm: One of 'basic' (literal match), 'wildcard'
        (shell-style globbing) or 'regex' (used as-is).
    :param pattern: The pattern text.
    :param case_insensitive: When True, compile with ``re.IGNORECASE``.
    :return: The compiled pattern object.
    :raises ValueError: If *pattern_algorithm* is not recognised.
    """
    translators = {
        'basic': re.escape,
        'wildcard': fnmatch.translate,
        'regex': lambda p: p,
    }
    try:
        translate = translators[pattern_algorithm]
    except KeyError:
        raise ValueError(f'Invalid pattern algorithm: {pattern_algorithm}') from None
    flags = re.IGNORECASE if case_insensitive else 0
    return re.compile(translate(pattern), flags)
9935285e3590d285aa97a3eaa6cd9bc17cc3fc32
24,423
import math
def rotation_matrix_to_quaternion(a):
    """Convert a 3x3 rotation matrix to a quaternion (w, x, y, z).

    Branch selection follows the usual numerically stable scheme: the
    largest of the trace and the diagonal entries decides which
    component is taken from the square root, avoiding division by a
    small number.
    """
    trace = a[0][0] + a[1][1] + a[2][2]
    if trace > 0:
        # w is the dominant component.
        root = 0.5 / math.sqrt(trace + 1.0)
        w = 0.25 / root
        x = (a[2][1] - a[1][2]) * root
        y = (a[0][2] - a[2][0]) * root
        z = (a[1][0] - a[0][1]) * root
    elif a[0][0] > a[1][1] and a[0][0] > a[2][2]:
        # x is the dominant component.
        root = 2.0 * math.sqrt(1.0 + a[0][0] - a[1][1] - a[2][2])
        w = (a[2][1] - a[1][2]) / root
        x = 0.25 * root
        y = (a[0][1] + a[1][0]) / root
        z = (a[0][2] + a[2][0]) / root
    elif a[1][1] > a[2][2]:
        # y is the dominant component.
        root = 2.0 * math.sqrt(1.0 + a[1][1] - a[0][0] - a[2][2])
        w = (a[0][2] - a[2][0]) / root
        x = (a[0][1] + a[1][0]) / root
        y = 0.25 * root
        z = (a[1][2] + a[2][1]) / root
    else:
        # z is the dominant component.
        root = 2.0 * math.sqrt(1.0 + a[2][2] - a[0][0] - a[1][1])
        w = (a[1][0] - a[0][1]) / root
        x = (a[0][2] + a[2][0]) / root
        y = (a[1][2] + a[2][1]) / root
        z = 0.25 * root
    return w, x, y, z
b6dd0d3c000d65c432de98341650999fe37ce103
24,424
def _check_dir(): """Checks if the plottable data is located in the same folder as the main function, by checking if wavefuncs.dat is present. Returns: isdir (bool): is plottable data in default directory """ try: fp = open("wavefuncs.dat", "r") except FileNotFoundError: msg = "Plottable data not found or incomplete in default directory." print(msg) isdir = False return isdir else: fp.close() isdir = True return isdir
6d0dfc1b74990e81fce889ad6105839959828397
24,425
import requests
def getHtml(url):
    """Fetch a web page and return its text content.

    NOTE(review): despite the original (Chinese) comment about
    disguising the request headers, no custom headers are actually sent.
    """
    response = requests.get(url)
    return response.text
97d4c329f9de1fe970eb941349298b4efcb75f23
24,426
def multiwrapper(func, x, p, np):
    """Sum several components of *func* over chunks of the parameter list.

    *p* is consumed in consecutive groups of *np* parameters; *func* is
    evaluated at *x* once per complete group and the results are added,
    so several components can be stacked iteratively.
    """
    total = 0
    chunk = []
    for param in p:
        chunk.append(param)
        if len(chunk) % np == 0:
            total += func(x, chunk)
            chunk = []
    return total
3901061bcb20dd7e0351ca31d8a92fc491b853ce
24,427
def search(metadata, keyword, include_readme):
    """Search the repo metadata for a keyword.

    Args:
        metadata (dict) : Mapping of action name -> action metadata.
        keyword (str) : The keyword to look for (case-insensitive).
        include_readme (bool) : Also search inside each action's
            'repo_readme' text when the name itself does not match.

    Returns:
        list : Names of the actions that match.
    """
    needle = keyword.lower()
    matches = []
    for name, info in metadata.items():
        if needle in name.lower():
            matches.append(name)
        elif include_readme and needle in info['repo_readme'].lower():
            matches.append(name)
    return matches
a7ff3900547518fbf3e774df8954d383ba1227b5
24,428
def format_moz_error(query, exception):
    """Render a query syntax error with a caret pointing at the column.

    *exception* must expose ``lineno`` and ``col`` attributes; the first
    ``lineno`` lines of *query* are echoed, followed by a caret marker
    under the failing column and the exception text.
    """
    lines = query.split('\n')[:exception.lineno]
    lines.append(' ' * (exception.col - 1) + '^')
    lines.append(str(exception))
    return '\n'.join(lines)
fb558e1c3149aa9191db76c153e24a9435f6bec3
24,430
def bitarray_to_bytes(b):
    """Serialise a bitarray into its raw ``bytes`` representation."""
    return b.tobytes()
d4851f60a056953c3862968e24af5db5597e7259
24,431
def process_data(data):
    """Build a dict describing a variable from its spreadsheet rows.

    ``data`` is an iterable of rows (lists of strings) that all describe
    the same variable: single-valued fields are taken from the first
    row, and per-row fields are accumulated into sorted lists.

    @return a dict
    """
    variable = {}
    # Per-row fields are gathered into sets so duplicates collapse.
    time_step = set()
    time_averaging = set()
    notes = set()
    cmip6_cmor_tables_row_id = set()
    # Column indexes into each row of the source spreadsheet.
    col_anomaly_type = 2
    col_time_step = 3
    col_long_name = 4
    col_description = 5
    col_plot_label = 6
    col_standard_name = 7
    col_units = 8
    col_label_units = 9
    col_level = 10
    col_cmip6_var_id = 11
    col_time_averaging = 12
    col_observations = 13
    col_marine = 14
    col_land_strand_1 = 15
    col_land_strand_2 = 16
    col_land_strand_3_12km = 17
    col_land_strand_3_2km = 18
    col_notes = 19
    col_um_stash = 20
    col_cmip6_standard_name = 21
    col_cmip6_cmor_tables_row_id = 22
    for row in data:
        row = [item.strip() for item in row]
        if variable == {}:
            # these fields are common to all the rows for a variable,
            # so they are read from the first row only
            variable['long_name'] = row[col_long_name]
            variable['description'] = row[col_description]
            variable['plot_label'] = row[col_plot_label]
            if (row[col_anomaly_type] != "" and
                    row[col_anomaly_type] != "none"):
                variable['anomaly_type'] = row[col_anomaly_type]
            try:
                # Optional trailing columns: a short row simply stops
                # contributing once an index is out of range.
                if (row[col_standard_name] != "" and
                        row[col_standard_name] != "None"):
                    variable['standard_name'] = row[col_standard_name]
                if row[col_units] != "":
                    variable['units'] = row[col_units]
                if row[col_label_units] != "":
                    variable['label_units'] = row[col_label_units]
                if row[col_level] != "":
                    variable['level'] = row[col_level]
                if (row[col_cmip6_var_id] != "" and
                        row[col_cmip6_var_id] != "None"):
                    variable['cmip6_name'] = row[col_cmip6_var_id]
                if row[col_um_stash] != "":
                    variable['um_stash'] = row[col_um_stash]
                if (row[col_cmip6_standard_name] != "" and
                        row[col_cmip6_standard_name] != "None"):
                    variable['cmip6_standard_name'] = row[col_cmip6_standard_name]
            except IndexError:
                pass
        # now sort out the differences between rows
        if row[col_time_step] != "":
            time_step.add(row[col_time_step])
        try:
            if row[col_time_averaging] != "":
                time_averaging.add(row[col_time_averaging])
            if row[col_notes] != "":
                notes.add(row[col_notes])
            if (row[col_cmip6_cmor_tables_row_id] != ""
                    and row[col_cmip6_cmor_tables_row_id] != "None"):
                cmip6_cmor_tables_row_id.add(row[col_cmip6_cmor_tables_row_id])
        except IndexError:
            pass
    # Only emit the accumulated per-row fields when they are non-empty.
    if len(time_step) > 0:
        variable['time_step'] = sorted(list(time_step))
    if len(time_averaging) > 0:
        variable['time_averaging'] = sorted(list(time_averaging))
    if len(notes) > 0:
        variable['notes'] = sorted(list(notes))
    if len(cmip6_cmor_tables_row_id) > 0:
        variable['cmip6_cmor_tables_row_id'] = sorted(
            list(cmip6_cmor_tables_row_id))
    return variable
3e96f576d54bc33ffadafde184ab932e741182e3
24,432
def get_field_value(instance, field_name, use_get):
    """Read a field value from *instance*.

    When *use_get* is truthy the instance is treated as a mapping and
    ``instance.get(field_name)`` is used (None when absent); otherwise
    the value is read as an attribute, defaulting to ''.
    """
    if use_get:
        return instance.get(field_name)
    return getattr(instance, field_name, '')
71e89e9c0f52a71d78bc882d45ae2dda8100a1f7
24,435
def apply_reaction(reactant, vec):
    """Apply a reaction vector to a reactant.

    Returns the sum ``reactant + vec`` — whatever ``+`` means for the
    operand types (numeric addition, sequence concatenation, ...).
    """
    return reactant + vec
903765b92aac0dda91249d504d45e4987774b3c2
24,436
def removesuffix(s, suffix):
    """
    Removes a suffix from a string.

    :param s: string to remove suffix from
    :param suffix: suffix to remove
    :type s: str
    :type suffix: str
    :return: a copy of the string with the suffix removed
    :rtype: str
    """
    # Guard the empty suffix: every string endswith(''), and s[:-0] is ''
    # (not s), so the original one-liner wrongly returned '' for it.
    if suffix and s.endswith(suffix):
        return s[:-len(suffix)]
    return s
7503e7ca390434936cdda8f706f5ca1eb7b80b98
24,437
def chomp(text):
    """
    Removes a single trailing line ending, if there is one.

    Handles '\\r\\n' (Windows), '\\n\\r' (kept from the original code)
    and a bare '\\n'; any other input is returned unchanged.
    """
    if not text:
        return text
    # Two-character endings first so a Windows '\r\n' does not leave a
    # stray '\r' behind (the original only tested the reversed '\n\r').
    if text[-2:] in ('\r\n', '\n\r'):
        return text[:-2]
    if text[-1] == '\n':
        return text[:-1]
    return text
8813631dd201a281dd4d879b7ec8d87e9425b9c7
24,438
def to_list(inp):
    """Return *inp* as a list, wrapping non-list/tuple values singly."""
    return list(inp) if isinstance(inp, (list, tuple)) else [inp]
55350d48bd578252213710fd4c5e672db8ba1f8e
24,439
import re
def setSearchPath(sql: str, schema: str) -> str:
    """Inject a PostGIS ``SET search_path`` statement into *sql*.

    If the script starts with ``BEGIN;`` the statement is inserted right
    after it (every ``BEGIN;`` occurrence is rewritten, matching the
    original behaviour); otherwise it is simply prepended.
    """
    prefix = 'SET search_path = "%s", public, pg_catalog;' % schema
    if re.search('^BEGIN;', sql):
        return sql.replace('BEGIN;', 'BEGIN;%s' % prefix)
    return prefix + sql
ea3ee102ea4d72cde7dfabeccf4750e7e8fe0c2a
24,440
def get_ra(b, d):
    """
    Convert birth and death rates to turnover and relative extinction.

    Args:
        b (float): birth rate
        d (float): extinction rate

    Returns:
        (float, float): turnover (b - d), relative extinction (d / b)
    """
    turnover = b - d
    rel_extinction = d / b
    return (turnover, rel_extinction)
64e6e9752c623b17745de92ecfe2ce96c43bc381
24,441
def all_angles_generator(board: list):
    """Yield every L-shaped "angle" of the 9x9 board.

    Each angle is a 5-cell column followed by the remaining 4 cells of
    the row completing the L; the five angles sweep from the top-right
    starting column down to the bottom-left.
    """
    def _angle(row0, col0):
        column = [board[r][col0] for r in range(row0, row0 + 5)]
        tail = [board[row0 + 4][c] for c in range(col0 + 1, col0 + 5)]
        return column + tail

    for col0, row0 in zip(range(4, -1, -1), range(5)):
        yield _angle(row0, col0)
3ea04a9b1aea2236e6bbc1595982cc8d50f9dbb3
24,446
import math
def htmTriangleArea(level):
    """Area of one HTM triangle, in square arcseconds, at a given depth.

    The whole sky is divided into 8 * 4**level triangles.

    Args:
        level: HTM depth level.
    """
    sky_sq_deg = 4.0 * math.pi * (180.0 / math.pi) ** 2
    sky_sq_arcsec = sky_sq_deg * 3600.0 ** 2
    return sky_sq_arcsec / (8 * 4 ** level)
d585402efb53f7cb4e5bbc4c3eaf77900c4c1af3
24,447
def tokenize_file(file, split="lines", separator=")"):
    """
    Separate a list of strings into tokens.

    :param file: List of strings to tokenize.
    :param split: type of tokenization ("chars", "words", "lines",
        "functions", "files").
    :param separator: optional separator, used only by "functions".
    :return: A new list of tokens, ready for vectorization or feature
        extraction.

    NOTE(review): the "words"/"lines"/"functions" modes use
    str.rpartition, which splits only around the LAST occurrence of the
    separator and keeps the separator itself as a token; this mirrors
    the original code.
    """
    new_file = []
    for line in file:  # for each entry in the list
        if split == "chars":  # separate into characters
            for character in line:
                new_file.append(character)
        elif split == "words":  # separate around the last space
            for token in line.rpartition(" "):
                if token != "":
                    new_file.append(token)
        elif split == "lines":  # separate around the last newline
            # Bug fix: the original used "/n" (a forward slash) instead
            # of the newline escape "\n", so lines were never split.
            for token in line.rpartition("\n"):
                if token != "":
                    new_file.append(token)
        elif split == "functions":  # separate around `separator`
            for token in line.rpartition(separator):
                if token != "":
                    new_file.append(token)
    # end of per-line loop
    if split == "files":  # concatenate everything into a single token
        temp = ""
        for word in file:
            if word != "":
                temp += word
        new_file.append(temp)
    return new_file
63e235a6fdecdb749fe3235eda8854a4945f6273
24,448
def getVanDerWaalsVolumes():
    """
    Relative Van der Waals volumes for the volume desolvation model.

    Uses a 'united-atom' description (hydrogens folded into their heavy
    atoms); absolute volumes are noted beside each entry.
    """
    return {
        'C':  1.40,  # 20.58 all 'C' and 'CA' atoms
        'C4': 2.64,  # 38.79 hydrodphobic carbon atoms + unidentified atoms
        'N':  1.06,  # 15.60 all nitrogen atoms
        'O':  1.00,  # 14.71 all oxygen atoms
        'S':  1.66,  # 24.43 all sulphur atoms
    }
0f3fe8a8d551094f744d7c18354ee12b6c3911e8
24,449
import os
def get_child_file_or_folder_name(path):
    """
    :param str path: The full path to the child file or folder.
    :return str: The final path component (file or folder name).

    ``rpartition`` returns the whole string as the tail when no
    separator is present, which matches ``path.split(os.sep)[-1]``.
    """
    return path.rpartition(os.sep)[2]
d9603cab344c9bed752f10445daf544be4803fe0
24,450
def read_fastq(fastq_fn):
    """
    Read a fastq file and return its lines.

    :param fastq_fn: path to the fastq file
    :return: seq: (list) the lines of the file (newlines preserved),
        or None (after printing a message) when the file cannot be read.
    """
    try:
        # 'with' closes the handle even if reading fails part-way,
        # unlike the original explicit open()/close() pair.
        with open(fastq_fn, "r") as f:
            return list(f)
    except IOError:
        print("File not found.")
347d35bab1e7b3761efb53fecfa98de65e486bf2
24,451
def demcfitter(time, data, model, uncertainty=None, verbose=True, **kwargs):
    """Use Differential Evolution Markov Chain (not yet implemented).

    Parameters
    ----------
    time: sequence
        The time axis of the observations
    data: sequence
        The observational data
    model: ExoCTK.lightcurve_fitting.models.Model
        The model to fit
    uncertainty: np.ndarray (optional)
        The uncertainty on the (same shape) data
    verbose: bool
        Print some stuff

    Returns
    -------
    None
        Placeholder until the DEMC fitter is implemented.
    """
    return None
de7b277c51dc4053fb86e206708278381e2835d5
24,452
def GetBraviasNum(center, system):
    """Determine the Bravais lattice number, as used in GenHBravais

    :param center: one of: 'P', 'C', 'I', 'F', 'R' (see SGLatt from
      GSASIIspc.SpcGroup); also 'A'/'B' for orthorhombic settings
    :param system: one of 'cubic', 'hexagonal', 'tetragonal',
      'orthorhombic', 'trigonal' (for R), 'monoclinic', 'triclinic'
      (see SGSys from GSASIIspc.SpcGroup)
    :return: a number between 0 and 15, or throws a ValueError exception
      if the combination of center, system is not found (i.e.
      non-standard)
    """
    lattice_numbers = {
        ('F', 'cubic'): 0,
        ('I', 'cubic'): 1,
        ('P', 'cubic'): 2,
        ('R', 'trigonal'): 3,
        ('P', 'hexagonal'): 4,
        ('I', 'tetragonal'): 5,
        ('P', 'tetragonal'): 6,
        ('F', 'orthorhombic'): 7,
        ('I', 'orthorhombic'): 8,
        ('A', 'orthorhombic'): 9,
        ('B', 'orthorhombic'): 10,
        ('C', 'orthorhombic'): 11,
        ('P', 'orthorhombic'): 12,
        ('C', 'monoclinic'): 13,
        ('P', 'monoclinic'): 14,
        ('P', 'triclinic'): 15,
    }
    key = (center.upper(), system.lower())
    if key in lattice_numbers:
        return lattice_numbers[key]
    raise ValueError('non-standard Bravais lattice center=%s, cell=%s' % (center, system))
06c4c4f4ac2a0b915fb544df2f99a305e143cea2
24,453
def format_attribute(attribute):
    """Format a tuple describing an attribute.

    :param attribute: attribute tuple, either ``(key, value)`` or
        ``(value,)``.
    :return: the formatted string

    A one-tuple renders as ``repr(value)``.  A pair renders as
    ``key=repr(value)``; the key appears as itself when it is a `str`
    and as ``repr(key)`` otherwise.
    """
    if len(attribute) == 1:
        return repr(attribute[0])
    key, value = attribute
    key_str = key if isinstance(key, str) else repr(key)
    return f"{key_str}={value!r}"
e5926e13da947240bf0efb215f2f75aad308c251
24,454
from typing import Iterator
from typing import Any
from typing import List
def take(it: Iterator[Any], n: int) -> List[Any]:
    """Take up to n elements of the iterator.

    Bug fix: the original ``[next(it) for _ in range(n)]`` raised
    RuntimeError (StopIteration leaking into a comprehension, PEP 479)
    whenever the iterator held fewer than *n* elements; islice simply
    returns the elements that exist.
    """
    from itertools import islice
    return list(islice(it, n))
d12b7a26e0bc7712174f75e99fd9a9103df1b86e
24,455
def _text_choose_characters(player): """Display the menu to choose a character.""" text = "Enter a valid number to log into that character.\n" characters = player.db._playable_characters if len(characters): for i, character in enumerate(characters): text += "\n |y{}|n - Log into {}.".format(str(i + 1), character.name) else: text += "\n No character has been created in this account yet." text += "\n" if len(characters) < 5: text += "\n |yC|n to create a new character." if len(characters) > 0: text += "\n |yD|n to delete one of your characters." return text
1a94934f838f3fcae85a62ac867191b630b39f3d
24,456
import tempfile
import os
import shutil
def workspace(request):
    """Return a fresh temporary directory for writing data.

    A finalizer registered on *request* deletes the directory (if it
    still exists) once the test finishes.
    """
    test_workspace = tempfile.mkdtemp()

    def _cleanup():
        if os.path.exists(test_workspace):
            shutil.rmtree(test_workspace)

    request.addfinalizer(_cleanup)
    return test_workspace
380448430cbcfacc3e0745d7355dc534d40e9f1d
24,457
import numpy
def get_dtype(array_like):
    """Return dtype of an array like object.

    In the case of a nested sequence, the type of the first value is
    inspected.

    :param array_like: Array like object: numpy array, hdf5 dataset,
        multi-dimensional nested sequence
    :return: numpy dtype of object
    """
    if hasattr(array_like, "dtype"):
        return array_like.dtype

    subsequence = array_like
    while hasattr(subsequence, "__len__"):
        # Strings have __len__ and index to strings, which would loop
        # forever. On Python 3 the old six.string_types +
        # (six.binary_type,) is exactly (str, bytes), so the third-party
        # six dependency is unnecessary and has been dropped.
        if isinstance(subsequence, (str, bytes)):
            break
        subsequence = subsequence[0]

    return numpy.dtype(type(subsequence))
d0d961389cc64ad39714dfeb16dbd32b71bdeddd
24,458
def create_charm_name_from_importable(charm_name):
    """Convert a charm name from the importable form to the real form.

    '_' is invalid in charm names, so any underscore must have stood in
    for a '-' in the original name.
    """
    return "-".join(charm_name.split("_"))
983c7b9796e8987c66d22aa01b9f3d273969f18a
24,459
def find_null_net_stations(db, collection="arrival"):
    """
    Return the set of sta fields for documents with a null net code
    (key=net).

    Scans the collection named by *collection* in database handle *db*.
    """
    stations_missing_net = set()
    for doc in db[collection].find():
        if 'net' not in doc:
            stations_missing_net.add(doc['sta'])
    return stations_missing_net
582dc0a0b14e091e3b54fb3c2040669216ec4bf5
24,460
def FindApprovalValueByID(approval_id, approval_values):
    """Find the approval_value with the given id in the list, else None."""
    return next(
        (av for av in approval_values if av.approval_id == approval_id),
        None)
d4e14ad235dae859559376a99d14bf1fe8156054
24,461
def get_nice_address(address):
    """
    Make an address returned by Location Service nice for speaking.

    Args:
        address: Address as returned by Location Service Place Index.

    Returns:
        str: Spoken address.
    """
    street = address['Street']
    # Expand common abbreviations so they are spoken in full.
    if street.endswith('St'):
        street += 'reet'
    if street.endswith('Av'):
        street += 'enue'
    number = address['AddressNumber']
    # Long house numbers are split so they are read as digit pairs.
    if len(number) >= 4:
        number = number[:2] + ' ' + number[2:]
    return number + " " + street
bc18427d5453646e766185e1e347839fa4e1b168
24,462
def is_pure_word(text: str) -> bool:
    """
    Words containing parentheses, whitespaces, hyphens or 'w' characters
    are not considered proper words.

    :param text:
    :return:
    """
    return all(forbidden not in text for forbidden in ("-", "(", ")", " ", "w"))
af364ebaab38c17543db238169f3a26587c69eae
24,463
import os
import json
def open_json(file_path):
    """
    Load and return the JSON content of *file_path*.

    The original called ``os.path.join(file_path)`` and discarded the
    result — a no-op that has been removed.
    """
    with open(file_path) as json_data:
        return json.load(json_data)
9d3fc8ac13acfe289eae2d9c2388861dbf2506c3
24,464
def apply_substitution(subst_dict, cleaned_string):
    """
    Apply a substitution dictionary to a string.

    Each character is lower-cased, then replaced by its mapping in
    *subst_dict* when one exists; otherwise the lower-cased character
    itself is kept.
    """
    return ''.join(
        subst_dict.get(ch.lower(), ch.lower()) for ch in cleaned_string)
b823139dff91e02e5765670c645b1977f21340c8
24,466
def _merge_config(entry, conf): """Merge configuration.yaml config with config entry.""" return {**conf, **entry.data}
b29404005bfd7578999bb982da27a7291f186228
24,467
from typing import Set
import csv
def get_all_ids(filename: str) -> Set:
    """Return the set of ids of all items saved in the file.

    Args:
        filename (str): CSV file (with an 'id' column) where items are
            saved.

    Returns:
        Set: Set of ids.
    """
    with open(filename, mode="r") as csvfile:
        return {row['id'] for row in csv.DictReader(csvfile)}
4d4ff1374d273dbdc0fc499e1b634ea2bf604b3d
24,469
import re
def parse_xml(xml):
    """Convert CDATA-style XML (tag -> CDATA payload) into a dict.

    Empty/None input yields {}; bytes input is decoded as UTF-8 first.
    """
    if not xml:
        return {}
    if type(xml) is bytes:
        xml = xml.decode("utf8")
    pairs = re.findall('\<.*?\>\<\!\[CDATA\[(.*?)\]\]\>\<\/(.*?)\>', xml)
    return {tag: value for value, tag in pairs}
44061598154398aabfd52bd12c8522fdd7ca66cb
24,474
def and_list(items):
    """
    Join a list of strings into a comma-separated English list with
    "and" before the final item (Oxford comma for three or more).
    """
    assert isinstance(items, list)
    if not items:
        return ''
    if len(items) == 1:
        return items[0]
    if len(items) == 2:
        return f'{items[0]} and {items[1]}'
    return ', '.join(items[:-1]) + f', and {items[-1]}'
b0cea95bfe95d613986b6130fd7acabaef6a1d7c
24,477
def cpm2usv(cpm_value):
    """
    Convert counts-per-minute to microsieverts (12 CPM -> 0.1 uSv).

    Using chart at:
    http://nukeprofessional.blogspot.jp/2012/04/geiger-counter-interpretation.html
    """
    return cpm_value * (0.1 / 12)
7595948a97fbc538e2c745eb643cf3c0ff278ae6
24,481
def set_optional_attr(in_args, classifier):
    """
    Copy the optional command-line arguments onto the classifier.

    Designed to be called right after instantiating the classifier
    object; each optional attribute is assigned through the object's
    setters only when the corresponding argument was supplied.

    Parameters:
        in_args - parsed command line arguments
        classifier - object of type Flower_Classifier

    Return:
        classifier - the same object with the optional attributes set.
    """
    for option in ('save_dir', 'learning_rate', 'hidden_units', 'epochs', 'gpu'):
        supplied = getattr(in_args, option)
        if supplied != None:  # noqa: E711 -- keeps the original comparison
            setattr(classifier, option, supplied)
    return classifier
b850ee150143d4727430f9d2077844fa687f26ee
24,482
import requests
def getStateLatLon(lat, lon):
    """
    Reverse-geocode coordinates to the state (principal subdivision)
    they fall into, using the Big Data Cloud client API.
    """
    url = (
        "https://api.bigdatacloud.net/data/reverse-geocode-client"
        f"?latitude={lat}&longitude={lon}&localityLanguage=en"
    )
    response = requests.get(url, headers={'User-Agent': 'Mozilla/5.0'})
    return response.json()["principalSubdivision"]
c452f178ab14325b91bbb0ff43793d45b1c61931
24,483
def get_field_labels():
    """Get a list of the field labels."""
    labels = ('Device', 'Signal', 'Error', 'Info')
    return list(labels)
184431658cbbb81c2cc3c6ca44e4344762dcad78
24,484
import json
def serialize_parameters(parameters: dict):
    """
    Serialize the hyperparameter tuning parameter spec.

    Args:
        parameters (Dict[str, hyperparameter_tuning._ParameterSpec]):
            Mapping of metric_id -> parameter specification (e.g.
            DoubleParameterSpec, IntegerParameterSpec,
            CategoricalParameterSpec, DiscreteParameterSpec from
            google.cloud.aiplatform.hyperparameter_tuning).

    Returns:
        A list of JSON strings, one per parameter, each containing the
        spec's attributes plus its "parameter_spec_value_key".
    """
    serialized = []
    for name in parameters:
        spec = parameters[name]
        payload = dict(spec.__dict__)
        payload["parameter_spec_value_key"] = spec._parameter_spec_value_key
        serialized.append(json.dumps(payload))
    return serialized
db311031cf9dfd6fd048da15ea71a73b7a9826cc
24,485
def get_group_with_given_permissions(permissions, groups):
    """Return the first group whose permission pk set equals *permissions*.

    Implicitly returns None when no group matches.
    """
    wanted = set(permissions)
    for group in groups:
        if {perm.pk for perm in group.permissions.all()} == wanted:
            return group
8fc7d3697fa68d4c3609383ae588cd13d16e1b45
24,486
from typing import List
def fix_pyhsm(pkgs: List[str]) -> List[str]:
    """
    Replace the git-URL hack used to install pyhsm from SUNET with the
    real package name 'pyhsm', so setuptools does not choke on the
    'git+https://github.com/SUNET/python-pyhsm@ft-python3-support'
    requirement string ("Parse error at ...").
    """
    fixed = []
    for requirement in pkgs:
        if 'ft-python3-support' in requirement:
            fixed.append('pyhsm')
        else:
            fixed.append(requirement)
    return fixed
7af2fc81ef5ddfb794a9803f4960084b0be3b96e
24,487
def modify_column(func, rows):
    """Apply *func* to the first column of every supplied row.

    Returns new row lists; the originals are left untouched.
    """
    return [[func(row[0])] + row[1:] for row in rows]
1362fd3a54df44a132e47563d8ffe2d472ceb934
24,491
def bool_from_native(value):
    """Convert a native/stringly value to bool.

    The strings 'false', 'f', 'False' and '0' count as False; everything
    else falls through to ``bool()``.
    """
    falsy_strings = ('false', 'f', 'False', '0')
    return False if value in falsy_strings else bool(value)
5f9cbcef5c862b5a507d376ffca8d814acfee87b
24,494
import os
def adjust_filename(fname, ext_out):
    """Replace the extension in a filename.

    *fname* can be a full path, in which case only the (adjusted) tail
    is returned.

    Parametrs
    -----------
    fname: str
    ext_out: str

    Returns
    -----------
    tail_out : str
    """
    tail = os.path.split(fname)[1]
    base = os.path.splitext(tail)[0]
    return base + ext_out
97a76dae5e3150e5dee2bded2b340a8992f35625
24,495
import os
def replaceExtensionAndFolder(inputPath, outputFolder, newExtension):
    """Rebuild *inputPath* with a new extension inside *outputFolder*."""
    stem = os.path.splitext(inputPath)[0]
    filename = os.path.basename(stem + newExtension)
    return os.path.join(outputFolder, filename)
ecd146f79304136e451d464b7f1b07f001c4201e
24,496
def determine_url_parts(sharepoint_url, path):
    """Split a site-relative *path* into its SharePoint components.

    Returns (site_url, relative_path, file_name, document_lib); the last
    two are False when absent. The path must look like
    site|group|team/<name>/..., optionally with a ':'-tagged document
    library as its third segment.
    """
    file_name = False
    document_lib = False
    segments = path.split("/")
    if len(segments) < 3:
        error_message = f"Invalid path specified. Path need to start with site|group|team/<name>/. Path specified was '{path}'"
        raise Exception(error_message)
    site = sharepoint_url + "/" + "/".join(segments[:2])
    if ":" in segments[2]:
        # The third segment carries the document library after the colon.
        document_lib = segments[2].split(":")[1]
        rel_path = "/".join(segments[3:])
    else:
        rel_path = "/".join(segments[2:])
    last_segment = segments[-1]
    # A dot in the final segment means it names a file.
    if len(last_segment.split(".")) > 1:
        file_name = last_segment
    return site, rel_path, file_name, document_lib
27332f39f8d73a808d2464f5213b2445f6414f2a
24,498
def check_between_dates(start, end, to_check_start, to_check_end=False):
    """
    CBD - Check whether datetime(s) lie between two other datetimes.

    param start: datetime - Start datetime to check
    param end: datetime - End datetime to check
    param to_check_start: datetime - Value that must fall inside
    param to_check_end: datetime - Optional second value; when given,
        both values must lie (in order) inside the interval
    return: Boolean - True if to_check_start (and to_check_end) fall
        between start and end, otherwise False
    """
    if to_check_end:
        return start <= to_check_start <= to_check_end <= end
    return start <= to_check_start <= end
1f80f25ac7afd79985169bc547d4e3442638645f
24,499
from typing import Counter
import math
def shannon_entropy(data):
    """Calculate the Shannon entropy (in bits) of *data*.

    Arguments:
        data {string} -- the string whose entropy must be calculated

    Returns:
        [float] -- entropy of the character distribution of the string
    """
    total = float(len(data))
    frequencies = Counter(data)
    return -sum(
        (count / total) * math.log(count / total, 2)
        for count in frequencies.values())
d383054f33639136182751a6d884839595ec4efd
24,500
def note_to_f(note: int, tuning: int=440) -> float:
    """Convert a MIDI note number to a frequency.

    Args:
        note: A MIDI note
        tuning: The tuning, defined as the frequency of A4 (note 69).

    Returns:
        The frequency in Hertz of the note.
    """
    semitones_from_a4 = note - 69
    return tuning * 2 ** (semitones_from_a4 / 12)
55201b54e525966ee7f8133c217111b22a26d827
24,501
import os
import glob
def valid_directory(ResuDir):
    """Check whether *ResuDir* is a valid result directory.

    Valid when it contains at least one VOICEXX_<digits>*.h5 diagnostic
    file or a pre-built 'combined_diag.h5' cache.
    """
    cache_file = os.path.join(ResuDir, 'combined_diag.h5')
    diag_files = glob.glob(os.path.join(ResuDir, 'VOICEXX_[0-9]*.h5'))
    return bool(diag_files) or os.path.exists(cache_file)
a25622b434753e3b31d75fbc4c01f5c259d724ed
24,502
def get_attr(attrs, bases, attrName):
    """Look up *attrName* through an attribute dict and base classes (bfs).

    ``attrs`` is the attribute mapping checked first (initially the
    class's own dict); ``bases`` seeds a breadth-first walk over the
    inheritance graph. Returns the first value found, or None when the
    whole graph is exhausted.

    NOTE(review): the traversal iterates over list ``s`` while extending
    it; each base is appended at most once thanks to ``seen``, so the
    iteration terminates.
    """
    s = list(bases)
    seen = set(s)
    it = iter(s)
    while True:
        # Check the current attribute mapping before advancing the queue.
        if attrName in attrs:
            return attrs[attrName]
        next_base = next(it, None)
        if next_base is None:
            # Queue exhausted: the attribute was not found anywhere.
            return None
        # Enqueue unseen bases of this base, keeping breadth-first order.
        s.extend(b for b in next_base.__bases__ if b not in seen)
        seen.update(next_base.__bases__)
        attrs = next_base.__dict__
1453997f93b3b8ed999d852b1386ce3ef44074af
24,503
def compress(depths):
    """Compress a depth mapping into "steps".

    Zones of consistent depth are registered as one singular depth, for
    the sake of finding peaks.

    Args:
        depths (dictionary): The original depth data in position: depth
            format (positions and depths must be int()-convertible).

    Returns:
        dictionary: (start_pos, end_pos) tuple -> depth of that step.
    """
    depths_compressed = {}
    keys = list(depths.keys())
    if not keys:
        # The original crashed on keys[0] for empty input.
        return depths_compressed
    start_key = keys[0]
    start = int(depths[keys[0]])
    jump = False
    for index, key in enumerate(keys):
        end = int(depths[key])
        if index > 0:
            # A gap in consecutive positions also terminates a step.
            if int(key) - int(keys[index - 1]) != 1:
                jump = True
        if start != end or jump:
            key_out = int(start_key), int(keys[index - 1])
            depths_compressed[key_out] = int(start)
            start = end
            start_key = key
            jump = False
    # Bug fix: the original dropped the final run because nothing
    # flushed it after the loop; emit the trailing step here.
    depths_compressed[(int(start_key), int(keys[-1]))] = int(start)
    return depths_compressed
9208802db69bab2344b55e1dd137a655c67fd279
24,504
def _time_string_format(dt, time_zone=None): """ format a datetime based on time zone :param dt: datetime object :param time_zone: a timezone object, or None for local time zone :return: """ return dt.astimezone(time_zone).isoformat()
9e96bd74b01877234a3406e74a8d82d1066b79f8
24,505
import os
def check_pid(pid):
    """Check for the existence of a unix pid.

    Sends signal 0 (a no-op probe); an OSError means the process does
    not exist (or cannot be signalled), which is printed and reported
    as False.
    """
    try:
        os.kill(pid, 0)
    except OSError as ex:
        template = "An exception of type {0} occured.\nArguments:\n{1!r}"
        message = template.format(type(ex).__name__, ex.args)
        print(message)
        return False
    return True
aec1c8065291e04f1133047dbd7da395fa6e9124
24,506
import os
import re


def custom_secure_filename(filename):
    """Sanitize an untrusted filename so it is safe for the filesystem,
    while preserving CJK (Chinese) characters.

    Path separators are collapsed to spaces, whitespace runs become a
    single underscore, and any character outside ``A-Za-z0-9_.-`` or the
    CJK range U+4E00-U+9FA5 is stripped.  On Windows, reserved device
    names (CON, NUL, ...) are prefixed with an underscore.

    Bugs fixed:
    - The original encoded the name to ``bytes`` first, which made the
      subsequent ``str`` operations (``replace`` with a str separator,
      ``'_'.join``) raise TypeError on Python 3.
    - The regex had a misplaced ``-`` creating an unintended character
      range from ``.`` to U+4E00, which let unsafe characters such as
      ``:`` slip through; the dash is now escaped.
    """
    for sep in os.path.sep, os.path.altsep:
        if sep:
            filename = filename.replace(sep, ' ')
    # Allowed: ASCII word chars, dot, dash, and CJK U+4E00..U+9FA5.
    _filename_zh_ascii_strip_re = re.compile(u"[^A-Za-z0-9_.\\-\u4e00-\u9fa5]")
    _windows_device_files = ('CON', 'AUX', 'COM1', 'COM2', 'COM3', 'COM4', 'LPT1',
                             'LPT2', 'LPT3', 'PRN', 'NUL')
    filename = _filename_zh_ascii_strip_re.sub('', '_'.join(filename.split())).strip('._')
    if os.name == 'nt' and filename and filename.split('.')[0].upper() in _windows_device_files:
        filename = '_' + filename
    return filename
1f28ecb98d699923b14487f74cd11b586d0d848f
24,507
from typing import List
import operator


def insertion_sort(a: List, compare=operator.gt) -> List:
    """Sort *a* in place using insertion sort and return it.

    ``compare(x, key)`` returning True means *x* must shift past *key*;
    the default ``operator.gt`` yields ascending order, ``operator.lt``
    yields descending order.
    """
    for right in range(1, len(a)):
        current = a[right]
        left = right
        while left > 0 and compare(a[left - 1], current):
            a[left] = a[left - 1]
            left -= 1
        a[left] = current
    return a
be6915ceac55a7a890c775ef8f3953e9860b9fc5
24,508
import json


def get_json_from_rel_path(rel_path):
    """Load and return the JSON content of *rel_path*.

    rel_path::str
        The relative path to the json file; a falsy path yields ``{}``.
    """
    if not rel_path:
        return {}
    with open(rel_path, 'r') as handle:
        return json.load(handle)
a441febf22f325467004fffca986adec08233b3c
24,509
def _get_iso_name(node, label): """Returns the ISO file name for a given node. :param node: the node for which ISO file name is to be provided. :param label: a string used as a base name for the ISO file. """ return "%s-%s.iso" % (label, node.uuid)
3d73236bfa2b8fab8af39b9a3083b540e93eb30d
24,510
def search_vowel(args):
    """Report whether ``args.vowel`` occurs in ``args.text``.

    The whole string is scanned, so when the vowel appears more than
    once the index of the *last* occurrence is reported.  The result is
    printed; the function always returns None.
    """
    text = args.text
    vowel = args.vowel
    last_index = -1  # -1 means "not found"
    for position, letter in enumerate(text):
        if letter == vowel:
            last_index = position
    if last_index >= 0:
        return print(f'Found "{vowel}" in "{text}" at index {last_index}.')
    print(f'"{vowel}" is not found in "{text}".')
52a3ec30ed58e6bb94d9488f1b0d57eb9c7bea83
24,511
def canonicalize_rules(rules):
    """
    Canonicalize rules.
    New Rule objects are created.

    :param rules: Set of Rule objects
    :return: Set of Rule objects
    """
    return {rule.canonicalize() for rule in rules}
f88c1be8f73e2420d09d17682cc36387e78b1553
24,512
def open_file(file):
    """Open a file and read the content.

    Returns the file's full text, or '' when *file* is falsy or any
    error occurs while opening/reading it (a message is printed).
    """
    if not file:
        return ''
    content = ''
    try:
        # NOTE(review): 'r+' requires write permission even though the
        # file is only read — presumably intentional; confirm.
        with open(file, 'r+', encoding="utf-8") as handle:
            content = handle.read()
    except Exception:
        print('Something wrong happened while trying to open the file ' + file)
    return content
0b0308256b2a68cdc5a84fad850cdaa086d3541d
24,513
import re


def prompt(message, suffix=' '):
    """Ask the user for input and return it normalized.

    The returned value has all whitespace removed and is lower-cased;
    an empty response yields ''.
    """
    raw = input("{}{}".format(message, suffix))
    if raw:
        return re.sub(r'\s', '', raw).lower()
    return ''
43444c5c913af10ed9a27c6f644b0db277b1eab9
24,515
def vsh_larionov_older(gr, sand_line, shale_line):
    """
    Apply Larinov equation (Larinov, 1969) to correct Gamma Ray index
    Vshale values.

    *gr* is expected to support numpy-style arithmetic and boolean
    assignment — TODO confirm against callers.  The result is clipped
    to the physical range [0, 1].
    """
    sand = float(sand_line)
    shale = float(shale_line)
    gr_index = (gr - sand) / (shale - sand)
    vsh = 0.33 * (2.0 ** (2.0 * gr_index) - 1.0)
    # clip values to range 0 - 1
    vsh[vsh > 1] = 1
    vsh[vsh < 0] = 0
    return vsh
35fe117cabb10e7d1e95d8e8a23035d4eb8f2950
24,516
def pages(request):
    """Adds the current page backend to the template."""
    return {"pages": request.pages}
d0df250697e2a9e686def8f0e39e91d1687099a4
24,518
import time


def seconds_since(t):
    """Return the number of seconds elapsed since epoch time *t*.

    Args:
        t (int) - a time in epoch seconds

    Returns:
        float: seconds elapsed since *t* (negative if *t* is in the
        future).
    """
    now = time.time()
    return now - t
db398fa2a18689c5ccd05d365948e9b5cd1f99d5
24,519
def intersect(box1, box2):
    """Calculate the intersection of two axis-aligned boxes.

    Boxes are (x0, y0, x1, y1) tuples.  Returns the overlapping box,
    or None when the boxes do not intersect.
    """
    x0 = max(box1[0], box2[0])
    y0 = max(box1[1], box2[1])
    x1 = min(box1[2], box2[2])
    y1 = min(box1[3], box2[3])
    if x0 > x1 or y0 > y1:
        return None  # disjoint boxes
    return (x0, y0, x1, y1)
70daa13f0f0e59bbb21f52a74434935a4424dc68
24,520
def convert_tokens(eval_file, qa_id, pp1, pp2):
    """
    Convert the predictions from the model into the format to be used by
    the evaluation function.
    :param eval_file: Contains the correct answers.
    :param qa_id: Question ids
    :param pp1: Start positions.
    :param pp2: End positions.
    :return: answer_dict - answers keyed by question id, later evaluated.
             remapped_dict - answers keyed by uuid, saved to file on
             test mode.
    """
    answer_dict = {}
    remapped_dict = {}
    for qid, start, end in zip(qa_id, pp1, pp2):
        entry = eval_file[str(qid)]
        # The answer runs from the start of span `start` to the end of
        # span `end` in the original context.
        answer = entry["context"][entry["spans"][start][0]: entry["spans"][end][1]]
        answer_dict[str(qid)] = answer
        remapped_dict[entry["uuid"]] = answer
    return answer_dict, remapped_dict
1e8280c2e72dd1ed22293ddc3518909ea4596fe8
24,521
def donottrack(request):
    """
    Adds ``donottrack`` to the context: ``True`` when the ``HTTP_DNT``
    header is ``'1'``, ``False`` otherwise.

    Requires installation of
    ``donottrack.middleware.DoNotTrackMiddleware``, which sets the
    ``donottrack`` attribute on the request (and patches the VARY header
    to account for DNT — which is why the middleware, rather than
    ``donottrack.utils.get_donottrack``, is required here).
    """
    if not hasattr(request, 'donottrack'):
        raise AttributeError("'WSGIRequest' object has no attribute 'donottrack'"
            " - 'donottrack.middleware.DoNotTrackMiddleware' must be in your"
            " MIDDLEWARE_CLASSES")
    return {'donottrack': request.donottrack}
50dc0c60ed70137c9aa1473ae436e48ef859ae9a
24,522
import warnings


def warning():
    """Base of all warnings.

    Escalates all warnings to errors, triggers one, and returns a
    message from the exception handler.

    Side effect: on return the global warnings filter is left set to
    'ignore' for Warning (set in the ``finally`` block) — note this is
    NOT restored to its previous state.

    Returns:
        str: "something strange might happen" (under the 'error' filter
        the warn call always raises, so the except path is always taken).
    """
    warnings.simplefilter('error', Warning)  # make warnings raise
    try:
        warnings.warn("danger", Warning)  # raises under the 'error' filter
    except Warning:
        return "something strange might happen"
    finally:
        # leave warnings silenced for subsequent code
        warnings.simplefilter('ignore', Warning)
a4599007c2c6242c14358cb08d04da9d94ee45e4
24,523
def _sort_orders(orders): """Sort a list of possible orders that are to be tried so that the simplest ones are at the beginning. """ def weight(p, d, q, P, D, Q): """Assigns a weight to a given model order which accroding to which orders are sorted. It is only a simple heuristic that makes it so that the simplest models are sorted first. """ cost = 0 if P + D + Q: cost += 1000 cost += p**2 + d**2 + q**2 cost += P**3 + 2*d**3 + 3*q**3 return cost orders = [(weight(*o), o) for o in orders] orders.sort() orders = [x[1] for x in orders] return orders
a164aa850e44ddb59429436b78ea2cf0603a1aae
24,524
def ToGLExtensionString(extension_flag):
    """Returns GL-type extension string of a extension flag."""
    if extension_flag == "oes_compressed_etc1_rgb8_texture":
        # Fixup inconsistency with rgb8, unfortunate.
        return "OES_compressed_ETC1_RGB8_texture"
    uppercase_words = {'img', 'ext', 'arb', 'chromium', 'oes', 'amd',
                       'bgra8888', 'egl', 'atc', 'etc1', 'angle'}
    converted = []
    for word in extension_flag.split('_'):
        converted.append(word.upper() if word in uppercase_words else word)
    return "_".join(converted)
78d767beba572291193c9819f885c8eb46650c1c
24,525
def uniqued(iterable):
    """Return unique list of items preserving order.

    >>> uniqued([3, 2, 1, 3, 2, 1, 0])
    [3, 2, 1, 0]
    """
    seen = set()
    result = []
    for item in iterable:
        if item not in seen:
            seen.add(item)
            result.append(item)
    return result
51bc142d6872a2e811724cd0371f982a390d8f06
24,528
import re


def cleanup(name, sep="-"):
    """Used for sanitizing addon names.  Removes the part of the name
    before the first *sep* (e.g. the Orange/Orange3 prefix) and adds
    spaces before the upper-case letters of the leftover to separate
    its words."""
    head, found, tail = name.partition(sep)
    base = tail if found == sep else head
    capitalized = base[0].upper() + base[1:]
    return " ".join(re.findall("[A-Z][a-z]*", capitalized))
69d367bd3a0865bdca9d61978fa65a319e80207c
24,529
import os


def get_runtime_token():
    """Returns the value of the ACTIONS_RUNTIME_TOKEN var in the
    environment.  Raises an exception if not set (or empty)."""
    value = os.environ.get('ACTIONS_RUNTIME_TOKEN')
    if value:
        return value
    raise Exception('Unable to get ACTIONS_RUNTIME_TOKEN env variable')
8f9b7360d5734c6f460ecca13a666511dcbf685c
24,530
def genKgris(k):
    """Compute a list of ``k`` grey shades going from black to white.

    Parameter:
        k --> number of shades (>= 2)

    The generated list necessarily starts with black (0, 0, 0) and
    necessarily ends with white (255, 255, 255).  The remaining values
    are grey shades uniformly spread between black and white.

    EXAMPLES::

        >>> genKgris(2)
        [(0, 0, 0), (255, 255, 255)]
        >>> genKgris(3)
        [(0, 0, 0), (127, 127, 127), (255, 255, 255)]
        >>> genKgris(4)
        [(0, 0, 0), (85, 85, 85), (170, 170, 170), (255, 255, 255)]
    """
    # The k-2 intermediate shades split the 0..255 range into k-1
    # equal intervals (integer step).
    step = 255 // (k - 1)
    shades = [0] + [i * step for i in range(1, k - 1)] + [255]
    return [(s, s, s) for s in shades]
3e480d5bba5f60e3448392da97c7d7738f5decad
24,531
def fromRoman(r):
    """Return the numeric value of a valid roman numeral.

    Raises ValueError for strings containing invalid characters.
    """
    # Two-character subtractive forms are tried before single characters.
    values = {'M': 1000, 'CM': 900, 'D': 500, 'CD': 400, 'C': 100,
              'XC': 90, 'L': 50, 'XL': 40, 'X': 10, 'IX': 9,
              'V': 5, 'IV': 4, 'I': 1}
    total = 0
    i = 0
    try:
        while i < len(r):
            pair = r[i:i+2]
            if len(pair) == 2 and pair in values:
                total += values[pair]
                i += 2
            else:
                total += values[r[i]]
                i += 1
        return total
    except KeyError:
        raise ValueError('Bad roman numeral: ' + r)
b03c49bf1dfb3f72585ff2667cf9df2eea9e7a4c
24,532
from datetime import datetime import random def _get_random_high(): """Return big random number""" return int(datetime.now().microsecond * datetime.now().microsecond * (1+ random.random()))
2ab6fb65ad76a99a2ee9b728d70ccda87567d170
24,533
import glob


def summfile(ifldpth, idx):
    """
    Find _x1dsum file ending with idx + '_x1dsum.fits'

    Parameters
    ----------
    ifldpth : str
        path to the files
    idx : str
        the file ends with idx + '_x1dsum.fits', e.g. idx = '10'

    Returns
    -------
    fname : str
        sum file name; when the match is not unique, a diagnostic is
        printed and the raw match list is returned instead.
    """
    matches = glob.glob(ifldpth + '*' + idx + '_x1dsum.fits')
    if len(matches) == 1:
        return matches[0]
    print('multiple or zero ' + idx + '_x1dsum files: ', len(matches))
    print(ifldpth)
    return matches
05f6778276487de8feb684dca41c326321335049
24,534
def get_hashable_cycle(cycle):
    """Cycle as a tuple in a deterministic order.

    Args
    ----
    cycle: list
        List of node labels in cycle.

    The rotation starts at the minimum label; the traversal direction is
    chosen by comparing the two neighbours of the minimum.
    """
    lowest = min(cycle)
    at = cycle.index(lowest)
    nxt = (at + 1) % len(cycle)
    if cycle[at - 1] > cycle[nxt]:
        # Forward direction: rotate so the minimum comes first.
        ordered = cycle[at:] + cycle[:at]
    else:
        # Backward direction: reverse the two halves around `nxt`.
        ordered = list(reversed(cycle[:nxt])) + list(reversed(cycle[nxt:]))
    return tuple(ordered)
28e81992a4fc306151e8ac87426721f41d516beb
24,535
from typing import List


def split_path(path: str) -> List[str]:
    """Return the components of the path.

    Relative paths that do not already start with ".", "~" or "/" are
    prefixed with "./" before splitting, so their first component is ".".

    >>> split_path("a/b/c")
    ['.', 'a', 'b', 'c']
    >>> split_path("/a/b/c")
    ['', 'a', 'b', 'c']
    >>> split_path("~/a/b/c")
    ['~', 'a', 'b', 'c']
    >>> split_path("")
    ['']

    NOTE(review): the original docstring claimed
    ``path == "/".join(split_path(path))`` for all paths and
    ``split_path("a/b/c") == ["a", "b", "c"]``; neither holds because of
    the "./" prefixing above — confirm which behavior is intended.
    """
    if path in ("", "/"):
        return [""]
    if not path.startswith((".", "~", "/")):
        path = f"./{path}"
    return path.split("/")
b5414ee04e458b2bcd78e5802fbe68b73c0d8429
24,537
def _unmask_idx(idx_mask, to_change): """Return mask over all indices originally passed to _masked_idx, marked False where indices were originally masked out or where to_change parameter is False. Parameters: idx_mask: as returned by _masked_idx to_change: mask over True values in idx_mask """ if idx_mask is None: return to_change else: idx_mask[idx_mask] = to_change return idx_mask
d2880a4ef97bd38504e43f2dd3a87be591d6a511
24,538
def getTS(z):
    """
    Calculate the spin temperature at a given redshift

    Not a totally straightforward thing to work out.

    NOTE(review): placeholder implementation — always returns 1
    regardless of the redshift ``z``; the real calculation is still
    to be done.
    """
    TS=1
    return TS
d9f47188f34c6f90d4be7b7fae10da10c8475e9f
24,539
import os


def get_article_summary_urls():
    """Build the article and summary URL lists for the NCTB dataset.

    The NCTB dataset maintained in the Github repo is listed as separate
    files (139 article/summary pairs, numbered 1..139).  For more
    information, see:
    https://github.com/tafseer-nayeem/BengaliSummarization

    Returns:
        tuple: (article_urls, summary_urls), two parallel lists of str.

    Bug fixed: URLs were assembled with ``os.path.join``, which would
    produce backslash separators on Windows; plain string concatenation
    is used instead (identical output on POSIX).
    """
    root_url = ("https://raw.githubusercontent.com/tafseer-nayeem/"
                "BengaliSummarization/main/Dataset/NCTB/")
    article_urls = [root_url + "Source/{}.txt".format(i) for i in range(1, 140)]
    summary_urls = [root_url + "Summary/{}.txt".format(i) for i in range(1, 140)]
    return article_urls, summary_urls
e6b0b4172eb2169a29b94458f9f61f0150281bc1
24,540
def get_susceptibility_matrix_index(age):
    """Map an age to its index in the 16x16 susceptibility matrix.

    Ages are bucketed in 5-year bands (0-4 -> 0, 5-9 -> 1, ...), with
    everyone aged 75+ collapsed into the last band (index 15).
    """
    return 15 if age >= 75 else age // 5
6efb3665f8578820d4bd1c969538c3026f2bdcf9
24,541
def truncate(string, length=60):
    """Truncate the given string when it exceeds the given length.

    Strings longer than *length* are cut to ``length - 4`` characters
    and suffixed with three dots; shorter ones are returned unchanged.
    """
    if len(string) <= length:
        return string
    return string[:length - 4] + '...'
c3bfc6d78703830e23240e671bc5c29f1cdcb19d
24,543
from PIL import Image


def pilnew_imageobject(image_file: str):
    """Returns a PIL image object from a referenced image file.

    Args:
        image_file (str): Reference an existing image file.

    Returns:
        PIL.Image.Image: Returns a PIL image object
    """
    return Image.open(image_file)
f80e8b2cb9a4a93f94fe1d3005c53caa54c00877
24,545
def string_handler(item):
    """
    Create a string out of an item if isn't it already.

    Parameters:
    - item: The variable to make sure it is a string.

    Returns:
    The input as a string.
    """
    if isinstance(item, str):
        return item
    return str(item)
dcd767d1e05bab1f2ce770347b578b627005acf4
24,546
from typing import Dict
from typing import List
from typing import Any


def parse_get_arguments(args: Dict[bytes, bytes], expected_args: List[str]) -> Dict[str, Any]:
    """Parse all expected arguments.

    If there are missing arguments, returns them joined in the
    ``missing`` key with ``success: False``; otherwise returns the
    decoded first value of each expected argument under ``args``.
    """
    received = {key.decode('utf-8') for key in args}
    missing = set(expected_args).difference(received)
    if missing:
        return {'success': False, 'missing': ', '.join(missing)}
    decoded: Dict[str, str] = {}
    for name in expected_args:
        value = args[name.encode('utf-8')][0]
        assert isinstance(value, bytes)
        decoded[name] = value.decode('utf-8')
    return {'success': True, 'args': decoded}
23dd090f6094966d726e1652927123eb13e90a56
24,547
import os
import glob


def get_annon_db_file(base_path, tblname):
    """Utility function to get all the files for merging when
    consolidating to create a single file aka DB file.

    Recursively walks *base_path* and returns every path matching
    ``*-<tblname>`` in any subdirectory.
    """
    matches = []
    for dirpath, _dirnames, _filenames in os.walk(base_path):
        matches.extend(glob.glob(os.path.join(dirpath, '*-' + tblname)))
    return matches
f19e4f946bd8a68c6078852ebc75312b100d5300
24,549