content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def stations_by_river(stations):
    """Group monitoring stations by the river they sit on.

    Args:
        stations (list): MonitoringStation objects.

    Returns:
        dict: river name -> list of station names on that river.
    """
    rivers = {}
    for station in stations:
        rivers.setdefault(station.river, []).append(station.name)
    return rivers
f094ee7cae07a0b6037aac75559b68b09e88c26d
21,963
from typing import Iterable
from typing import Tuple
from typing import Any
from typing import List


def unzip(tuples: Iterable[Tuple[Any, ...]]) -> List[Iterable[Any]]:
    """The inverse of the `zip` built-in: regroup tuple elements by position."""
    return list(map(list, zip(*tuples)))
10dd4755c501f64f6b98dea8abd7677d6fa23535
21,964
import os


def is_file_exists(file_path, is_relative=True):
    """Check whether a path exists on disk.

    Args:
        file_path (str): path to check.
        is_relative (bool, optional): treat ``file_path`` as relative to the
            current working directory. Defaults to True.

    Returns:
        str or bool: the full path if it exists, ``False`` otherwise.
        (BUG FIX: docstring previously claimed ``None`` was returned on a
        missing file, but the code returns ``False``.)
    """
    if is_relative:
        file_path = os.path.join(os.getcwd(), file_path)
    # os.path.exists is also True for directories, matching original behavior.
    if not os.path.exists(file_path):
        return False
    return file_path
9bcfd5334b2ae1f2e55e9979feefc470334cc566
21,965
def clean_unicode(text):
    """Strip non-ASCII characters from *text*.

    Non-ASCII characters are replaced by '?' during encoding, and every '?'
    (including any originally present) then becomes a space.

    Args:
        text (str): input string, e.g. a news title or body.

    Returns:
        str: ASCII-only string.
    """
    ascii_only = text.encode("ascii", errors="replace").strip().decode("ascii")
    return ascii_only.replace("?", ' ')
ed62e2644818120ea8417e2549bac76870447e55
21,967
import math


def make_divisible(x, divisor, ceil=True):
    """Round *x* to a multiple of *divisor*.

    With ceil=True the closest multiple >= x is returned; with ceil=False the
    closest multiple <= x.
    """
    rounder = math.ceil if ceil else math.floor
    return rounder(x / divisor) * divisor
02df42d8c490ac0c85d7ad983c0c9c838bdfa088
21,968
def color_mapping_func(labels, mapping):
    """Map each label (integer or string) in *labels* to its color via *mapping*."""
    return [mapping[label] for label in labels]
da1f4f5432a28ef972bdcd74bd351ab4d5d09aad
21,969
from collections import Counter


def count_bits_by_place(places_list):
    """Return a frequency count of each place value.

    Args:
        places_list: iterable of place values.

    Returns:
        collections.Counter: place value -> occurrence count.
    """
    # Fix: the original imported Counter from `typing`, which is a deprecated
    # annotation-only alias; the real container lives in `collections`.
    return Counter(places_list)
7fcda99e60d8f9b6734103e2776364a0cb8bcae0
21,970
def cli(ctx, group_id, new_name):
    """Update the name of a group.

    Output:

        a dictionary containing group information
    """
    groups_client = ctx.gi.groups
    return groups_client.update_group(group_id, new_name)
f4b532eb18a04d56bd1b2e5fd1b01db1f53ff50b
21,971
def bytes_to_str(b):
    """Decode *b* as latin-1 unless it is already a str."""
    return b if isinstance(b, str) else str(b, 'latin1')
1494de161b7a9b4ef7743627bbbeba7624c7fa1c
21,975
import os
import json


def get_default_config():
    """Load the configuration-settings JSON, creating it with defaults if absent.

    :return: dict containing configuration settings
    """
    settings_file = os.path.join(
        os.path.dirname(os.path.realpath(__file__)), 'config_settings.json')
    if os.path.isfile(settings_file):
        with open(settings_file) as file_stream:
            return json.load(file_stream)
    # First run: persist the default settings next to this module.
    config = {
        'text_norm': {'utf8_to_ascii': True,          # Options: True or False
                      'remove_twitter': True,         # Options: True or False
                      'remove_markup': True,          # Options: True or False
                      'remove_nonsentenial': True},   # Options: True or False
        'counts': {'combine_same_user_counts': True,  # Options: True or False
                   'format': '.json',  # Options: '.json', '.json.gz', '.txt', or '.txt.gz'
                   'count_separator': '|'},
        'filter_counts': {},
        'model': {'method': 'sklearn'},
    }
    with open(settings_file, 'w') as outfile:
        json.dump(config, outfile)
    return config
21a43c7bf8fcd6856d84c00fb4c8486297790cc8
21,976
import inspect


def isobject(obj):
    """Return True when *obj* is a class instance.

    Classes, routines, objects without ``__dict__``, and falsy values all
    yield False.

    Returns:
        bool: always a boolean.
        (BUG FIX: the original returned ``None`` — a bare ``return`` — for
        falsy input, giving an inconsistent return type.)
    """
    if not obj:
        return False
    if not hasattr(obj, '__dict__'):
        return False
    if inspect.isroutine(obj):
        return False
    if inspect.isclass(obj):
        return False
    return True
4b596354e3f88eb73ac6f30b026d07720463d541
21,978
def brewery_name(request):
    """Fixture: return a brewery name from the parametrized request.

    (Docstring translated from Russian: "Fixture returns the list of brewery
    names".)

    :param request: pytest fixture-request object; ``request.param`` holds
        the current parameter value.
    """
    return request.param
7456a9334bbb5e351604396daa30f0191ce13538
21,979
def print_exception(type, value, traceback, limit=0, file=None):
    """Print exception information and up to *limit* stack trace entries from
    *traceback* to *file*.

    This differs from :func:`print_tb` in the following ways: (1) if
    *traceback* is not ``None``, it prints a header ``Traceback (most recent
    call last):``; (2) it prints the exception *type* and *value* after the
    stack trace; (3) if *type* is :exc:`SyntaxError` and *value* has the
    appropriate format, it prints the line where the syntax error occurred
    with a caret indicating the approximate position of the error.

    NOTE(review): this is a stub mirroring the stdlib
    ``traceback.print_exception`` signature — it performs no work and
    always returns None.  (The `type` parameter also shadows the builtin,
    matching the stdlib signature.)
    """
    return None
4a7420be3e003d2b884d739bd12b3f752d8a9534
21,980
def _dict_key_to_key(dictionary): """creates a dummy map from the nominal key to the nominal key""" return {key : key for key in dictionary.keys()}
9feed672c1f678ed58a4d29fea5fbb0848b8b483
21,981
def seconds_to_nanoseconds(seconds):
    """Convert seconds to nanoseconds.

    :param seconds: number of seconds (int or float)
    :returns: the same duration expressed in nanoseconds
    """
    return seconds * 10 ** 9
d9f4687335b263a73f7f22065bfbbebb12468ce3
21,982
import os
import shutil


def create_experiment_directories(exp_dir, model_name, force=False):
    """Create the directory tree required for a specific DeepSphere model.

    :param exp_dir: base experiments directory.
    :param model_name: model subfolder name appended to ``exp_dir``.
    :param force: delete an already-existing model directory instead of
        raising ValueError.
    :return: the model's experiment directory path.
    """
    # Check if the experiment directory already exists
    exp_dir = os.path.join(exp_dir, model_name)
    if os.path.exists(exp_dir):
        if force:
            # Wipe the previous experiment tree before recreating it.
            shutil.rmtree(exp_dir)
        else:
            raise ValueError("The directory {} already exists.\ force=True in create_experiment_directories() will delete content of the existing directory.\ Please delete such directory manually or: \ - specify 'model_name' in model_settings \ - specify 'model_prefix' and/or 'model_suffix' in model_settings".format(exp_dir))
    ##------------------------------------------------------------------------.
    # Define standard directories
    model_weights_dir = os.path.join(exp_dir, "model_weights")
    figures_dir = os.path.join(exp_dir, "figs")
    figs_skills_dir = os.path.join(figures_dir, "skills")
    figs_training_info_dir = os.path.join(figures_dir, "training_info")
    model_predictions_dir = os.path.join(exp_dir, "model_predictions")
    space_chunked_forecasts_dir = os.path.join(model_predictions_dir, "space_chunked")
    forecast_chunked_forecasts_dir = os.path.join(model_predictions_dir, "forecast_chunked")
    model_skills_dir = os.path.join(exp_dir, "model_skills")
    ##------------------------------------------------------------------------.
    # Create directories (exist_ok=False: each must not pre-exist)
    os.makedirs(model_weights_dir, exist_ok=False)
    os.makedirs(figs_skills_dir, exist_ok=False)
    os.makedirs(figs_training_info_dir, exist_ok=False)
    os.makedirs(model_skills_dir, exist_ok=False)
    os.makedirs(space_chunked_forecasts_dir, exist_ok=False)
    os.makedirs(forecast_chunked_forecasts_dir, exist_ok=False)
    ##------------------------------------------------------------------------.
    # Return the experiment directory
    return exp_dir
1aa35479d84ea40dfb187924c9596601c7ec1362
21,983
def get_board_copy(board):
    """Return a shallow copy of the board list."""
    return board[:]
36aa2bd0719dc73602fd1fda68b120ea9c6a4a4c
21,984
def map_challenge_set(x):
    """
    Map a SQLAlchemy ChallengeSet model object into a generic map.

    :param x: SQLAlchemy ChallengeSet model.
    :return: dict with the fields relevant to the REST API.
    """
    fields = ("id", "slug", "name", "description")
    return {field: getattr(x, field) for field in fields}
5a70a8d14a486c9ee57475d8b4c11b8d956430b1
21,985
import requests


def get_session(username, password):
    """Return a requests session for the SANtricity RestAPI webserver."""
    session = requests.Session()
    session.auth = (username, password)  # default credentials
    session.headers = {'Accept': 'application/json',
                       'Content-Type': 'application/json'}
    # Tolerate self-signed certificates on https.
    session.verify = False
    return session
42f896195f714bad3928146b49bac88564b4a477
21,986
from typing import List
import re


def lexLine(line : str) -> List[str]:
    """Tokenize one line of VLang code.

    Args:
        line (str): a line of VLang source.

    Returns:
        List(str): tokens; ["\\n"] for a blank line, ["#"] for a comment line.
    """
    if line == "\n":
        return ["\n"]
    if line.startswith('#'):
        return ["#"]
    # Split on whitespace while keeping double quotes as their own tokens;
    # re.split emits None/'' fillers which are dropped below.
    pieces = re.split('\s|(\")', line)
    return [piece for piece in pieces if piece]
3f8c3633300275e43c93381c91202ac02aeca01b
21,987
def parse_http_header(header, header_key):
    """
    **Parse HTTP header value**

    Extract the first whitespace-delimited token of the value of a specific
    header from a raw HTTP response.

    :param header: String containing the RAW HTTP response and headers
    :type header: str
    :param header_key: The header name of which to extract a value from
    :type header_key: str
    :return: The value of the header, or None when the header is absent
    :rtype: str
    """
    wanted = header_key.strip().lower()
    for raw_line in header.split('\r\n'):
        name, _, value = raw_line.strip().partition(':')
        if name.strip().lower() == wanted:
            return value.split()[0]
993b8190d631accf7c63e259aa5f4f4c4b657c0e
21,988
import os


def insertintofilename(filepath, insertion):
    """Insert *insertion* between a file's base name and its extension."""
    directory = os.path.dirname(filepath)
    stem, suffix = os.path.splitext(filepath)
    prefix = directory + os.sep if directory else ''
    return prefix + os.path.basename(stem) + insertion + suffix
940bbeac3a778334afc37822546018ccd6331970
21,989
def split(word):
    """Return *word* as a list of its individual characters."""
    return list(word)
dd33a04b7de26097dd02070339746cab7417a5dd
21,992
def index_select_multidim_jit(input, indices):
    """Manual unroll of index_select_multidim_nojit.

    Gathers ``input[indices[..., 0], ..., indices[..., n-1], ...]`` with each
    index clamped into the valid range of its dimension.

    Args:
        input: source tensor.
        indices: integer tensor whose last dimension (size n, n <= 3)
            indexes the first n dimensions of *input*.

    Raises:
        NotImplementedError: when ``indices.size(-1) > 3``.
    """
    n = indices.size()[-1]
    input_size = input.size()
    if n == 1:
        return input[(indices[..., 0].clamp(0, input_size[0] - 1), ...)]
    if n == 2:
        return input[(indices[..., 0].clamp(0, input_size[0] - 1),
                      indices[..., 1].clamp(0, input_size[1] - 1), ...)]
    if n == 3:
        return input[(indices[..., 0].clamp(0, input_size[0] - 1),
                      indices[..., 1].clamp(0, input_size[1] - 1),
                      indices[..., 2].clamp(0, input_size[2] - 1), ...)]
    # BUG FIX: `raise NotImplemented` raises a TypeError because
    # NotImplemented is a constant, not an exception class.
    raise NotImplementedError("index_select_multidim_jit supports n <= 3 only")
64d6a69264a9d18db15c7089da3b5bae5208bece
21,993
def is_dictable(obj):
    """Return ``True`` if *obj* has a ``to_dict()`` method (attribute check only)."""
    return hasattr(obj, "to_dict")
0a12ccae0a0d3242db0bb7ad6537f2ff34ee7c48
21,994
def _mean(values:list)->float: """ Return mean """ return sum(values)*1.0/len(values)
dc433357122e84523200a6e932c787f96ff66185
21,995
def get_name(i):
    """Return the name of the i-th component of a sensor sample.

    Args:
        i (int): component index in [0, 5].

    Returns:
        str: one of the accelerometer/gyroscope axis names.

    Raises:
        ValueError: if *i* is outside [0, 5].  (Fix: the original validated
        with `assert`, which is silently stripped under `python -O`.)
    """
    if not (i >= 0 and i <= 5):
        raise ValueError(f"Component {i} is not supported, must be between 0 and 5")
    names = ["x_acc", "y_acc", "z_acc", "x_gyro", "y_gyro", "z_gyro"]
    return names[i]
9775b41ecfb28a5cefaee0bbf83569f9d115d4ea
21,996
import os


def get_files_with_extension(directory, extension):
    """Return ``directory/name`` paths of entries whose name ends with *extension*."""
    return [f'{directory}/{entry}'
            for entry in os.listdir(directory)
            if entry.endswith(extension)]
2d31b443a1d3dfabd8fcca97b56e0b345be361c1
21,998
def xnnpack_optional_armcl_copts():
    """Compiler flags to optionally enable ARM ComputeLibrary benchmarks.

    No extra flags are currently required, so an empty list is returned.
    """
    return []
ada0a64f1d00f87963ce90c6f9672683c196f36d
22,002
from datetime import datetime


def datetime_to_str(dt: datetime) -> str:
    """Format *dt* as an underscore-separated timestamp string.

    Args:
        dt: The datetime object.

    Returns:
        The timestamp string, e.g. ``"2020_01_02_03_04_05"``.
    """
    return format(dt, "%Y_%m_%d_%H_%M_%S")
9b604c18e648ce6783cd0ff8897842a8576225ab
22,003
def compare(c1, c2):
    """Compares two configuration dictionaries

    Returns:
        < 0 if c1 is bigger than c2
        0 if they're equivalent sizes
        > 0 if c2 is bigger than c1
    """
    # NOTE(review): the `break` below ends the `for` after its first
    # iteration, so only one (insertion-order-dependent) key of c1 is ever
    # examined per pass of the `while`; when that key's delta is >= 0 the
    # `while` condition stays true and this loops forever.  The intended
    # algorithm is unclear — confirm before relying on this function.
    result = len(c2.keys()) - len(c1.keys())
    while result > -1:
        for k, v in c1.items():
            delta = c2[k] - v
            if delta < 0:
                result = -1
            else:
                result += delta
            break
    return result
e2cee75ba09ec293ad3032039d89888aa3c46d7b
22,004
import errno def _retry_if_file_already_exists(exception): """Retry if file already exist exception raised.""" return ( isinstance(exception, OSError) and exception.errno == errno.EEXIST )
d0094c155cc43f8172f85b1aa1fa42d7348330c2
22,005
import argparse


def build_parser():
    """Build a parser for all the arguments.

    Returns:
        argparse.ArgumentParser: parser for the benchmark build driver;
        only ``--arch`` is required.
    """
    parser = argparse.ArgumentParser(description='Build all the benchmarks')
    # Output directories
    parser.add_argument(
        '--builddir', type=str, default='bd',
        help='Directory in which to build benchmarks and support code',
    )
    parser.add_argument(
        '--logdir', type=str, default='logs',
        help='Directory in which to store logs',
    )
    # Target selection
    parser.add_argument(
        '--arch', type=str, required=True,
        help='The architecture for which to build',
    )
    parser.add_argument(
        '--chip', type=str, default='default',
        help='The chip for which to build',
    )
    parser.add_argument(
        '--board', type=str, default='default',
        help='The board for which to build',
    )
    # Toolchain binaries and flags
    parser.add_argument('--cc', type=str, help='C compiler to use')
    parser.add_argument('--ld', type=str, help='Linker to use')
    parser.add_argument(
        '--cflags', type=str, help='Additional C compiler flags to use'
    )
    parser.add_argument(
        '--ldflags', type=str, help='Additional linker flags to use'
    )
    # Command-line patterns used to drive arbitrary toolchains
    parser.add_argument(
        '--cc-define1-pattern', type=str,
        help='Pattern to define constant for compiler',
    )
    parser.add_argument(
        '--cc-define2-pattern', type=str,
        help='Pattern to define constant to a specific value for compiler',
    )
    parser.add_argument(
        '--cc-incdir-pattern', type=str,
        help='Pattern to specify include directory for the compiler',
    )
    parser.add_argument(
        '--cc-input-pattern', type=str,
        help='Pattern to specify compiler input file',
    )
    parser.add_argument(
        '--cc-output-pattern', type=str,
        help='Pattern to specify compiler output file',
    )
    parser.add_argument(
        '--ld-input-pattern', type=str,
        help='Pattern to specify linker input file',
    )
    parser.add_argument(
        '--ld-output-pattern', type=str,
        help='Pattern to specify linker output file',
    )
    # Libraries
    parser.add_argument(
        '--user-libs', type=str, help='Additional libraries to use'
    )
    parser.add_argument(
        '--dummy-libs', type=str, help='Dummy libraries to build and link'
    )
    # Benchmark run parameters
    parser.add_argument(
        '--cpu-mhz', type=int, help='Processor clock speed in MHz'
    )
    parser.add_argument(
        '--warmup-heat', type=int,
        help='Number of warmup loops to execute before benchmark',
    )
    parser.add_argument(
        '--clean', action='store_true', help='Rebuild everything'
    )
    return parser
b572449697981bb499974adb538c9e6194990edd
22,006
def no_deprecated_adapter(adapter):
    """Wrap *adapter* so that deprecated symbols are disabled.

    ``no_deprecated_adapter(adapter)(name, active, section)`` behaves like
    ``adapter(name, active, section)``, except that
    ``MBEDTLS_DEPRECATED_REMOVED`` is always forced on; a ``None`` adapter
    leaves every other symbol's current state unchanged.
    """
    def continuation(name, active, section):
        if name == 'MBEDTLS_DEPRECATED_REMOVED':
            return True
        return active if adapter is None else adapter(name, active, section)
    return continuation
2c18bf3d059a1a2d7555f09662509688023b61ed
22,007
def obuhvati(dijelovi):
    """Return the span (start, end) covering every part that has a span.

    (Docstring translated from Croatian.)  Parts are objects carrying
    ``_početak``/``_kraj`` attributes; parts without both are ignored.
    Returns None when no part has a span.
    """
    ranged = [dio for dio in dijelovi
              if hasattr(dio, '_početak') and hasattr(dio, '_kraj')]
    if not ranged:
        return None
    return (min(dio._početak for dio in ranged),
            max(dio._kraj for dio in ranged))
b78ae0044da989501fd7a95c0579375cda1c075b
22,008
from typing import Any import subprocess def _shell(args:str, **kwargs:Any) -> subprocess.CompletedProcess: """ Run a command in a shell for cross-platform support """ # WARN If shell=False on Windows then must give full path to the executable! return subprocess.run(args, shell=True, **kwargs)
b027977e3cea11ef5cd9900e2246eda31de536b7
22,009
def dict2cfgString(dictionary, separator="\n", assigner="="):
    """Convert a dictionary into a config-style string.

    Parameters
    ----------
    dictionary : dict
        The dictionary to be transformed.
    separator : str, optional
        The character used to separate individual entries. Default: "\\n".
    assigner : str, optional
        The character placed between each key and its value. Default: "=".

    Returns
    -------
    str
        ``f"{key}{assigner}{value}"`` entries joined by *separator*.
    """
    # Fix: the original wrapped the separator in a no-op
    # `"{}".format(separator)` before joining; join on it directly.
    return separator.join(f"{k}{assigner}{v}" for k, v in dictionary.items())
12c1f3b102429c22d1bb15714631908be41a63f2
22,010
def entry_check(entry, paramlist):
    """Ensure every key from *paramlist* exists in *entry*.

    Missing keys are added with "" as their value; existing non-int values of
    zero length (e.g. empty lists) are replaced with "".  Always returns 0.
    """
    for key in paramlist:
        if key not in entry:
            entry[key] = ""
            continue
        value = entry[key]
        if value != "" and not isinstance(value, int) and len(value) == 0:
            del entry[key]
            entry[key] = ""
    return 0
3edd686e63448306e6e1b1f6ff7c6e058cafe54b
22,012
import subprocess


def VerifyURL(url):
    """Verify that the given Google Storage URL exists.

    url: string; the Google Storage URL of the image file in question.
    Returns True when `gsutil ls` exits successfully.
    """
    proc = subprocess.Popen(['gsutil', 'ls', url],
                            stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
    return proc.wait() == 0
fc61b2b0ca6459b78e8e36c268f3a82da92de704
22,013
def redirect(start_response, target):
    """Issue a WSGI 302 redirect to *target*; the response body is empty."""
    headers = [('Location', target)]
    start_response('302 Found', headers)
    return []
7f81ffbd47c32dad6db47571e70622e3b0b3ffa9
22,014
def convert_indentation(indentation):
    """Normalize indentation (space count or literal prefix) to a string.

    :param int | str indentation:
    :return: str
    """
    if isinstance(indentation, int):
        return " " * indentation
    return indentation
97b223e0b1d3a210e0d3cc875d6b7bd4d4486d73
22,015
def mail_notification_label(context):
    """Mail notification name adapter: comma-join the context's target emails."""
    targets = context.target_email or ()
    return ', '.join(targets)
ea5ca49181a121dc6b7518d7c4df52e5c1a6fd62
22,016
def intersect(l0, l1):
    """Return the elements of *l0* that also appear in *l1*.

    Order and duplicates of *l0* are preserved.
    """
    return [item for item in l0 if item in l1]
4dbc2307eabfbe6312407b19e0924952c6dcc9cc
22,017
import re


def seems_like_section_name(line):
    """True when *line* starts with 'Para' or ends with ':', ignoring case and whitespace."""
    pattern = r'(^[^a-záéíóúü0-9]*para\b|:\s*$)'
    return re.search(pattern, line, re.IGNORECASE) is not None
d1c10116319c39cb5e0b5c57a2007703578da75c
22,019
import os


def get_immediate_subdirectories(a_dir, ignoreDirs):
    """Return the absolute paths of the immediate subdirectories of *a_dir*.

    Directories whose names end with any suffix in *ignoreDirs* are skipped.

    BUG FIX: the original used ``os.walk``, which recursed into every nested
    directory despite the documented "immediate subdirectories" contract;
    only direct children are listed now (in sorted order for determinism).
    """
    subDirs = []
    for name in sorted(os.listdir(a_dir)):
        full = os.path.join(a_dir, name)
        if not os.path.isdir(full):
            continue
        if any(name.endswith(suffix) for suffix in ignoreDirs):
            continue
        subDirs.append(os.path.abspath(full))
    return subDirs
45d591989ee9636da35be66ea57c4b3cc8b44602
22,020
def load_aci_data():
    """Static data simulating the response from the REST call:
    https://{{URL}}/api/node/class/fvCEp.json?rsp-subtree=children&order-by=fvCEp.mac

    :return: aci_data

    NOTE(review): "totalCount" is "4" but "imdata" holds 5 entries —
    confirm which is intended before relying on the count.
    """
    aci_data = {
        "totalCount": "4",
        "imdata": [
            # Endpoint in tenant SnV / app Rescue / EPG Web (vlan-123, IPv4)
            {"fvCEp": {"attributes": {"annotation": "", "childAction": "", "contName": "", "dn": "uni/tn-SnV/ap-Rescue/epg-Web/cep-42:5D:BC:C4:00:00", "encap": "vlan-123", "extMngdBy": "", "id": "0", "idepdn": "", "ip": "10.193.101.10", "lcC": "learned", "lcOwn": "local", "mac": "42:5D:BC:C4:00:00", "mcastAddr": "not-applicable", "modTs": "2020-04-10T11:11:11.736+00:00", "monPolDn": "uni/tn-common/monepg-default", "name": "42:5D:BC:C4:00:00", "nameAlias": "", "status": "", "uid": "0", "uuid": "", "vmmSrc": ""}}},
            # Same MAC in app Evolution_X (vlan-121, IPv6)
            {"fvCEp": {"attributes": {"annotation": "", "childAction": "", "contName": "", "dn": "uni/tn-SnV/ap-Evolution_X/epg-Web/cep-42:5D:BC:C4:00:00", "encap": "vlan-121", "extMngdBy": "", "id": "0", "idepdn": "", "ip": "2222::65:a", "lcC": "learned", "lcOwn": "local", "mac": "42:5D:BC:C4:00:00", "mcastAddr": "not-applicable", "modTs": "2020-04-10T11:11:11.736+00:00", "monPolDn": "uni/tn-common/monepg-default", "name": "42:5D:BC:C4:00:00", "nameAlias": "", "status": "", "uid": "0", "uuid": "", "vmmSrc": ""}}},
            # Same MAC in app Chaos (vlan-125, IPv4)
            {"fvCEp": {"attributes": {"annotation": "", "childAction": "", "contName": "", "dn": "uni/tn-SnV/ap-Chaos/epg-Web/cep-42:5D:BC:C4:00:00", "encap": "vlan-125", "extMngdBy": "", "id": "0", "idepdn": "", "ip": "10.193.101.10", "lcC": "learned", "lcOwn": "local", "mac": "42:5D:BC:C4:00:00", "mcastAddr": "not-applicable", "modTs": "2020-04-10T11:11:11.736+00:00", "monPolDn": "uni/tn-common/monepg-default", "name": "42:5D:BC:C4:00:00", "nameAlias": "", "status": "", "uid": "0", "uuid": "", "vmmSrc": ""}}},
            # Same MAC in app Power_Up (vlan-127, IPv6)
            {"fvCEp": {"attributes": {"annotation": "", "childAction": "", "contName": "", "dn": "uni/tn-SnV/ap-Power_Up/epg-Web/cep-42:5D:BC:C4:00:00", "encap": "vlan-127", "extMngdBy": "", "id": "0", "idepdn": "", "ip": "2222::65:a", "lcC": "learned", "lcOwn": "local", "mac": "42:5D:BC:C4:00:00", "mcastAddr": "not-applicable", "modTs": "2020-04-10T11:11:11.736+00:00", "monPolDn": "uni/tn-common/monepg-default", "name": "42:5D:BC:C4:00:00", "nameAlias": "", "status": "", "uid": "0", "uuid": "", "vmmSrc": ""}}},
            # Fifth entry: different MAC (00:50:56:80:65:64) on vlan-700,
            # same dn/name as the Power_Up entry above.
            {"fvCEp": {"attributes": {"annotation": "", "childAction": "", "contName": "", "dn": "uni/tn-SnV/ap-Power_Up/epg-Web/cep-42:5D:BC:C4:00:00", "encap": "vlan-700", "extMngdBy": "", "id": "0", "idepdn": "", "ip": "2222::65:a", "lcC": "learned", "lcOwn": "local", "mac": "00:50:56:80:65:64", "mcastAddr": "not-applicable", "modTs": "2020-04-10T11:11:11.736+00:00", "monPolDn": "uni/tn-common/monepg-default", "name": "42:5D:BC:C4:00:00", "nameAlias": "", "status": "", "uid": "0", "uuid": "", "vmmSrc": ""}}}
        ]
    }
    return aci_data
0213ccddf258c2f934f48c8f46b29039857fc26f
22,021
import yaml


def getConfig(config_path, env):
    """Load the YAML configuration for one environment.

    (Docstring translated from Korean.)

    :param config_path: path to the settings file
    :param env: environment key [local, dev, prod]
    :return: the settings dict for *env*, or None when the file is missing
    """
    try:
        with open(config_path, encoding='UTF8') as file:
            return yaml.load(file, Loader=yaml.FullLoader)[env]
    except FileNotFoundError:
        print("build first. $ python manage.py build")
d98e533e191076a7caae2b3473308351a86f037c
22,025
import copy


def get_recurs_class(g, derivLink):
    """Find the recurs_class property in the contents.

    Returns its integer value (0 when absent) together with a deep copy of
    *derivLink* from which every 'recurs_class' entry has been removed; the
    original dictionary is left untouched.
    """
    if derivLink['content'] is None or len(derivLink['content']) <= 0:
        return 0, derivLink
    recursClass = 0
    pruned = copy.deepcopy(derivLink)
    for idx in reversed(range(len(pruned['content']))):
        item = pruned['content'][idx]
        if item['name'] != 'recurs_class':
            continue
        try:
            recursClass = int(item['value'])
        except ValueError:
            g.raise_error('Incorrect recurs_class value: ' + item['value'])
        pruned['content'].pop(idx)
    return recursClass, pruned
e787614a5d433352551c684d134d5d85b870ec4a
22,026
def mangle_string(string):
    """Turn an arbitrary string into a decent folder/file name.

    Spaces become hyphens, then surrounding punctuation (underscores are not
    allowed, so they are stripped too) and finally double quotes are removed.
    """
    mangled = string.replace(' ', '-')
    mangled = mangled.strip(",./;'[]\|_=+<>?:{}!@#$%^&*()`~")
    return mangled.strip('"')
9d662d187e2d9b5e6f9f9a0c5373ceb52c457e93
22,027
def city_state(city, state, population=0):
    """Return a "City, State" string, optionally suffixed with the population."""
    result = city.title() + ", " + state.title()
    if population:
        result += f' - population {population}'
    return result
ae958598a57128cf36f63ff6bfb8181f9d07db31
22,028
def process_line(line, hint_occurence_count):
    """Rebuild a two-column line, annotating column-0 words with their counts.

    :param line: tab-separated line; column 0 holds '/'-separated words
        followed by a trailing number.  Columns beyond the second are dropped.
    :param hint_occurence_count: dict mapping 'word' (spaces joined with '_')
        to a count, or None to leave the columns untouched.
    :return: 'col0\\tcol1', with counts appended as 'word(1,234)' when given.
    """
    columns = line.split('\t')
    col0, col1 = columns[0], columns[1]
    if hint_occurence_count is not None:
        tokens = col0.split()
        trailing_num = tokens[-1]
        words = [w.strip()
                 for w in ' '.join(tokens[:-1]).split('/') if w]
        annotated = ['{}({:,d})'.format(w, hint_occurence_count['_'.join(w.split())])
                     for w in words]
        col0 = '/'.join(annotated) + " {}".format(trailing_num)
    return '\t'.join([col0, col1])
22b2fb3e8a4b4cff6f6bf8f716b22370ee67aa26
22,029
import random


def random_model(ctx, model_class_name):
    """Get a random model identifier by class name.

    Renders to ``'"ClassName(identifier)"'`` for use in fixture templates
    (e.g. ``category: {{ random_model('Category') }}``), or ``'None'`` when
    no identifiers of that class exist.

    :param ctx: The context variables of the current template
        (passed automatically); must hold 'model_identifiers'.
    :param model_class_name: The class name of the model to get.
    """
    identifiers = ctx['model_identifiers'][model_class_name]
    if not identifiers:
        return 'None'
    chosen = identifiers[random.randrange(0, len(identifiers))]
    return '"%s(%s)"' % (model_class_name, chosen)
1780e18046bde5af819eb34001b245f610886898
22,030
import random


def coin_flip(p=0.5):
    """Simulate a coin flip: True when random.random() exceeds *p*."""
    return random.random() > p
51cd54a946cedb60589fdc24eb5f061afb713681
22,031
import numpy


def vector2matrix(vecs):
    """Stack indexed vectors into a dense (m, n) float64 ndarray.

    :param vecs: sequence of (column_index, vector) pairs; the row count is
        taken from the first vector's size.
    """
    rows = vecs[0][1].size
    cols = len(vecs)
    dense = numpy.zeros(shape=(rows, cols)).astype(numpy.float64)
    for col, vec in vecs:
        dense[:, col] = vec
    return dense
3816026abacbe324d78ac8201fd152033fdbb1d9
22,032
def getBoundingBox(veclist):
    """Calculate bounding box (pair of vectors with minimum and maximum
    coordinates).

    >>> getBoundingBox([(0,0,0), (1,1,2), (0.5,0.5,0.5)])
    ((0, 0, 0), (1, 1, 2))"""
    if not veclist:
        # assume 3 dimensions when the list is empty
        return (0, 0, 0), (0, 0, 0)
    dim = len(veclist[0])
    lows = tuple(min(vec[axis] for vec in veclist) for axis in range(dim))
    highs = tuple(max(vec[axis] for vec in veclist) for axis in range(dim))
    return lows, highs
a2c035f85071e5a9f8dfee2c98cc46e86439a0cc
22,033
def feed_options_str(feed_options):
    """Convert a FeedOptions dict of values into an appropriate string value.

    Amazon docs for VAT upload with details:
    https://m.media-amazon.com/images/G/01/B2B/DeveloperGuide/vat_calculation_service__dev_guide_H383rf73k4hsu1TYRH139kk134yzs.pdf
    (section 6.4)

    Booleans become "true"/"false"; entries are rendered as
    "metadata:key=value" and joined with ';'.  Returns None for empty input;
    raises ValueError when *feed_options* is not a dict.
    """
    if not feed_options:
        return None
    if not isinstance(feed_options, dict):
        raise ValueError("`feed_options` should be a dict or None")
    parts = []
    for key, value in feed_options.items():
        if value is True or value is False:
            # Literal True/False must be rendered lowercase.
            value = str(value).lower()
        parts.append(f"metadata:{key}={value}")
    return ";".join(parts)
995b2927efb94cd92733b1058f423e863ca9c6e2
22,034
def add_table_suffix(table, suffix):
    """Append *suffix* to a table name, keeping a trailing backtick in place."""
    table = str(table)  # handle SQLAlchemy table objects
    if table.endswith("`"):
        return table.rstrip("`") + suffix + "`"
    return table + suffix
b5a46c60bb717c2b06949e4c47813a7c0f1764b7
22,036
def recall_eval_single(pre, ref):
    """Calculate recall for boundary vectors.

    Returns (recall, total_reference_boundaries); recall is 1.0 when the
    reference contains no boundaries.
    """
    total = ref.sum()
    if total == 0:
        return 1.0, 0
    diff = ref - pre
    # Positive exactly where ref has a boundary that pre missed.
    missed = (abs(diff) + diff) / 2
    recall = float(total - missed.sum()) / total
    return recall, total
3fe16d97563def3f3d9f54bda85bf48d678d513e
22,037
from typing import OrderedDict


def create_param_dict(param_file_name, outputPrefix):
    """Map each parameter name to its posterior-density file.

    :param param_file_name: original parameter file (lines like 'name=value')
    :param outputPrefix: prefix given in the ABCtoolbox estimation config
    :return: ordered dict of parameter name -> posterior density file name
    """
    density_file = '{}model0_MarginalPosteriorDensities_Obs0.txt'.format(outputPrefix)
    param_dict = OrderedDict()
    with open(param_file_name, "r") as param_file:
        for line in param_file:
            if "=" in line:
                param_dict[line.split("=")[0].strip()] = density_file
    return param_dict
9344c1727b5f7d23d3df1b79987d38e9bdff6191
22,039
import base64
import pickle


def dic_pickle_dumps_and_b64(data):
    """Pickle and base64-encode every value of *data* in place.

    Used before sending a json over a socket.

    :param data: dict whose values should be encoded
    :return: the same dict, values replaced with base64(pickle(value))
    """
    for key in data:
        data[key] = base64.b64encode(pickle.dumps(data[key]))
    return data
691e39ab66edbb184ac4386ac46d87e6b948aa17
22,040
def isempty(line):
    """Check whether *line* contains only whitespace (spaces, tabs, newlines)."""
    stripped = line.replace("\n", "").replace(" ", "").replace("\t", "")
    return len(stripped) == 0
abd8e429125cea6c9575d5e539f69f6a39a7ccfe
22,041
def filter_by_gender(df, male):
    """Filter the data by gender.

    Args:
        df: DataFrame with an integer `male` column (1 = male, 0 = female).
        male: True if male, False otherwise.

    Returns:
        DataFrame.
    """
    return df[df.male == (1 if male else 0)]
ce8a339a24cbf930fd4e96dda9acc786dda45e07
22,042
def get_matched_dyads(
    dyads,
    d0_key="doctype",
    d1_key="doctype",
    d0_values=None,
    d1_values=None,
):
    """Filter which returns dyads which match the specified conditions.

    Args:
        dyads (list): list of 2-item tuples. Each item is a dictionary
            (document).
        d0_key (str): key checked on the first document.
        d1_key (str): key checked on the second document.
        d0_values (list): any values to match (defaults to ["foxnews"]).
        d1_values (list): any values to match (defaults to ["foxnews"]).

    Returns:
        matched_dyads (list): filtered version of dyads.
    """
    # Fix: the original used mutable list defaults, which are shared across
    # calls; None sentinels preserve the documented defaults safely.
    if d0_values is None:
        d0_values = ["foxnews"]
    if d1_values is None:
        d1_values = ["foxnews"]
    return [
        dyad
        for dyad in dyads
        if dyad[0][d0_key] in d0_values and dyad[1][d1_key] in d1_values
    ]
bffe8e624d9c08d377d712d111be4f0a396ff07c
22,043
def words_to_word_ids(data=None, word_to_id=None, unk_key='UNK'):
    """Given a context (words) in list format and the vocabulary,
    return a list of IDs to represent the context.

    Parameters
    ----------
    data : a list of string or byte
        The context in list format (defaults to an empty list).
    word_to_id : a dictionary
        Maps words to unique IDs (defaults to an empty dict).
    unk_key : a string
        Unknown words = unk_key.

    Returns
    --------
    A list of IDs to represent the context; a word missing from
    ``word_to_id`` (or mapped to None) yields ``word_to_id[unk_key]``.

    Examples
    --------
    >>> words_to_word_ids([b'hello', b'you'], {b'hello': 6434, 'UNK': 0})
    [6434, 0]

    Code References
    ---------------
    - `tensorflow.models.rnn.ptb.reader <https://github.com/tensorflow/tensorflow/tree/master/tensorflow/models/rnn/ptb>`_
    """
    # Fixes: mutable default arguments ([] / {}) were shared across calls;
    # a large body of commented-out dead code has also been removed.
    if data is None:
        data = []
    if word_to_id is None:
        word_to_id = {}
    word_ids = []
    for word in data:
        word_id = word_to_id.get(word)
        # Preserve original semantics: a key mapped to None counts as unknown.
        word_ids.append(word_id if word_id is not None else word_to_id[unk_key])
    return word_ids
d94fc0a13449934fd6f3e7a5fc76929e6000c7d2
22,044
def clean_consecutive_duplicates(
    move_data, subset=None, keep='first', inplace=False
):
    """
    Removes consecutive duplicate rows of the Dataframe, optionally only
    certain columns can be consider.

    Parameters
    ----------
    move_data : dataframe
        The input trajectory data
    subset : Array of Strings, optional, default None(None by default)
        Specifies Column label or sequence of labels, considered for
        identifying duplicates. By default all columns are used.
    keep : 'first', 'last' or False, optional, default 'first'
        If keep is set as first, all the duplicates except for
        the first occurrence will be dropped.
        On the other hand if set to last, all duplicates except for
        the last occurrence will be dropped.
        If set to False, all duplicates are dropped.
    inplace : boolean, optional, default False
        if set to true the original dataframe will be altered,
        the duplicates will be dropped in place,
        otherwise a copy will be returned.

    Returns
    -------
    dataframe or None
        The filtered trajectories points without consecutive duplicates.
    """
    data = move_data if subset is None else move_data[subset]
    # A row is a run boundary when it differs from its neighbor; NaN from
    # shift() never compares equal, so the first/last rows always differ.
    differs_from_prev = (data.shift(1) != data).any(axis=1)
    differs_from_next = (data.shift(-1) != data).any(axis=1)
    if keep == 'first':
        filter_ = differs_from_prev
    elif keep == 'last':
        filter_ = differs_from_next
    else:
        # keep=False: drop every member of a consecutive-duplicate run.
        # (Previously keep=False silently behaved like keep='last',
        # contradicting the documented behavior.)
        filter_ = differs_from_prev & differs_from_next
    return move_data.drop(index=move_data[~filter_].index, inplace=inplace)
574ba4f6fba2d65f9680869178be1be9b45f0b97
22,047
from pathlib import Path


def hmm_data_exists(file_path: Path) -> bool:
    """
    Checks if HMM data exists in the local data path.

    :param file_path: Path to where `profiles.hmm` should be
    :return: True if both the `hmm` directory and `profiles.hmm` exist, else False
    """
    return file_path.is_file() and file_path.parent.is_dir()
417e98366a6e458badd8c7b16d58468efdb9af53
22,049
import os
import json


def load_config(filename):
    """Load a JSON configuration file from the ``config_files`` directory."""
    path = os.path.join('config_files', filename)
    with open(path, 'r') as handle:
        return json.load(handle)
70f7351d26f5ff514b0a39edc5df3064758d71eb
22,050
def parse_schema(raw: dict) -> dict:
    """Parse a field, adapter, or config schema into a more user friendly format.

    Args:
        raw: original schema
    """
    parsed = {}
    if not raw:
        return parsed
    required_names = raw["required"]
    for item in raw["items"]:
        item_name = item["name"]
        # Annotate each item with whether it is a required field.
        item["required"] = item_name in required_names
        parsed[item_name] = item
    return parsed
b3dd036b18a8847278d967bf363b212ee35d0d93
22,051
def get_program_number(txt_row):
    """
    Checks if the current line of text contains a program definition.

    Args:
        txt_row (string): text line to check.

    Returns:
        An integer number. If program number cannot be found, or is
        invalid in some way, a large negative number is returned.
    """
    sentinel = -9999
    # A program line must be at least "O" plus one digit.
    if len(txt_row) < 2 or txt_row[0] != "O":
        return sentinel
    try:
        return int(txt_row[1:])
    except ValueError:
        return sentinel
92f67bef6f36ad4e88e4840faa0878e72c0bb78c
22,052
def write_dfs_to_filepaths(dfs, filepaths):
    """
    Accepts a list of pandas dataframes - dfs
    and a parralel list of filepaths - pathlib path objects

    Writes the dataframes to the filepaths as csvs with no index,
    skipping any filepath that already exists.

    Returns the number of files written integer
    """
    written = 0
    for frame, path in zip(dfs, filepaths):
        if path.exists():
            continue  # never overwrite an existing file
        path.parent.mkdir(parents=True, exist_ok=True)
        frame.to_csv(path, index=False)
        written += 1
    return written
2e66c419f1a0a4472de0527cb157d2bf3bf33de8
22,053
def display_name(given_name, family_name, full_name):
    """
    Return an h-compatible display name the given name parts.

    LTI 1.1 launch requests have separate given_name
    (lis_person_name_given), family_name (lis_person_name_family) and
    full_name (lis_person_name_full) parameters. This function returns a
    single display name string based on these three separate names.
    """
    # The maximum length of an h display name.
    max_length = 30

    name = full_name.strip()
    if not name:
        # Fall back to "given family", tolerating either part being blank.
        name = f"{given_name.strip()} {family_name.strip()}".strip()
    if not name:
        return "Anonymous"
    if len(name) <= max_length:
        return name
    # Truncate and append an ellipsis, trimming any trailing whitespace.
    return name[: max_length - 1].rstrip() + "…"
0453024b715ad25d3b9dfb6c283d29f7504ac9ff
22,054
def ispath(text: str):
    """Return True if text starts with a LwM2M Path"""
    # Only the part before any '=' (a possible value assignment) matters.
    candidate = text.strip().split("=")[0].strip()
    # An empty path segment ("//") is never valid.
    if "//" in candidate:
        return False
    # Valid when stripping the '/' separators leaves only digits.
    return "".join(candidate.split("/")).isdigit()
6278fef2b234b66b0aaaa8e3eb2eff5c89404962
22,055
def format_duration(time): """format duration, unit of input time should be 'second'.""" fmt = "" if time < 1: fmt = "{:.3f} ms".format(time * 1000) elif time >= 1 and time < 60: # seconds fmt = "{:.3f} sec".format(time) elif time >= 60 and time < 3600: # minutes and seconds min = int(time / 60) sec = time - min * 60 fmt = "{:d} min {:.3f} sec".format(min, sec) elif time >= 3600 and time < 86400: # hours, minutes and seconds hour = int(time / 3600) min = int((time - hour * 3600) / 60) sec = time - hour * 3600 - min * 60 fmt = "{:d} hour {:d} min {:.3f} sec".format(hour, min, sec) else: # days, hours, minutes and seconds day = int(time / 86400) hour = int((time - day * 86400) / 3600) min = int((time - day * 86400 - hour * 3600) / 60) sec = time - day * 86400 - hour * 3600 - min * 60 fmt = "{:d} day {:d} hour {:d} min {:.3f} sec".format(day, hour, min, sec) return fmt
888357ed7be55ce98ea65e537addb22c1b4873fe
22,056
def gnome_sort(lst: list) -> list:
    """
    Pure implementation of the gnome sort algorithm in Python

    Sorts the given mutable collection in place, ascending, and returns it.

    Examples:
        >>> gnome_sort([0, 5, 3, 2, 2])
        [0, 2, 2, 3, 5]

        >>> gnome_sort([])
        []

        >>> gnome_sort([-2, -5, -45])
        [-45, -5, -2]

        >>> "".join(gnome_sort(list(set("Gnomes are stupid!"))))
        ' !Gadeimnoprstu'
    """
    if len(lst) <= 1:
        return lst
    pos = 1
    while pos < len(lst):
        if pos == 0 or lst[pos - 1] <= lst[pos]:
            # In order so far: step forward.
            pos += 1
        else:
            # Out of order: swap backwards like a gnome shuffling pots.
            lst[pos], lst[pos - 1] = lst[pos - 1], lst[pos]
            pos -= 1
    return lst
5aae393eabd046c20f51125e405d211555ed9bdb
22,057
def get_numeric_trace_attribute_value(trace, trace_attribute):
    """
    Get the value of a numeric trace attribute from a given trace

    Parameters
    ------------
    trace
        Trace of the log

    Returns
    ------------
    value
        Value of the numeric trace attribute for the given trace
    """
    attributes = trace.attributes
    if trace_attribute not in attributes:
        raise Exception("at least a trace without trace attribute: " + trace_attribute)
    return attributes[trace_attribute]
fd757861972dce8d9624efa13773bd4624cf9ace
22,058
import os
import csv


def known_cities():
    """Load the list of known cities and coordinates from ``cities.txt``.

    Returns:
        list of lists: the parsed CSV rows (the first row is always kept
        and presumably holds a header — TODO confirm against the file
        format), or ``None`` when ``cities.txt`` is missing or contains
        at most one row.
    """
    # Missing file: behave as "no known cities".
    if not os.path.exists('cities.txt'):
        return None
    else:
        with open('cities.txt', mode='r') as cities_file:
            csv_reader = csv.reader(cities_file, delimiter=',')
            locations = 0
            locs=[]
            for row in csv_reader:
                if locations == 0:
                    # First row is kept unconditionally, even if empty.
                    # print(", ".join(row))
                    locations += 1
                    locs.append(row)
                else:
                    # Later rows are kept only when non-empty.
                    if row:
                        locs.append(row)
                        #print(", ".join(row))
                        locations+= 1
            # NOTE(review): fewer than two rows is treated as "no data" —
            # confirm this threshold is intended.
            if locations <=1:
                return None
            return locs
f60c61b08ec76f97bd4b64421295235e38919cfc
22,059
def get_in_out_tensors(graph):
    """
    Get the input and output tensors from the TensorFlow graph 'graph'.
    """
    # All operations that perform computation on tensors.
    operations = graph.get_operations()

    inputs = []
    # Start from every op; anything consumed by another op is removed,
    # leaving only the terminal (output) ops.
    remaining = set(operations)

    for operation in operations:
        # Input nodes: no inputs of their own (constants excluded).
        if len(operation.inputs) == 0 and operation.type != 'Const':
            inputs.append(operation)
        else:
            for tensor in operation.inputs:
                remaining.discard(tensor.op)

    return inputs, list(remaining)
8c7c5c068bcb11d5d4f80f191af899dbf7e7aab1
22,061
import re def _match_channel_pattern(channel_name): """Returns a regex match against the expected channel name format. The returned match object contains three named groups: source, detector, and wavelength. If no match is found, a ValueError is raised. Parameters ---------- channel_name : str The name of the channel. """ rgx = r'^S(?P<source>\d+)_D(?P<detector>\d+) (?P<wavelength>\d+)$' match = re.fullmatch(rgx, channel_name) if match is None: msg = f'channel name does not match expected pattern: {channel_name}' raise ValueError(msg) return match
e40ba1b1c075d2fabae570bab36869b168ef6121
22,062
def give_me_five():
    """Return the constant integer 5."""
    return 5
b4b8b2015d7758fa1d2cb8b8c8381be974de63c2
22,063
def _interval(from_, to, interval, value, tolerance=1e-9): """clamp value to an interval between from_ and to range""" if interval > (to - from_): raise ValueError("Invalid increment") if value < from_ or value > to: raise ValueError("Invalid value") if abs(value - from_) < tolerance or abs(value - to) < tolerance: return value quotient, remainder = divmod(value, interval) if remainder < tolerance: return quotient * interval half_increment = interval / 2 if remainder > half_increment: return interval * (quotient + 1) else: return interval * quotient
c74d5d461084ea18ac557332adb9638c50ced782
22,065
def in_range(val, rng) -> bool:
    """Return True when *val* lies within the (possibly open-ended) range *rng*.

    Either bound may be None, meaning unbounded on that side.
    """
    assert len(rng) == 2
    low, high = rng
    if low is not None and val < low:
        return False
    if high is not None and val > high:
        return False
    return True
08a483a640680047e4e259da0121fceb388f408b
22,066
from datetime import datetime
import pytz


def with_gmt_offset(timezones, now=None):
    """
    Given a list of timezones (either strings of timezone objects),
    return a list of choices with

    * values equal to what was passed in
    * display strings formated with GMT offsets and without
      underscores. For example: "GMT-05:00 America/New York"
    * sorted by their timezone offset
    """
    now = now or datetime.utcnow()
    annotated = []
    for tz in timezones:
        name = str(tz)
        offset = pytz.timezone(name).utcoffset(now)
        # A non-negative timedelta equals its absolute value.
        sign = '+' if offset == abs(offset) else '-'
        label = "GMT{sign}{gmt_diff} {timezone}".format(
            sign=sign,
            gmt_diff=str(abs(offset)).zfill(8)[:-3],
            timezone=name.replace('_', ' '),
        )
        annotated.append((offset, tz, label))
    annotated.sort(key=lambda item: item[0])
    return [(tz, label) for _, tz, label in annotated]
b1916a3889016d3c608806fe0dab62f05801d5eb
22,068
def apply_activation_forward(forward_pass):
    """Decorator that ensures that a layer's activation function is
    applied after the layer during forward propagation.
    """
    def wrapper(*args):
        layer = args[0]
        raw_output = forward_pass(layer, args[1])
        if layer.activation:
            return layer.activation.forward_propagation(raw_output)
        return raw_output
    return wrapper
950f2b96cadfc8df075763b25961c7f088ec9232
22,069
import json
import random


def create_intro():
    """Build an introduction module for sleep-aid scripts.

    Picks one random line each from the 'first', 'warning' and 'follow'
    sections of content/introductions.json and joins them with newlines.

    Returns:
        str: the assembled introduction text, newline-terminated.
    """
    with open(r'content\introductions.json') as source:
        content_block = json.load(source)
    picked = []
    for section in ('first', 'warning', 'follow'):
        options = content_block[section]
        picked.append(options[random.randint(0, len(options) - 1)])
    return picked[0] + "\n" + picked[1] + "\n" + picked[2] + "\n"
53b26eebe9cdbf538c056ffe3752982cf16c32f6
22,070
def get_option_name(name):  # type: (str) -> str
    """Return a command-line option name from the given option name."""
    # Historical alias: the 'targets' option is exposed as '--target'.
    option = 'target' if name == 'targets' else name
    return f'--{option.replace("_", "-")}'
77d4625be99944fb0748f6f3524d453fbf511795
22,071
import numpy


def gridVertices(dim0Array, dim1Array, dtype):
    """Generate an array of 2D positions from 2 arrays of 1D coordinates.

    :param dim0Array: 1D array-like of coordinates along the first dimension.
    :param dim1Array: 1D array-like of coordinates along the second dimension.
    :param numpy.dtype dtype: Data type of the output array.
    :return: Array of grid coordinates such that grid[i, j] == (dim0Array[i],
        dim1Array[j]).
    :rtype: numpy.ndarray with shape: (len(dim0Array), len(dim1Array), 2)
    """
    grid = numpy.empty((len(dim0Array), len(dim1Array), 2), dtype=dtype)
    # numpy.asarray replaces numpy.array(..., copy=False): under NumPy >= 2.0
    # copy=False raises ValueError whenever a copy is required (e.g. for a
    # plain Python list input), whereas asarray copies only when needed.
    grid[..., 0] = numpy.asarray(dim0Array)[:, None]
    grid[..., 1] = numpy.asarray(dim1Array)
    return grid
743d1bff728c1a9dcb98a3894150ace9c0dfffaa
22,074
def get_latest_file_url(files: dict, starts_with: str, file_extension: str) -> str:
    """
    Get the url to a file which should start and have a specific file extension.

    Parameters
    ----------
    files : dict
        Keys are the filenames and the values are the urls.
    starts_with : str
        Start of the filename.
    file_extension : str
        End of the filename. For example the file extension.

    Returns
    -------
    str
        URL of the most recent file, or "" when nothing matches.
    """
    candidates = [
        fname
        for fname in files
        if fname.startswith(starts_with)
        and file_extension in fname
        and "volume" not in fname
    ]
    if not candidates:
        return ""
    # Filenames sort lexicographically; the max is the most recent one.
    return files.get(max(candidates), "")
4b9d115c983f9cdcf1a96905d7c198b688851dea
22,075
import math


def atanD(x):
    """Return the angle, in degrees, whose tangent is *x*.

    Args:
        x: a number.
    """
    return math.degrees(math.atan(x))
f602c1e9b5b24a642328b273d011aa8196786f77
22,076
def populate_trie_with_target(a_trie, target_sequences):
    """Read from a file with target sequences and load them into a trie structure

    Args: a_trie(trie), target_sequences(str) a filename with target DNA sequences
    Returns: a_trie(trie) populated with all target strings.

    Each line of the file is stripped, upper-cased and added to the trie.
    """
    with open(target_sequences, 'r') as handle:
        for raw_line in handle:
            a_trie.add(raw_line.strip().upper())
    return a_trie
ce6c25e92d763a020921acff558cfaa26d88f09c
22,077
def _extract_from_subworkflow(vs, step): """Remove internal variable names when moving from sub-workflow to main. """ substep_ids = set([x.name for x in step.workflow]) out = [] for var in vs: internal = False parts = var["id"].split("/") if len(parts) > 1: if parts[0] in substep_ids: internal = True if not internal: var.pop("source", None) out.append(var) return out
bd565b444084aa2b5dfab0444aaf13344240d17d
22,078
from math import isqrt


def int_sqrt(n: int) -> int:
    """
    Returns the integer square root of n (the floor of sqrt(n)).

    :param n: a non-negative int value
    :return: the integer square root
    :raises ValueError: if n is negative
    """
    # The old fallback caught ImportError around the *call* to isqrt, but a
    # missing math.isqrt (Python <= 3.7) fails at the import statement, never
    # here — and isqrt of a negative raises ValueError, not ImportError — so
    # the fallback was unreachable dead code and has been removed.
    if n < 0:
        raise ValueError("Square root is not defined for negative numbers.")
    return isqrt(n)
36b1f8f7becff523c6ab154aa90cdb2346310406
22,079
def convolucion_imagen(imagen: list) -> list:
    """
    Convolve the image with the user-given convolution matrix.

    Parameters:
        imagen (list): (M, N, 3) matrix holding the image to convolve.
    """
    # NOTE(review): this 3x3 all-ones kernel is defined but never applied;
    # the function currently returns the input image unchanged — the
    # convolution itself appears to be TODO. Confirm intended behavior.
    kernel = [[1, 1, 1], [1, 1, 1], [1, 1, 1]]
    return imagen
8fd5297d54967f5d57a8988116228ff874f28026
22,080
def chebyshev_distance(a, b):
    """
    Calculate the Chebyshev (L-infinity) distance of two vectors:
    the maximum absolute coordinate-wise difference.
    """
    return max(abs(x - y) for x, y in zip(a, b))
aa6fccc804ccbeb312e0bb41693feb05787d84f4
22,081
from pathlib import Path
import os


def home_dir():
    """
    Get home dir of current user, terminated with os.sep.
    """
    return f"{Path.home()}{os.sep}"
2f8833d1b2fd0a0a590f6c7c167d469f28543686
22,082
def sort(li):
    """
    Performs a mini radix sort on the top ten documents by chaining
    stable sorts from lowest to highest priority key. As Python sorts
    are stable, documents with identical rankings keep increasing
    document-id order.
    """
    # Lowest-priority key first: document id, ascending.
    ordered = sorted(li, key=lambda doc: doc[0])
    # Document ranking, descending.
    ordered.sort(key=lambda doc: doc[1], reverse=True)
    # Window length, ascending.
    ordered.sort(key=lambda doc: doc[3])
    # Highest priority: number of present words, descending.
    ordered.sort(key=lambda doc: doc[2], reverse=True)
    return ordered
1bd1c440e8e2492f22d67b78215d9a8c1f483c60
22,084
def basic_falling_factorial(high, low):
    """Returns high! / low!

    Computed as the product low+1 * low+2 * ... * high; returns 1 when
    high == low and 0 when high < low.
    """
    if high < low:
        return 0
    product = 1
    for factor in range(low + 1, high + 1):
        product *= factor
    return product
3cf429905f9c5e24bdf00fd48b8cdef1e85814ff
22,085
def lammps_equilibrated_npt(job):
    """Check if the lammps equilibration step has run and passed
    is_equilibrated for the job.

    Returns True when the NPT restart file exists for this job.
    """
    # The original trailing `and True` was a no-op; bool() keeps the
    # return value a plain boolean.
    return bool(job.isfile("equilibrated_npt.restart"))
4d4ff20fc020b29319393989f852a7b2479627f0
22,086
def listar():
    """Health-check endpoint: report that the service is functional."""
    return {"resultado": "OK"}
ad945d95e99f78091ab7fccd9d9c4837ffa3eb58
22,087
import time


def make_tstamp():
    """Return the current epoch time in microseconds."""
    seconds = time.time()
    return int(seconds * 1000000)
3f09dd141b7404c1c658bbdc7ee0adbd88b1a2df
22,090
import re


def validate_resource_id(res_id):
    """
    Helper to check if resource_id value is valid: one or more of
    lowercase letters, digits, '-' or ':'.
    """
    return bool(re.match(r"^[a-z0-9-:]+$", res_id))
9e252ab2bffe143175beb72958eff6cf80b9131f
22,092