content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
def fluid_is_valid_2(pos):
    """Determines whether a given position is valid or not.

    A position is valid when no component is negative, the first component
    is at most 2, and the second and third components are below 1.
    """
    first, second, third = pos[0], pos[1], pos[2]
    return min(pos) >= 0 and first <= 2 and second < 1 and third < 1
4aed57e61613f5fd1cfb13a4aeae6221b906f5af
82,502
def date_to_numerical(date):
    """Convert an MMDDYYYY date string to an integer for chronological sorting.

    Returns:
        value = year * 2000 + month * 100 + day. The last two decimal digits
        hold the day, the middle digits hold the month (1-12), and
        year * 2000 is guaranteed to dominate month * 100.
    """
    month = int(date[0:2])
    day = int(date[2:4])
    year = int(date[4:8])
    return year * 2000 + month * 100 + day
bca4309602b259dab0365b9dbe9013ee2c1144bb
82,503
def strip_comments(data, line_comment='#'):
    """Strip full-line comments from the input string and return the result.

    Only lines whose very first character is the delimiter (and empty lines)
    are dropped; trailing/leading whitespace of the result is stripped.

    :param data: multiline text to strip comments from
    :type data: str
    :param line_comment: the line comment delimiter
    :type line_comment: str
    :rtype: str
    """
    kept = []
    for line in data.splitlines():
        if line and line[0] != line_comment:
            kept.append(line)
    return '\n'.join(kept).strip()
cba01cb8b079d1927b1cb32674f62085cae020a5
82,506
def fbeta_precision_recall(precision_val, recall_val, beta):
    """Fbeta score for a given precision and recall.

    :param precision_val: precision score of a certain class
    :param recall_val: recall score of a certain class
    :param beta: beta coefficient
    :return: fbeta score (0 when precision + recall is 0)
    """
    # Guard against the undefined 0/0 case.
    if precision_val + recall_val == 0:
        return 0
    b2 = beta ** 2
    return (1 + b2) * (precision_val * recall_val) / (b2 * precision_val + recall_val)
942852910557ef9f880b91da8164bc5b511a9d33
82,510
def coding_problem_31(s, t, debt=0):
    """Given two strings, compute the edit distance between them.

    The edit distance is the minimum number of character insertions,
    deletions, and substitutions required to change one string into the
    other. Implemented as a naive recursion; `debt` accumulates the cost
    of edits already committed on the current path.

    Example:
        >>> coding_problem_31("top", "dog")  # t>>d, p>>g
        2
    """
    # Base case: once either string is exhausted, the rest must be
    # inserted/deleted wholesale.
    if not s or not t:
        return len(s) + len(t) + debt
    via_insert = coding_problem_31(s, t[1:], debt + 1)
    via_delete = coding_problem_31(s[1:], t, debt + 1)
    # Substitution costs 1 only when the leading characters differ.
    via_substitute = coding_problem_31(s[1:], t[1:], debt + (s[0] != t[0]))
    return min(via_insert, via_delete, via_substitute)
4b42065bd8e6f44386cd79344004eddbe4226e02
82,513
def isIDotData(x):
    """Test if the given object is an IDotData.

    This is a duck-typed check: any object whose class carries an
    `isIDotData` attribute is treated as an IDotData, so unrelated classes
    can opt into the interface. Classes inheriting from IDotDataRoot get
    the attribute automatically.
    """
    cls = type(x)
    return hasattr(cls, 'isIDotData')
b67aea4393769d1d6a0cc73f788729d729ade788
82,514
from typing import Tuple


def get_center(frame_size: Tuple[int, int]) -> Tuple[float, float]:
    """Compute the subpixel center of an image from its frame size.

    The coordinate convention places the *center* of the bottom-left pixel
    at (0, 0), so the bottom-left corner of the image sits at (-0.5, -0.5).
    Simplified port of `pynpoint.util.image.center_subpixel()`.

    Args:
        frame_size: `(x_size, y_size)` of the image in pixels.

    Returns:
        `(center_x, center_y)` as floats.
    """
    x_size = frame_size[0]
    y_size = frame_size[1]
    return x_size / 2 - 0.5, y_size / 2 - 0.5
cbc87580ee097059ba4f4d4f5bb67dab337572f0
82,517
def clean_whitespaces(text):
    """Collapse runs of whitespace in *text* to single spaces.

    Leading and trailing whitespace is removed as well.

    :param text: Text to clean.
    :returns: The cleaned text.

    >>> clean_whitespaces("this is a   text with  spaces")
    'this is a text with spaces'
    """
    # split() with no arguments already discards leading/trailing whitespace.
    tokens = text.split()
    return ' '.join(tokens)
71bf75d448a7991fb8dc3dfdc8344be41db2b9c2
82,518
def match_list_item(server, channel, item):
    """Match a list item against a server.channel, channel, or server combination."""
    # A dotted item must match the fully-qualified "server.channel" form.
    if "." in item:
        return "{}.{}".format(server, channel) == item
    # Otherwise it may name either the channel or the server alone.
    return item in (channel, server)
f6b3c88b6519515f96bcb64c70f48405dc8aba99
82,520
def GetRemoteCommand(container, command):
    """Assemble the remote command list given user-supplied args.

    If a container argument is supplied, run
    `sudo docker exec -i[t] CONTAINER_ID COMMAND [ARGS...]` on the remote.

    Args:
      container: str or None, name of container to enter during connection.
      command: [str] or None, the remote command to execute. If no command
        is given, allocate a TTY.

    Returns:
      [str] or None, Remote command to run or None if no command.
    """
    if container:
        # Without an explicit command, drop into an interactive shell (-it).
        remote_args = command if command else ['/bin/sh']
        exec_flags = '-i' if command else '-it'
        return ['sudo', 'docker', 'exec', exec_flags, container] + remote_args
    return command if command else None
e3a589b06901a6f4f336497825a00020823c326c
82,524
def get_file_content(file_path):
    """Return the textual content of the file at *file_path*.

    Fix: the file is opened read-only ('r'). The original used 'r+'
    (read/write), which needlessly required write permission and failed
    on read-only files.
    """
    with open(file_path, 'r') as f:
        return f.read()
ed5655c80538fa9f0615c5b53f7b2ec7cc39b99a
82,527
import re


def remove_spaces(str):
    """Replace every space with an underscore (intended for building links)."""
    # A plain replace is equivalent to re.sub(" ", "_", ...) for a
    # literal single-space pattern.
    return str.replace(" ", "_")
c6f44b308d70f1cfb6df9dc0c23328bc75890426
82,530
def load_table(loader, filename, index):
    """Load a table from the given index within a file.

    Simply delegates to *loader*, which is expected to return an
    array-like object.
    """
    table = loader(filename, index)
    return table
badd3087f83e1f567a6645cc97873b9abbc45088
82,531
def IndexOfNth(s, value, n):
    """Gets the index of the Nth occurrence of a given character in a string.

    :param str s: Input string
    :param char value: Char to search for.
    :param int n: Which occurrence (1-based) to locate.
    :return: Index of the Nth occurrence, or -1 if there are fewer than n.
    :rtype: int
    """
    seen = 0
    for position, ch in enumerate(s):
        if ch == value:
            seen += 1
            if seen == n:
                return position
    return -1
ae5fb070c7eebd442ab52d8731140dfef15d4d49
82,540
from typing import Sequence


def all_of_type(seq: Sequence, element_type) -> bool:
    """Return True iff every element's exact type equals *element_type*.

    Note this compares types with ``==`` (exact type, not isinstance), so
    subclasses do not count. An empty sequence returns True.
    """
    return all(type(item) == element_type for item in seq)
feb2f8dadf35795d85742b6f70eba07f365aaa31
82,542
def subset(data_frame, start, offset):
    """Get rows from start to start + offset.

    Args:
        data_frame (DataFrame): data
        start (int): start row index
        offset (int): number of rows

    Returns:
        DataFrame: positional slice of data_frame
    """
    stop = start + offset
    return data_frame.iloc[start:stop]
6fb5c36c2a8c1745e5b5e995c5945abb59c0cd59
82,543
def ParseSize(size):
    """Parse a human-readable size into bytes.

    Args:
      size: '<number><suffix>', where suffix is 'K', 'M', or 'G'
        (case-insensitive). A bare number is taken as bytes.

    Returns:
      The size in bytes.

    Raises:
      ValueError if the suffix is unrecognised or the number is not base-10.
    """
    if size.isdigit():
        return int(size)
    suffixes = ['K', 'M', 'G']
    suffix = size[-1].upper()
    nbytes = int(size[:-1]) * 1024
    if suffix not in suffixes:
        raise ValueError('unrecognised unit suffix "{}" for size {}'.format(
            suffix, size))
    # Each step up the suffix list multiplies by another factor of 1024.
    return nbytes * 1024 ** suffixes.index(suffix)
136d1d4db3bdcfba4e660c3328cdfae4a31f3e73
82,546
def attrdict(obj):
    """Create a dictionary of all attributes in an object.

    Works for classes with either __dict__ (via vars) or __slots__ (via
    dir + getattr). The returned object may alias the object's own
    __dict__ and so should *not be modified*.
    """
    try:
        return vars(obj)
    except TypeError:
        pass
    # No __dict__ (e.g. __slots__): fall back to enumerating attributes.
    return {name: getattr(obj, name) for name in dir(obj)}
9786ee6fc1e8fd638a9bef5578d773b6c4ba2055
82,550
def search_backwards(text, start_point, limit, pattern):
    """Search the range [limit, start_point] for instances of |pattern|.

    Returns the start offset (in |text|) of the match closest to
    |start_point|, or |limit| if there is no match.
    """
    assert limit < start_point
    window = text[limit:start_point]
    last_match = None
    for m in pattern.finditer(window):
        last_match = m
    if last_match is None:
        return limit
    # Offsets from finditer are window-relative; shift back into |text|.
    return limit + last_match.start(0)
939054d3fce5abdf2420914ec7dac31d0548ec00
82,552
import math


def round_to_next_target(hosts, raw_target):
    """Round up from a target percentage to the next achievable value.

    With few hosts, each host represents a fixed minimum percentage step;
    rounding up to a whole number of steps means the deploy never goes
    further than the user was told it would.
    """
    # Percentage contributed by a single host.
    minimum_step = 1. / len(hosts) * 100
    steps_needed = math.ceil(raw_target / minimum_step)
    return int(steps_needed * minimum_step)
1e6fdb016f855b5674eeede97b7eb63658eace08
82,558
def sort_by_value_get_col(mat):
    """Sort (row, col, value) entries by value and return the columns.

    :param mat: iterable of (row, col, value) tuples (a 1-row sparse matrix)
    :return: tuple of columns sorted by descending value, or [] if empty
    """
    ordered = sorted(mat, key=lambda entry: entry[2], reverse=True)
    if not ordered:
        return []
    _, columns, _ = zip(*ordered)
    return columns
fbafc01592174c3770e9c2f4855f957ffda4ccdc
82,563
def center_bin(bins):
    """Shift bin edges by half a step and drop the last edge.

    Useful for turning histogram bin edges into bin centers for plotting.
    Assumes uniformly spaced, array-like `bins` supporting element-wise
    addition (e.g. a numpy array).
    """
    half_step = 0.5 * (bins[1] - bins[0])
    return bins[:-1] + half_step
6d60a78c7b03f93f68c8005d226f5f4f098607ed
82,565
def load_from_file(filename):
    """Load a wordlist from a file.

    Args:
        filename: Name of file to load the wordlist from.

    Returns:
        List of lines (words) read from the file, without newlines.
    """
    with open(filename, 'r') as in_file:
        contents = in_file.read()
    return contents.splitlines()
d85a8335be02ac49e731374411cf53106b102036
82,566
def item_prefix(item):
    """Get the item prefix ('+','++','-','--','')."""
    # Longer prefixes are checked first so '++' is not reported as '+'.
    for candidate in ('++', '+', '--', '-'):
        if item.startswith(candidate):
            return candidate
    return ''
0ab5f77bf6b44a3770053785c835e9385f5b569f
82,572
def _parse_level_continuation_record(lc_rec):
    """Parse an ENSDF level-continuation record into branching ratios.

    Parameters
    ----------
    lc_rec : re.Match
        Regular-expression match object for one level-continuation line;
        only its last group (the payload text) is consumed.

    Returns
    -------
    dat : dict
        Maps reaction-channel labels (those containing '%') to their
        branching-ratio strings.
    """
    g = lc_rec.groups()
    dat = {}
    # ' AP ' ("approximately") is normalised to '=' so both separators
    # parse the same way below.
    raw_children = g[-1].replace(' AP ', '=')
    # '$' delimits fields on a continuation record.
    raw_children = raw_children.replace('$', ' ').split()
    for raw_child in raw_children:
        if '=' in raw_child:
            # Only the first two '='-separated pieces are used.
            rx, br = raw_child.split('=')[:2]
            br = br.strip()
        else:
            continue
        # Keep only percentage channels with a definite (non-'?'),
        # non-empty value.
        if '%' in rx and '?' not in br and len(br) > 0:
            dat[rx] = br
    return dat
8d50b2e700f78907f61fefc2c90670692dd67c91
82,580
def read_line_from_socket(caller):
    """Read a single newline-terminated line from ``caller.cnxn``.

    Reads 64-byte chunks until a chunk containing a newline arrives, then
    returns the accumulated text with '\\n' and '\\r' removed.

    Fix: the loop condition compared the str ``'\\n'`` against a bytes
    buffer, which raises TypeError on Python 3; it now tests ``b'\\n'``.

    NOTE(review): any bytes following the newline inside the final chunk
    are still appended (original behavior, preserved here).
    """
    line = ""
    buf = caller.cnxn.recv(64)
    while len(buf) > 0 and b'\n' not in buf:
        line += buf.decode()
        buf = caller.cnxn.recv(64)
    line = line + buf.decode()
    line = line.replace('\n', '')
    line = line.replace('\r', '')
    return line
2f7e62727b3a09caf4b16d66b995d1c6d89f09be
82,581
import random


def makeStarKeyword(
    isFind,
    xyPos,
    randRange,
    centroidRad,
    index = 0,
    xyErr = (1.0, 1.0),
    asymm = 10.0,
    fwhm = 3.0,
    counts = 10000,
    bkgnd = 100,
    ampl = 3000,
):
    """Return a star keyword with values.

    The fields are as follows, where lengths and positions are in binned
    pixels and intensities are in ADUs:
    0       type characer: c = centroid, f = findstars, g = guide star
    1       index: an index identifying the star within the list of stars
            returned by the command.
    2,3     x,yCenter: centroid
    4,5     x,yError: estimated standard deviation of x,yCenter
    6       radius: radius of centroid region
    7       asymmetry: a measure of the asymmetry of the object; the value
            minimized by PyGuide.centroid. Warning: not normalized, so
            probably not much use.
    8       FWHM major
    9       FWHM minor
    10      ellMajAng: angle of ellipse major axis in x,y frame (deg)
    11      chiSq: goodness of fit to model star (a double gaussian).
            From PyGuide.starShape.
    12      counts: sum of all unmasked pixels within the centroid radius.
            From PyGuide.centroid
    13      background: background level of fit to model star.
            From PyGuide.starShape
    14      amplitude: amplitude of fit to model star. From PyGuide.starShape
    For "g" stars, the two following fields are added:
    15,16   predicted x,y position
    """
    # "f" marks a findstars result, anything else is reported as a centroid.
    if isFind:
        typeChar = "f"
    else:
        typeChar = "c"
    # Optionally jitter the reported position uniformly within +/- randRange
    # (non-deterministic when randRange > 0).
    if randRange > 0:
        xyPos = [random.uniform(val - randRange, val + randRange) for val in xyPos]
    # ellMajAng and chiSq are emitted as the fixed placeholders 0.0 and 5.
    return "star=%s, %d, %0.2f, %0.2f, %0.2f, %0.2f, %0.0f, %0.0f, %0.1f, %0.1f, 0.0, 5, %0.0f, %0.1f, %0.1f" % \
        (typeChar, index, xyPos[0], xyPos[1], xyErr[0], xyErr[1], centroidRad, asymm, fwhm, fwhm, counts, bkgnd, ampl)
12024ac7d0732455d8f5afcae4b002e860db9c0c
82,585
def get_spaces_count_at_beginning(text):
    """Return the number of leading whitespace characters in *text*.

    Note: implemented with str.lstrip(), so tabs and newlines count as
    well as spaces.

    :param text: str
    :return: int
    """
    stripped = text.lstrip()
    return len(text) - len(stripped)
29a8018b327ae4b94e1ffc526825b56d52a1177b
82,586
import configparser


def load_config(config_path, logger):
    """Load the configuration file and return a ConfigParser object.

    A missing file is not an error: a warning is logged and an empty
    parser (defaults only) is returned.
    """
    parser = configparser.ConfigParser()
    parsed_files = parser.read(config_path)
    # read() returns the list of files successfully parsed; empty means
    # the file was not found.
    if not parsed_files:
        logger.warning('Config file ' + config_path + ' not found, using defaults')
    return parser
53431f6720902ed4b6fe30ee1f8c37281decc3e8
82,588
def standard_approximation(mean, std):
    """Approximate median and IQR from mean and standard deviation.

    Assumes the observations come from a normal distribution, under which
    the median equals the mean and the IQR is about 1.35 standard
    deviations.

    Parameters
    ----------
    mean : float
        Mean of the observed distribution.
    std : float
        Standard deviation of the observed distribution.

    Returns
    -------
    (median, iqr) under the normality assumption.
    """
    return mean, std * 1.35
4fb0d5beb6426b5aa1b54158385eb50e87adf392
82,589
def double(value):
    """Return twice the given value."""
    doubled = value * 2
    return doubled
337678a2f5afa2ad202a6320a7b9ba92c2660e86
82,591
def fork_url(feedstock_url: str, username: str) -> str:
    """Create the URL of the user's fork of a conda-forge feedstock."""
    prefix, repo_name = feedstock_url.rsplit("/", 1)
    # Drop the trailing 'conda-forge' owner (11 characters) from the prefix.
    prefix = prefix[:-11]
    return prefix + username + "/" + repo_name
203bcf2fb862f1e01200cda53f190b97e7829d89
82,592
def trim_to(string, length=80, tail="..."):
    """Trim *string* to at most *length* characters, appending *tail* if cut.

    :param string string: String to trim.
    :param int length: Maximum length to keep.
    :param string tail: Appended only when the string was actually trimmed.
    """
    if len(string) <= length:
        return string
    return string[:length] + tail
49b7e536277327726b9543526e02551c6bb92bf1
82,598
def identity(x):
    """The identity function: return *x* unchanged."""
    return x
c9ccb24dcde2452bd24919c01f14ca2fc836af29
82,600
def _create_evg_build_variant_map(expansions_file_data): """ Generate relationship of base buildvariant to generated buildvariant. :param expansions_file_data: Config data file to use. :return: Map of base buildvariants to their generated buildvariants. """ burn_in_tag_build_variants = expansions_file_data["burn_in_tag_buildvariants"] if burn_in_tag_build_variants: return { base_variant: f"{base_variant}-required" for base_variant in burn_in_tag_build_variants.split(" ") } return {}
91e44ffa85e616d23ffe4250ea0f2d9e304b6f58
82,602
def _intersect(groupA, groupB): """Returns the intersection of groupA and groupB""" return [item for item in groupA if item in groupB]
aee0728d12414ce91192d69fafcfafc43379a3dd
82,603
def sos_model_dict(scenario_only_sos_model_dict):
    """Config for a SosModel with one scenario and one sector model

    NOTE(review): this mutates (and returns) the dict passed in rather
    than copying it — callers should not reuse the original afterwards.
    """
    config = scenario_only_sos_model_dict
    # Two sector models: an economic model producing GVA, and a
    # water-supply model that consumes precipitation/GVA and produces
    # water plus its own reservoir level.
    config['sector_models'] = [
        {
            'name': 'economic_model',
            'inputs': [],
            'parameters': [],
            'outputs': [
                {
                    'name': 'gva',
                    'dims': ['LSOA'],
                    'coords': {'LSOA': [1, 2, 3]},
                    'dtype': 'float',
                    'unit': 'million GBP'
                }
            ]
        },
        {
            'name': 'water_supply',
            'inputs': [
                {
                    'name': 'precipitation',
                    'dims': ['LSOA'],
                    'coords': {'LSOA': [1, 2, 3]},
                    'dtype': 'float',
                    'unit': 'ml'
                },
                {
                    'name': 'rGVA',
                    'dims': ['LSOA'],
                    'coords': {'LSOA': [1, 2, 3]},
                    'dtype': 'float',
                    'unit': 'million GBP'
                },
                {
                    'name': 'reservoir_level',
                    'dims': ['LSOA'],
                    'coords': {'LSOA': [1, 2, 3]},
                    'dtype': 'float',
                    'unit': 'ml'
                }
            ],
            'parameters': [],
            'outputs': [
                {
                    'name': 'water',
                    'dims': ['LSOA'],
                    'coords': {'LSOA': [1, 2, 3]},
                    'dtype': 'float',
                    'unit': 'Ml'
                },
                {
                    'name': 'reservoir_level',
                    'dims': ['LSOA'],
                    'coords': {'LSOA': [1, 2, 3]},
                    'dtype': 'float',
                    'unit': 'ml'
                }
            ]
        }
    ]
    # Scenario outputs wired into the water-supply model at the current
    # timestep.
    config['scenario_dependencies'] = [
        {
            'source': 'climate',
            'source_output': 'precipitation',
            'sink_input': 'precipitation',
            'sink': 'water_supply',
            'timestep': 'CURRENT'
        },
        {
            'source': 'climate',
            'source_output': 'reservoir_level',
            'sink_input': 'reservoir_level',
            'sink': 'water_supply',
            'timestep': 'CURRENT'
        }
    ]
    # Model-to-model dependencies; the reservoir level feeds back into
    # water_supply from the PREVIOUS timestep.
    config['model_dependencies'] = [
        {
            'source': 'economic_model',
            'source_output': 'gva',
            'sink_input': 'rGVA',
            'sink': 'water_supply',
            'timestep': 'CURRENT'
        },
        {
            'source': 'water_supply',
            'source_output': 'reservoir_level',
            'sink_input': 'reservoir_level',
            'sink': 'water_supply',
            'timestep': 'PREVIOUS'
        }
    ]
    return config
90e6de680b22b77ca2eb32ee015afcfcbe415364
82,606
from typing import Any def _make_arg_str(arg: Any) -> str: """Helper function to convert arg to str. :param arg: argument :type arg: Any :return: arg converted to str :rtype: str """ arg = str(arg) too_big = len(arg) > 15 or "\n" in arg return "..." if too_big else arg
480161c8061df2fdfc14f051fd0c29657fbebc56
82,608
def mapped_opts(v):
    """Render a Python value as a string option to pass to Maxima.

    INPUT:

    - ``v`` - an object

    OUTPUT: a string. Python bools become the lower-case strings
    'true'/'false'; everything else is passed through ``str``.
    """
    return str(v).lower() if isinstance(v, bool) else str(v)
c471977f34ea5a3ff1bbee4535530f42f2a0c2c4
82,609
def dm_subset(dm_sq, idxs):
    """Select the symmetric submatrix of a square distance matrix.

    Args:
        dm_sq (numpy.array): squareform distance matrix from pdist
        idxs (list of int): indices to keep

    Returns:
        numpy.array: subset with shape ``(len(idxs), len(idxs))``
    """
    # Select the rows first, then the matching columns.
    rows = dm_sq[idxs]
    return rows[:, idxs]
596218a906502bcf878f7ec8b824afdb38fe76f9
82,612
def user_permissions(request):
    """Return a dictionary of permissions for the current user.

    :request: a WSGI request with url data ``resource`` and ``action``.
    """
    auth_backend = request.app.auth
    resources = request.url_data.get('resource', ())
    actions = request.url_data.get('action')
    return auth_backend.get_permissions(request, resources, actions=actions)
5c6d4ee1a06b9270ee24811b66740f68672096d3
82,613
def get_next_version(req_ver, point_to_increment=-1):
    """Return the version tuple after *req_ver*.

    Increments the component at *point_to_increment* (default: the last
    one) and drops everything after it.
    """
    bumped = req_ver[point_to_increment] + 1
    return req_ver[:point_to_increment] + (bumped,)
3a7f4356df43732fc0f03620615ecfead9a17d79
82,615
def _get_all_ret_events_after_time(masters, minions, event_listener, start_time): """ Get all the ret events that happened after `start_time` """ minion_pattern = "salt/job/*/ret/{}" events = [] for minion in minions: tag = minion_pattern.format(minion.id) matchers = [(master.id, tag) for master in masters] ret_events = event_listener.get_events(matchers, after_time=start_time) events.append(ret_events) return tuple(events)
0797f6590c931a41e0980236297600b212109e54
82,617
import csv


def read_traces_csv(filename):
    """Read a .csv file containing deduplicated logic analyser traces.

    The file should contain two columns: nanosecond start time and byte
    value. The byte value should be assumed to persist on the bus until
    the start time in the next record.

    Args:
        filename: .csv file to read.

    Returns:
        A list of (int start time, byte value string) tuples.
    """
    with open(filename) as f:
        reader = csv.reader(f)
        traces = [(int(timestamp), byte) for timestamp, byte in reader]
    return traces
28e475eccd09d93f2097231633029e3adc027960
82,633
def argparser(params):
    """For commands that use pagination, mods, and a variable first parameter.

    Returns `(<first param>, page, mod)`. The variable parameter *must*
    come first. Defaults are:
    - `param_1 = None`
    - `page = 1`, any `int` less than 1000
    - `mod = None`, valid mods are `["NM", "HD", "HR", "DT", "FM"]`.
    """
    param_1 = None
    page = 1
    mod = None
    # Try to determine what the mods and pages are, last param to first.
    # If we parse something that's clearly not a number or a mod we assume
    # the remainder composes the first parameter.
    for index in range(1, len(params) + 1):
        if params[-index].upper() in ["NM", "HD", "HR", "DT", "FM"]:
            # Fields in mongodb are case-sensitive, hence the upper().
            mod = params[-index].upper()
        elif params[-index].isdigit() and int(params[-index]) < 1000:
            page = int(params[-index])
        else:
            # The remainder is assumed to be the first parameter, at which
            # point we stop scanning.
            if index == 1:
                # Slicing from zero doesn't work, so join everything.
                param_1 = " ".join(params)
            else:
                param_1 = " ".join(params[:-(index - 1)])
            break
    return (param_1, page, mod)
578f496cd40f66fc31e311653c3ba8a195ec58a5
82,636
import socket


def ipv4(value):
    """Validate an IPv4 address.

    The address must parse via ``socket.inet_aton`` *and* contain exactly
    three dots (ruling out inet_aton's shorthand forms). Returns the value
    on success; raises ValueError otherwise.
    """
    try:
        socket.inet_aton(value)
    except socket.error:
        pass
    else:
        if value.count(".") == 3:
            return value
    raise ValueError("{0} is not a valid ipv4 address".format(value))
91619bf970ccdaa6cf7c730550e255ea648ac790
82,637
from typing import List


def get_subarray_indices(subarray, array) -> List[int]:
    """Locate a contiguous subsequence within a sequence.

    Args:
        subarray: Subsequence to find.
        array: Original sequence.

    Returns:
        List: `[start index, length]` of the first match, or `[-1, -1]`
        if the subsequence does not occur.
    """
    n = len(subarray)
    for start in range(len(array) - n + 1):
        if array[start:start + n] == subarray:
            return [start, n]
    return [-1, -1]
74182cf7886088e2c70b58569fd19734e69936df
82,639
import re


def urls_for_zone(zone, location_to_urls_map):
    """Returns list of potential proxy URLs for a given zone.

    Returns:
      List of possible URLs, in order of proximity.

    Args:
      zone: GCP zone
      location_to_urls_map: Maps region/country/continent to list of URLs, e.g.:
        {
          "us-west1" : [ us-west1-url ],
          "us-east1" : [ us-east1-url ],
          "us" : [ us-west1-url ],
          ...
        }
    """
    # Decompose the zone into region / approximate region / country,
    # e.g. zone = us-west1-b
    zone_match = re.match("((([a-z]+)-[a-z]+)\d+)-[a-z]", zone)
    if not zone_match:
        raise ValueError("Incorrect zone specified: {}".format(zone))
    region = zone_match.group(1)  # us-west1
    approx_region = zone_match.group(2)  # us-west
    country = zone_match.group(3)  # us
    urls = []
    # Most specific first: URLs for the exact region...
    if region in location_to_urls_map:
        urls.extend([
            url for url in location_to_urls_map[region] if url not in urls
        ])
    # ...then any region sharing the same approximate-region prefix...
    region_regex = re.compile("([a-z]+-[a-z]+)\d+")
    for location in location_to_urls_map:
        region_match = region_regex.match(location)
        if region_match and region_match.group(1) == approx_region:
            urls.extend([
                url for url in location_to_urls_map[location] if url not in urls
            ])
    # ...finally country-wide entries. Duplicates are skipped throughout.
    if country in location_to_urls_map:
        urls.extend([
            url for url in location_to_urls_map[country] if url not in urls
        ])
    return urls
7f85088d034795509be52373874cbf316fd31f24
82,642
import struct


def read_len(fp, ignoreEOF=False, verbose=False):
    """Read a 4-byte big-endian unsigned length from the stream.

    If fewer than 4 bytes are available and `ignoreEOF` is set, None is
    returned instead of raising an IOError.
    """
    raw = fp.read(4)
    if len(raw) != 4:
        if ignoreEOF:
            return None
        raise IOError("Failed to read length data")  # pragma no cover
    # '!L' = network byte order, 32-bit unsigned.
    return struct.unpack('!L', raw)[0]
7396c723fa7301b431e0bcba56d801e6f9816726
82,647
import hashlib


def file_checksum(path_or_obj, checksum_type):
    """Get the checksum of the given file.

    Args:
        path_or_obj (str): File path to checksum OR an opened file object
        checksum_type (str): Supported values are 'md5', 'sha1', 'sha256'.

    Returns:
        str: Hexadecimal file checksum
    """
    if checksum_type == 'md5':
        hasher = hashlib.md5()
    elif checksum_type == 'sha1':
        hasher = hashlib.sha1()
    elif checksum_type == 'sha256':
        hasher = hashlib.sha256()
    else:
        raise NotImplementedError(
            "No support for generating checksum type {0}"
            .format(checksum_type))

    # Probe for a file-like object; anything without read() is treated as
    # a path and opened here (and closed below).
    try:
        path_or_obj.read(0)
        file_obj = path_or_obj
    except AttributeError:
        file_obj = open(path_or_obj, 'rb')

    chunk_size = 65536
    try:
        while True:
            chunk = file_obj.read(chunk_size)
            if len(chunk) == 0:
                break
            hasher.update(chunk)
    finally:
        # Only close handles we opened ourselves.
        if file_obj != path_or_obj:
            file_obj.close()
    return hasher.hexdigest()
92ad40d20e463c284f80eb33adbe83bd7731528f
82,650
def _expanded_shape(ndim, axis_size, axis): """ Returns a shape with size = 1 for all dimensions except at axis. """ return tuple([axis_size if i == axis else 1 for i in range(ndim)])
90de4b9987fccc5e71c35388bddca3c8af36db76
82,651
def filter_captions(images_with_sd):
    """Remove images that already have captions from recommendations.

    Keeps only entries whose 'sd' field is not 'exists'.
    """
    return {
        name: info
        for name, info in images_with_sd.items()
        if info['sd'] != 'exists'
    }
eec40575e28ff1d52d0bdca106815cbfd64febcf
82,653
def _diff_msg(msg, elem1, elem2): """Creates a message for elements that differ in an assertion.""" return msg + ": " + str(elem1) + " != " + str(elem2)
1ebf7f214a69b1bc1ec7528e2a33d8fa7be3f2cb
82,661
def strip_meta(value: dict) -> dict:
    """Strip the "_meta" node from dict, recursively.

    Returns a new dict; the input is not modified.
    """
    cleaned = {}
    for key, val in value.items():
        if key == "_meta":
            continue
        cleaned[key] = strip_meta(val) if isinstance(val, dict) else val
    return cleaned
88cd5bc6667823043a6231a8c35c9bf022e6d33b
82,662
import typing def _encode_default(obj: typing.Any) -> typing.Any: """ 'default' callback for json.encode, encodes set() as a sorted list """ if isinstance(obj, set): return sorted(obj, key=lambda v: (type(v).__name__, v)) elif isinstance(obj, bytes): return obj.decode() raise TypeError(obj)
00b8db5825710aa6c52cd776f61b001b4512cc30
82,663
import configparser
import click


def get_config_item(config, section, item):
    """Get the value of a config item.

    Raises a ClickException with a user-friendly message when the section
    or item is missing or mis-interpolated.
    """
    try:
        return config.get(section, item)
    except (configparser.NoOptionError,
            configparser.NoSectionError,
            configparser.InterpolationSyntaxError):
        message = ('The item "{0}" is not set correctly in the "{1}" section '
                   'in your config file.'.format(item, section))
        raise click.ClickException(message)
0563b42cf196c18bdcbdf5799be3ae86b39c42e9
82,664
def trunc(text):
    """Truncate / right-pad the supplied text to a fixed display width.

    Strings of 30+ characters are cut to 30 and suffixed with '...';
    shorter strings are padded to 30 plus a trailing space, so both forms
    are 33 characters wide.
    """
    if len(text) < 30:
        return '{:<30} '.format(text)
    return '{:<30}...'.format(text[:30])
bf8882664c8b5a1922822fc29bbaf3d6f27addbf
82,665
from typing import List


def calib_repos() -> List[str]:
    """Provide the urls pointing to all calib files of the KITTI dataset.

    These are also available at the address here_.

    .. _here: http://www.cvlibs.net/datasets/kitti/raw_data.php
    """
    base = "https://s3.eu-central-1.amazonaws.com/avg-kitti/raw_data/{}_calib.zip"
    dates = ["2011_09_30", "2011_09_29", "2011_09_26", "2011_10_03", "2011_09_28"]
    return [base.format(date) for date in dates]
0c6b7a07a05a8457025ce2214925b8d9785deabf
82,666
import re def _parse_placename(placename: str) -> str: """ trim `the` Parameters ---------- placename : str valid placename from FFXIV starts with `the` Returns ------- str trimmed placename """ placename = placename.lower() check_placename = re.search('^the (.*)', placename) if check_placename: placename = ''.join(check_placename.groups()) return placename
89de04c2d510d398e9fd74522b78ea87544dac47
82,677
import re


def ismultibyte(value):
    """Return whether or not the given value contains one or more multibyte chars.

    Fix: the original used ``re.match``, which is anchored at position 0
    and therefore missed multibyte characters appearing later in the
    string; ``re.search`` scans the whole string, matching the documented
    "contains" semantics.

    Examples::

        >>> ismultibyte('あいうえお foobar')
        True
        >>> ismultibyte('abc')
        False

    :param value: string to validate one or more multibyte chars
    """
    multi_byte = re.compile(r"[^\x00-\x7F]")
    return bool(multi_byte.search(value))
866c6c0cf1b1b09d66f50b141ba0cd1b8badbc9d
82,679
import logging


def initialise_logger(name, logfile,
                      file_format_str = '%(asctime)s - %(name)s - %(levelname)s - %(message)s',
                      print_to_console = False,
                      console_format_str = '%(asctime)s - %(name)s - %(levelname)s - %(message)s',
                      level = "INFO"):
    """Initialise a logger object (from the logging module), to write logs
    to a specified logfile.

    NOTE(review): calls ``logging.basicConfig``, which configures the
    *root* logger — a process-wide side effect. The FileHandler also opens
    ``logfile`` immediately, and repeated calls with the same name stack
    additional handlers.

    Parameters
    ----------
    name : str
        logger name.
    logfile : str
        name/path to the log file.
    file_format_str : str, optional
        Format of the log messages written to the log file (see the
        logging module). The default is
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s'.
    print_to_console : bool, optional
        Switch to also print log messages to the console.
        The default is False.
    console_format_str : str, optional
        When `print_to_console == True`, the format of the console log
        messages. The default is
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s'.
    level : str, optional
        Name of a logging level attribute (e.g. "INFO", "DEBUG").

    Returns
    -------
    logger : logger object
    """
    # basic configurations
    #=====================
    logging.basicConfig(level=getattr(logging,level))
    logger = logging.getLogger(name)
    # file log handler
    #=================
    file_log_handler = logging.FileHandler(logfile)
    file_log_handler.setFormatter(logging.Formatter(file_format_str))
    logger.addHandler(file_log_handler)
    # console log handler
    #====================
    if print_to_console:
        console_log_handler = logging.StreamHandler()
        console_log_handler.setFormatter(logging.Formatter(console_format_str))
        logger.addHandler(console_log_handler)
    return logger
03cabae7d23e19485643729fbc727b44345bc2f5
82,681
from typing import Optional


def get_scope_note(term) -> Optional[str]:
    """Get the scope note from the preferred concept in a term's record.

    Returns the value of the first concept carrying a 'ScopeNote' key, or
    None when no concept has one.
    """
    for concept in term['concepts']:
        if 'ScopeNote' in concept:
            return concept['ScopeNote']
    return None
e5a3e2c9b3de8e68cd1f277dfd4a3e756ecd6fe1
82,685
def get_set_header(set_file):
    """Extract the header of a .set file, up to and including the
    `sw_version` line.

    Fix: opens the file read-only ('r'); the original used 'r+', which
    required write permission for a pure read operation.

    Parameters
    ----------
    set_file : str or Path
        Full filename of the .set file.

    ---
    Largely based on the gebaSpike implementation by Geoff Barrett
    https://github.com/GeoffBarrett/gebaSpike
    """
    header = ''
    with open(set_file, 'r') as f:
        for line in f:
            header += line
            if 'sw_version' in line:
                break
    return header
c08dacb771fa88e2e09cacd29bbdea6dec2df5cc
82,686
from typing import Counter


def ast_frequency_dist(seqs, discard_first=0):
    """Distribution of asteroid frequencies.

    List of (x, y), where x is the number of sequences (among `seqs`) that
    contain a given asteroid and y is the number of distinct asteroids
    with that level of occurrence. Optionally discards an initial
    sub-sequence of asteroids that may necessarily repeat across
    sequences (`discard_first`).

    Examples
    --------
    >>> ast_frequency_dist([(1,2,3), (3,4,5,6), (1,3,4)])
    [(1, 3), (2, 2), (3, 1)]
    """
    # How many sequences mention each asteroid...
    occurrences = Counter(
        asteroid for seq in seqs for asteroid in seq[discard_first:]
    )
    # ...then how many asteroids share each occurrence count.
    return sorted(Counter(occurrences.values()).items())
9a58e0ff1d862b6df45a991e2ced537ff7fbfee4
82,688
def dataset_part_filename(dataset_part, num_data):
    """Return the filename for a train/valid/test part of a dataset.

    Args:
        dataset_part: One of 'train', 'valid' or 'test'.
        num_data: Number of data samples parsed; a negative value means
            "all data" and omits the count from the name.
    """
    if num_data < 0:
        return '{}_data.npz'.format(dataset_part)
    return '{}_data_{}.npz'.format(dataset_part, num_data)
c5b2daae6e6254ad9d0c457b5217bbe0969a13ce
82,694
def cat_days(series, cat=1):
    """Return the count of days where category == cat."""
    matching = series[series == cat]
    return matching.count()
bb07bc6059d0e5c2de7fc0d0fbc4475db9ed83b3
82,697
def zone_url_to_name(zone_url):
    """Sanitize a DNS zone for use in terraform resource names.

    zone_url_to_name("mydomain.com.")
    >>> "mydomain-com"
    """
    trimmed = zone_url.rstrip(".")
    return trimmed.replace(".", "-")
48bd51c46c38b7655823f39d718705a71f5825db
82,698
def serialize_value(value):
    """Convert `value` into an ISO8601-compliant timestamp string.

    Args:
        value: A datetime.datetime value; falsy values (e.g. ``None``)
            are passed through as ``None``.

    Returns:
        An ISO8601 formatted timestamp string, or None.
    """
    return value.isoformat() if value else None
6bbd9f9d0d182290ee5e52010920ed14189b8990
82,700
import re


def validate_certificate_subject(subject):
    """
    Duplicate the get_subject validation logic defined in:
    sysinv/api/controllers/v1/kube_rootca_update.py

    Returns a tuple of True, "" if the input is None
    Returns a tuple of True, "" if the input is valid
    Returns a tuple of False, "<error details>" if the input is invalid
    """
    if subject is None:
        return True, ""

    # Supported subject parameter keys.
    supported = ('C', 'OU', 'O', 'ST', 'CN', 'L')
    # Split "K=V" pairs separated by whitespace (values may not contain '=').
    pairs = re.findall(r"([^=]+=[^=]+)(?:\s|$)", subject)
    parsed = dict(pair.split("=") for pair in pairs)

    if any(param not in supported for param in parsed):
        return False, ("There are parameters not supported "
                       "for the certificate subject specification. "
                       "The subject parameter has to be in the "
                       "format of 'C=<Country> ST=<State/Province> "
                       "L=<Locality> O=<Organization> OU=<OrganizationUnit> "
                       "CN=<commonName>")
    if 'CN' not in parsed:
        return False, ("The CN=<commonName> parameter is required to be "
                       "specified in subject argument")
    return True, ""
2181e06b8c0123fe1a823d32f9f21f8f2515df5a
82,705
def int2str(i, length=None):
    """Convert a non-negative integer to a string of byte-valued
    characters with the same bit pattern, most significant byte first
    (big endian: low bytes are collected first, then the string is
    reversed).

    Note: ``i == 0`` produces the empty string unless ``length`` is
    given.

    If ``length`` is specified, the result is left-padded with NUL
    characters, or truncated keeping the leading (most significant)
    characters, so that exactly ``length`` characters are returned.
    """
    s = ""
    # Peel off the least-significant byte each iteration.
    while i != 0:
        s += chr(i & 0xFF)
        i >>= 8
    # Reverse so the most significant byte comes first.
    s = s[::-1]
    if length is not None:
        sl = len(s)
        if sl < length:
            # Left-pad with NULs up to the requested length.
            return "\x00" * (length - sl) + s
        elif sl > length:
            # Truncate, keeping the leading characters.
            return s[0:length]
    return s
b2f30ffa8d3f45a18697950aaf89122aad74ce8a
82,706
import traceback


def format_result_exception(result):
    """Format a `runner.invoke` exception result into a nice string representation."""
    exc_info = getattr(result, "exc_info", None)
    stacktrace = "".join(traceback.format_exception(*exc_info)) if exc_info else ""
    return f"Stack Trace:\n{stacktrace}\n\nOutput:\n{result.output}"
c6cc2943f8f6dcefd7f4a35f7fe2438c3ee499ba
82,707
def get_full_message_size_unit(simple_unit: str) -> str:
    """Convert a simple message size unit prefix ('k' or 'm') to its
    full abbreviation ('KiB' / 'MiB').

    Raises:
        KeyError: If the prefix is not recognised.
    """
    full_units = {
        'k': 'KiB',
        'm': 'MiB',
    }
    return full_units[simple_unit]
c1b902de64f8813da4863b2d4708149ac4a24262
82,708
def vectorize_with_doc2vec(text, model):
    """Generate a vector representation of a text string using Doc2Vec.

    Args:
        text (str): Any arbitrary text string.
        model (gensim.models.Doc2Vec): A loaded Doc2Vec model object.

    Returns:
        numpy.Array: A numerical vector with length determined by the model used.
    """
    # Lowercase and whitespace-tokenize before inference.
    tokens = text.lower().split()
    return model.infer_vector(tokens)
5f4b49d4f2119cea74bb21f1dc5093a09588128f
82,710
def _lookup_attributes(glyph_name, data): """Look up glyph attributes in data by glyph name, alternative name or production name in order or return empty dictionary. Look up by alternative and production names for legacy projects and because of issue #232. """ attributes = ( data.names.get(glyph_name) or data.alternative_names.get(glyph_name) or data.production_names.get(glyph_name) or {} ) return attributes
2706f0da8d1fc780c7afb8cf4251d24b09e0fe88
82,713
import textwrap
import re


def unwrap(text: str) -> str:
    """Unwrap multi-line text to a single line.

    Removes the common leading indent, strips surrounding whitespace,
    and collapses each run of whitespace into a single space.
    """
    dedented = textwrap.dedent(text).strip()
    return re.sub(r"\s+", " ", dedented)
107c192765c798216ccc49b972c8d0fa49c4a470
82,714
def truncate_data(perc, dataset):
    """Truncate the training dataset.

    Args:
        perc: float between 0 and 1, percentage of training data kept.
        dataset: data, under the form (x_train, y_train), (x_test, y_test)

    Returns:
        dataset: truncated training dataset, full test dataset
    """
    (x_train, y_train), test_split = dataset
    keep = int(perc * x_train.shape[0])
    truncated_train = (x_train[:keep, :], y_train[:keep, :])
    return truncated_train, test_split
09805d58ca3bbd9bbab6eb96083a11f41cbb0d3f
82,717
def replace_operators(str):
    """Return the string with encoded mathematical operators restored:
    _p_ -> +, _m_ -> -, _t_ -> *, _d_ -> /."""
    for token, operator in (('_p_', '+'), ('_m_', '-'), ('_t_', '*'), ('_d_', '/')):
        str = str.replace(token, operator)
    return str
634d2a181953731eacaac1bab52be5c82404f579
82,719
def index_array(col):
    """
    Assemble an iterable of (head, text) tuples into a list of
    {"section_head": head, "section_text": text, "section_idx": i}
    """
    indexed_text = []
    for idx, (head, text) in enumerate(col):
        indexed_text.append({
            "section_head": head,
            "section_text": text,
            "section_idx": idx,
        })
    return indexed_text
5bce91960a389bb78df1287158d3c2ff269c1058
82,721
def parse_input(input_string):
    """Return `input_string` as an integer between 1 and 6.

    Check if `input_string` is an integer number between 1 and 6.
    If so, return an integer with the same value.  Otherwise, tell the
    user to enter a valid number and quit the program.
    """
    cleaned = input_string.strip()
    if cleaned not in {"1", "2", "3", "4", "5", "6"}:
        print("Please enter a number from 1 to 6.")
        raise SystemExit(1)
    return int(cleaned)
c15c51319492dc871f24e8f2d22a1d43d115232d
82,723
import hashlib


def file_sha256(src_file_path: str) -> str:
    """Calculate the SHA256 hash of the given file.

    Args:
        src_file_path: Path to the file to be hashed.

    Returns:
        SHA256 hash of the file in hex digest format.
    """
    digest = hashlib.sha256()
    with open(src_file_path, "rb") as src:
        # Stream the file in 64 KiB chunks to bound memory usage.
        for chunk in iter(lambda: src.read(65536), b""):
            digest.update(chunk)
    return digest.hexdigest()
18669bc466e0f27f4547deaa797703b145dedb8f
82,726
def file_serializer(obj):
    """Serialize an object version.

    :param obj: A :class:`invenio_files_rest.models.ObjectVersion` instance.
    :returns: A dictionary with the fields to serialize.
    """
    file_ = obj.file
    return {
        "id": str(obj.file_id),
        "filename": obj.key,
        "filesize": file_.size,
        "checksum": file_.checksum,
    }
d176ee435d23b68a2328e88dca6a5eaf89af86e6
82,727
def add_memory_arg(parser):
    """Add the required -m/--memory argument to a parser.

    The value must parse as an integer in [128, 3008] that is a
    multiple of 64; anything else triggers ``parser.error`` (which
    prints usage and exits).
    """
    def _validator(val):
        """Validate that `val` is an int in 128..3008 and a multiple of 64."""
        error = (
            'Value for \'memory\' must be an integer between 128 and 3008, and be a multiple of 64'
        )
        try:
            memory = int(val)
        except ValueError:
            raise parser.error(error)
        if memory < 128 or memory > 3008 or memory % 64 != 0:
            raise parser.error(error)
        return memory

    parser.add_argument(
        '-m', '--memory',
        required=True,
        help=(
            'The AWS Lambda function max memory value, in megabytes. '
            'This should be an integer between 128 and 3008, and be a multiple of 64.'
        ),
        type=_validator
    )
45a3fc09f06e78cb09049949af1e3acb4639ecb2
82,731
def submatrix(M, row, col):
    """
    Compute the matrix obtained from M by removing the row-th row and
    the col-th column.  This minor is useful for computing the
    determinant and adjugate matrix of M.

    Input:
       M      A matrix
       row    The index of the row to remove
       col    The index of the column to remove
    Output:
       rep    A submatrix
    """
    rep = []
    for i, current_row in enumerate(M):
        if i == row:
            continue
        rep.append([value for j, value in enumerate(current_row) if j != col])
    return rep
da3b00b391bc0f23ce14f6d4b12371a9440c066f
82,732
def get_semantic_descriptor(keyword, sentence):
    """(str, list) -> dict

    Return the semantic descriptor vector of `keyword` computed from
    `sentence`: a dictionary mapping every other word in the sentence
    to its number of occurrences.  If `keyword` does not appear in the
    sentence, the result is empty.

    >>> s2 = ['no', 'animal', 'must', 'ever', 'kill', 'any', 'other', 'animal']
    >>> get_semantic_descriptor('animal', s2) == {'no': 1, 'must': 1, 'ever': 1, 'kill': 1, 'any': 1, 'other': 1}
    True
    >>> get_semantic_descriptor('animal', ['hello', 'world'])
    {}
    >>> s3 = ['jingle', 'bells', 'jingle', 'bells', 'jingle', 'all', 'the', 'way']
    >>> get_semantic_descriptor('way', s3) == {'jingle': 3, 'bells': 2, 'all': 1, 'the': 1}
    True
    """
    if keyword not in sentence:
        return {}
    descriptor = {}
    for word in sentence:
        if word != keyword:
            descriptor[word] = descriptor.get(word, 0) + 1
    return descriptor
568f762bf6cffef7e2a9f7b410391b3af02865fd
82,737
from typing import Dict def _filter_out_none_values_recursively(dictionary: Dict) -> Dict: """Return copy of the dictionary, recursively omitting all keys for which values are None. >>> _filter_out_none_values_recursively({"k1": "v1", "k2": None, "k3": {"k4": "v4", "k5": None}}) {'k1': 'v1', 'k3': {'k4': 'v4'}} """ return { k: v if not isinstance(v, Dict) else _filter_out_none_values_recursively(v) for k, v in dictionary.items() if v is not None }
0a5f9670d8b6971d4f9076abf4443bbb0fe79257
82,738
from functools import reduce import operator def _vector_mult(*vecs): """For any number of length-n vectors, pairwise multiply the entries, e.g. for x = (x_0, ..., x_{n-1}), y = (y_0, ..., y_{n-1}), xy = (x_0y_0, x_1y_1, ..., x_{n-1}y{n-1}).""" assert(len(set(map(len, vecs))) == 1) #Проверяем что все вектора из vecs одинаковой длины return [reduce(operator.mul, a, 1) for a in zip(*vecs)]
54897918276e12470febc620219a7ce890390015
82,740
def get_size(bytes):
    """
    Convert a byte count into gigabytes.
    1 Gigabyte = 1024*1024*1024 = 1073741824 bytes
    """
    gib = 1024 ** 3
    return bytes / gib
f24ddae10323e9e04425e1e9b877c917bf3a56b5
82,744
def run_dqm_and_collect_solutions(dqm, sampler):
    """Send the DQM to the sampler and return the best sample found."""
    # Announce submission to the solver.
    print("\nSending to the solver...")
    sampleset = sampler.sample_dqm(dqm, label='Example - Immunization Strategy')
    # The first record in the sampleset is the best solution found.
    return sampleset.first.sample
6cd08620af1a5044570eb87fea105c6c8a532af8
82,750
def locate_geometry(path, fname):
    """
    Build the full path of the stove geometry file from the input file
    (command line argument).

    Args:
        path (str): directory of the stove geometry file
        fname (str): filename of the geom file (with extension)

    Returns:
        file_path (str): full file path ('<path>//<fname>') for the input
            stove geometry defined by the user in the input yaml file.
    """
    file_path = '//'.join((path, fname))
    # Echo the resolved path for the user.
    print(file_path)
    return file_path
891afe6e7aeae1b1e1cf4d0fa48c446f19f8cb89
82,752
def update_txt_docx(pt_txt, pt_docx_list, p, clean, print_p_by_p=False):
    """
    Factor function called by epilepsy_docx.

    1. `pt_txt` and `pt_docx_list` are updated paragraph by paragraph.
    2. `p` is a paragraph from a docx Document object.
    3. `clean` strips the accumulated text.
    4. `print_p_by_p` optionally echoes the text while it is being read.
    """
    paragraph_text = p.text
    pt_txt = '\n'.join((pt_txt, paragraph_text))
    pt_docx_list.append(paragraph_text)

    if print_p_by_p:
        print(paragraph_text)
    if clean:
        pt_txt = pt_txt.strip()
    return pt_txt, pt_docx_list
ada1c7848d7efd38ff2e1080c5095f6b97b63f0f
82,754
def ReverseComplement(seq):
    """Return the reverse complement of `seq`, preserving upper/lower case.

    Handles A/T/C/G in both cases plus the codes N/n and S/s.
    """
    complement = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C',
                  'a': 't', 't': 'a', 'c': 'g', 'g': 'c',
                  'N': 'N', 'n': 'n', 'S': 'S', 's': 's'}
    return ''.join(complement[nt] for nt in reversed(seq))
e779ef45656da7c49e8fb78df297b445e04300df
82,756
def compare_cluster(clust0, clust1, fields=("mult", "prototype")):
    """Given two clusters, compare the entries in `fields` (by default
    multiplicity and prototype) to determine whether they describe the
    same set of sites.

    This should work fine, but there's no guarantee from casm that the
    prototype will match across different basis sets.

    :clust0: json
    :clust1: json
    :returns: bool
    """
    return all(clust0[prop] == clust1[prop] for prop in fields)
78c160d1aeb761c3ff7095257b938162422ef45f
82,760
import re def _findFirst( pattern, src ): """ A helper function that simplifies the logic of using regex to find the first match in a string. """ results = re.findall( pattern, src ) if len(results) > 0: return results[0] return None
ced30ea0a31e22c0e78157ea193243ff04816b10
82,762
import inspect


def is_persistent_class(obj):
    """Check if an object is a class that is persisted in the Krake
    database (i.e. carries an ``__etcd_key__`` attribute).

    Args:
        obj: the given object to check.

    Returns:
        bool: True if the object given is a class persisted in the Krake
            database, False otherwise
    """
    if not inspect.isclass(obj):
        return False
    return hasattr(obj, "__etcd_key__")
3b5fc18fc248f8960dd69e06e46b093de1ff05d7
82,765
def convert_float(s):
    """
    Convert the string data field *s* to a float.  If the value is
    99999 (missing data) or 88888 (not observed), return not a number.
    """
    value = float(s)
    if int(value) in (99999, 88888):
        return float('nan')
    return value
896e9430a097f7ff06ffa6990ab02bdcc9b31dca
82,768
import torch


def empty_like(input, *args, **kwargs):
    """
    In ``treetensor``, ``empty_like`` creates a tensor (or tree of
    tensors) with the same shape as ``input`` but uninitialized values;
    this wrapper delegates to :func:`torch.empty_like`.

    Example::

        >>> import torch
        >>> import treetensor.torch as ttorch
        >>> ttorch.empty_like(torch.randn(2, 3)).shape
        torch.Size([2, 3])
    """
    result = torch.empty_like(input, *args, **kwargs)
    return result
3c7ed2e636fb413ba9480b4991ab71d4bbc2a6aa
82,771
def calc_CT_load_from_chiller_load(COP_chiller, chiller_load_kW):
    """
    Calculate the load of the cooling tower (CT) implied by a chiller
    load: Q_CT = Q_chiller * (1 + COP) / COP.

    :param COP_chiller: float
    :param chiller_load_kW: float
    :return: Q_CT_kW, float
    """
    rejection_factor = (1 + COP_chiller) / COP_chiller
    return chiller_load_kW * rejection_factor
3d714d8840e72ea81dd1ec38713f04f55f2711c9
82,775
import torch


def compute_normalization(data, one_hot_max_sizes):
    """
    Compute the normalization parameters (i.e. mean to subtract and std
    to divide by) for each feature of the dataset.

    For categorical features mean is zero and std is one; the i-th
    feature is categorical if one_hot_max_sizes[i] >= 2.  NaN entries
    are ignored when computing statistics for real-valued features.

    Args:
        data: 2D tensor of shape (num_samples, num_features).
        one_hot_max_sizes: per-feature one-hot sizes.

    Returns:
        Two 1D tensors: means and stds (length == number of features).
    """
    norm_vector_mean = torch.zeros(len(one_hot_max_sizes))
    norm_vector_std = torch.ones(len(one_hot_max_sizes))
    for i, size in enumerate(one_hot_max_sizes):
        if size >= 2:
            # Categorical feature: keep mean 0 / std 1.
            continue
        v = data[:, i]
        # BUG FIX: `v[1 - torch.isnan(v)]` relied on arithmetic on bool
        # tensors, which raises on modern PyTorch; use logical negation
        # to drop the NaN entries instead.
        v = v[~torch.isnan(v)]
        norm_vector_mean[i] = v.mean()
        norm_vector_std[i] = v.std()
    return norm_vector_mean, norm_vector_std
f10845ea3b31479d595b4fbb3af62fdec544fe86
82,776
def flatten_lists_one_level(potential_list_of_lists):
    """
    Wrapper to unravel or flatten a list of lists by exactly one level;
    non-list elements are kept as-is.

    :param potential_list_of_lists: list containing lists
    :return: flattened list
    """
    flattened = []
    for element in potential_list_of_lists:
        if isinstance(element, list):
            flattened += element
        else:
            flattened.append(element)
    return flattened
2d391bf050b9ec7baca6493089d56b4b9f00a0c0
82,777
def docs_drop_param(docstring):
    """Drop the first parameter description from a string representation
    of a docstring.

    Parameters
    ----------
    docstring : str
        Docstring to drop first parameter from.

    Returns
    -------
    str
        New docstring, with first parameter dropped.

    Notes
    -----
    This function assumes numpy docs standards.  It also assumes the
    parameter description to be dropped is only 2 lines long.
    """
    sep = '----------\n'
    split_at = docstring.find(sep) + len(sep)
    front = docstring[:split_at]
    back = docstring[split_at:]
    # The parameter entry spans two lines (name line + description line).
    back = back.split('\n', 2)[-1]
    return front + back
a15b2caa961e80722c45c0600e5ae751a2b031b5
82,778