content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
def calculate_transaction_revenue(trade_volume, last_price, brokerage_fee):
    """Return the net revenue from selling shares.

    Parameters:
        trade_volume (float): number of shares the user wants to sell
        last_price (float): last traded price of the stock
        brokerage_fee (float): flat cost of executing the transaction

    Returns:
        float: gross proceeds (volume * price) minus the brokerage fee
    """
    gross_proceeds = trade_volume * last_price
    return gross_proceeds - brokerage_fee
8c8ed78655a47bc9bea4f858a0b2c7773cc67791
90,125
def _data(arr, bitwidth=32): """Make a FuTIL-ready JSON data dict.""" return {'data': arr, 'bitwidth': bitwidth}
4527fedd1a4984b819aaff22fb050fbfb7d428ce
90,126
def reshape_to_vectors(x):
    """Flatten each image in the batch into a 1-D feature vector."""
    n_samples = x.shape[0]
    return x.reshape(n_samples, -1)
105eb2e8d6a1e3ed289788837213583843e8001e
90,132
def variant_name(variant):
    """Return *variant*, or the placeholder '<default>' when it is None."""
    return '<default>' if variant is None else variant
98b423ab860e7c2b112840812f9c5b61e9b8b62a
90,134
def mergeDictionaries(inputDict, separator=';'):
    """Merge several named dictionaries, annotating each value with its source.

    ``inputDict`` maps a source name to a dictionary. Values that share a
    key across sources are joined with ``separator``, each suffixed with
    the name of the source it came from.

    >>> mergeDictionaries({'sourceA': {'a': 'val1'}, 'sourceB': {'a': 'val2'} })
    {'a': 'val1 (sourceA);val2 (sourceB)'}
    """
    collected = {}
    for sourceName, sourceDict in inputDict.items():
        for key, value in sourceDict.items():
            collected.setdefault(key, []).append(f'{value} ({sourceName})')
    return {key: separator.join(parts) for key, parts in collected.items()}
069244b7e00bea2d7166d767b4f0cc06299f6257
90,135
def load_body_data(smpl_data, gender='female', idx=0, n_sh_bshapes=10):
    """
    Load one MoSHed pose sequence from CMU Mocap plus all CAESAR shape data.

    Args:
        smpl_data: archive holding *trans, *shape, *pose arrays
        gender: 'female' | 'male' — selects the CAESAR shape set
        idx: index of the mocap sequence to load
        n_sh_bshapes: number of shape blendshapes (PCA components) to keep
    """
    # sequence names are stored as 'pose_<name>' keys in the archive
    cmu_keys = [seq.replace('pose_', '') for seq in smpl_data.files
                if seq.startswith('pose_')]
    name = sorted(cmu_keys)[idx % len(cmu_keys)]

    # collect pose and translation arrays for the selected sequence only
    cmu_parms = {}
    wanted = 'pose_' + name
    for seq in smpl_data.files:
        if seq == wanted:
            cmu_parms[seq.replace('pose_', '')] = {
                'poses': smpl_data[seq],
                'trans': smpl_data[seq.replace('pose_', 'trans_')],
            }

    # all SMPL shapes for the requested gender, truncated to n components
    fshapes = smpl_data['%sshapes' % gender][:, :n_sh_bshapes]

    return (cmu_parms, fshapes, name)
4b1f240046388e0a0415ceb59384949355f03fcc
90,137
def get_channel_names_or_ids(metadata):
    """
    Build a list of unique channel labels, preferring names but falling
    back to the channel ID when the name is empty or shared by more than
    one channel.

    Parameters
    ----------
    metadata : pd.DataFrame
        Must contain 'channels.name' and 'channels.id' columns.

    Returns
    -------
    list of str
        One label (name or ID) per unique channel ID.
    """
    unique_rows = metadata.drop_duplicates(subset='channels.id')
    name_counts = unique_rows['channels.name'].value_counts()

    labels = []
    for _, row in unique_rows.iterrows():
        name = row['channels.name']
        # empty names and duplicated names are ambiguous -> use the ID
        if not name or name_counts[name] > 1:
            labels.append(row['channels.id'])
        else:
            labels.append(name)
    return labels
1bdffa07c8de0bfc5bba5e997d162d7ef4ca44ca
90,140
def calc_probabilities(applications):
    """Return a normalised selection weight for each application.

    Each weight is the application's advantage divided by the total
    advantage across all applications, so the returned list sums to 1.
    """
    advantages = [app.get_advantage() for app in applications]
    total = sum(advantages)
    return [adv / total for adv in advantages]
1eb07de427ec2c2fee5ab31cfad898346ae4d070
90,141
def which_prize(points):
    """Return a message telling a competitor which prize (if any)
    their point total earned."""
    if points <= 50:
        prize = "wooden rabbit"
    elif points <= 150:
        prize = None        # 51-150 wins nothing
    elif points <= 180:
        prize = "wafer-thin mint"
    elif points <= 200:
        prize = "penguin"
    else:
        prize = None        # above 200 also wins nothing
    if prize is None:
        return "Oh dear, no prize this time."
    return "Congratulations! You have won a {}!".format(prize)
fbc8d1aedf6be54c4d4b8e6baca516b55ea6ffaf
90,144
def to_table_name(stream_id):
    """Convert a stream ID into a table name (dashes become underscores)."""
    return '_'.join(stream_id.split('-'))
4b1accca35bef191e16e2679c299ae0c39da003e
90,146
from typing import Any


def format_response_dict(audio_path: str, response: Any, stt_provider: str) -> dict:
    """Formats the STT response as a dict to be written to json"""
    transcript = []
    conf = []
    words_confs = []

    if stt_provider == "google":
        for result in response.results:
            alt = result.alternatives[0]
            transcript.append(alt.transcript.strip())
            conf.append(alt.confidence)
            # collect each (word, confidence) pair for the alternative
            words_confs.extend((w.word, w.confidence) for w in list(alt.words))
    elif stt_provider == "ibm":
        for result in response['results']:
            alt = result['alternatives'][0]
            transcript.append(alt['transcript'].strip())
            conf.append(alt['confidence'])
            words_confs.extend(
                (word, word_conf) for word, word_conf in alt['word_confidence']
            )

    # filter out hesitation tag
    transcript = " ".join(transcript).replace("%HESITATION ", "")

    return {
        "audio_path": audio_path,
        "transcript": transcript,
        "confidence": conf,
        "words_confidence": words_confs,
    }
aee42a6ae6435f1da541cae52b038e89902f02c8
90,148
def filter_metadata(metadata, cells):
    """
    Restrict a cell-metadata dataframe to a kept subset of cells.

    Args:
        metadata (pd.DataFrame): cell metadata indexed by cell.
        cells (list-like): cells to keep.

    Returns:
        pd.DataFrame: metadata rows for the kept cells only.
    """
    kept = metadata.loc[cells]
    return kept
4e3754cafb07666056fffbb149e88072cbe7a1f7
90,149
def view_complete(value):
    """ Append necessary request values onto the url. """
    return value + "?view_adult=true&view_full_work=true"
4ff560a38c488ae72f5e11cffe4c1eec62754845
90,150
def inputLoop(inputList):
    """
    Repeatedly prompt on the command line until the user enters one of
    the terms in ``inputList``, then return the accepted term.
    """
    prompt = "Please choose one input from [" + ",".join(inputList) + "]: "
    while True:
        answer = input(prompt)
        if answer in inputList:
            return answer
bf86c3ab2939b42144721820efd468737a73f4f7
90,155
def _FollowedByEmpty(row, index): """Returns true if all columns after the given index are empty.""" return not any(row[index + 1:])
2c7322ab7916af65cdca32f0c05374d48ac88bf0
90,157
def tags(tag_coll):
    """Serializes the given tags to a JSON-ready dictionary.

    :param set[str] tag_coll: a set of tags
    :return: a dictionary suitable for JSON serialization
    :rtype: dict
    """
    ordered = sorted(tag_coll)
    return {'tags': ordered}
e7112ae8f00da14b6d2a4ccd269275548ee04dc9
90,162
def sitearea_fromqcm(M, mpc, nmol=1):
    """Average area of a surface site computed from QCM data.

    Parameters
    ----------
    M : float
        Molecular mass in atomic mass units
    mpc : float
        Mass per cycle in ng/cm2
    nmol : int, optional (default 1)
        Number of precursor molecules per unit formula of the solid

    Returns
    -------
    float
        Average area of a surface site in sq. meters
    """
    # 1e-5 converts ng/cm2 to g/m2; 6.022e23 is Avogadro's number
    sites_per_area = mpc * 1e-5 * 6.022e23 * nmol
    return M / sites_per_area
8d168d7d2475df4384c838dda08bf300404f26e7
90,170
def reshape_array(shape, array, flip_vertically=False, copy=False):
    """Reshape a flat array into a view of the given shape.

    Assigning to ``.shape`` (rather than calling ``reshape``) guarantees
    no data is copied; it raises ValueError when a copy would be needed.

    Examples
    --------
    >>> from landlab.utils.structured_grid import reshape_array
    >>> x = np.arange(12.)
    >>> y = reshape_array((3, 4), x)
    >>> y.shape == (3, 4)
    True
    >>> y
    array([[  0.,   1.,   2.,   3.],
           [  4.,   5.,   6.,   7.],
           [  8.,   9.,  10.,  11.]])
    >>> y.flags['C_CONTIGUOUS']
    True
    >>> x[0] = -1
    >>> y[0, 0]
    -1.0

    >>> x = np.arange(12.)
    >>> y = reshape_array((3, 4), x, flip_vertically=True)
    >>> y
    array([[  8.,   9.,  10.,  11.],
           [  4.,   5.,   6.,   7.],
           [  0.,   1.,   2.,   3.]])
    >>> y.flags['C_CONTIGUOUS']
    False
    >>> x[0] = -1
    >>> y[-1, 0]
    -1.0
    """
    reshaped_array = array.view()
    # The original wrapped this in `try/except ValueError: raise`,
    # which is a no-op; let the ValueError propagate naturally.
    reshaped_array.shape = shape

    if flip_vertically:
        reshaped_array = reshaped_array[::-1, :]

    if copy:
        return reshaped_array.copy()
    return reshaped_array
77d1f85a1be4c0987e396c54b7d31671db12ea37
90,173
def update_nested_dict(a, b):
    """
    Recursively update nested dict `a` with another dict `b`.

    usage::

        >>> a = {'x' : { 'y': 1}}
        >>> b = {'x' : {'z':2, 'y':3}, 'w': 4}
        >>> update_nested_dict(a,b)
        {'x': {'y': 3, 'z': 2}, 'w': 4}
    """
    # `.iteritems()` was Python 2 only and raises AttributeError on
    # Python 3; `.items()` is correct on both code paths.
    for k, v in b.items():
        if isinstance(v, dict):
            nested = a.setdefault(k, {})
            update_nested_dict(nested, v)
        else:
            a[k] = v
    return a
f9f2b91a43c041d58f74a7b50e7a2dabf5972006
90,175
def hello_user(name):
    """Return a Spanish greeting for *name*.

    Args:
        name (str): name to greet

    Returns:
        str: the string '¡Hola {name}!'
    """
    return f'¡Hola {name}!'
2206f6f2256bd465d5a5a5f0b6b71d2e559ecf35
90,183
def chunk(obj, max_length):
    """Split a sliceable object into pieces no longer than *max_length*.

    Works for strings, lists, or anything supporting len() and slicing::

        chunk("aa bb cc dd ee ff gg", 5)
        -> ['aa bb', ' cc d', 'd ee ', 'ff gg']

        chunk(["1", "2", "3", "4", "5", "6"], 5)
        -> [["1", "2", "3", "4", "5"], ["6"]]
    """
    pieces = [obj]
    index = 0
    # iterative equivalent of the original recursion: split the element
    # at `index` in place and push its tail to the end of the list
    while index < len(pieces):
        if len(pieces[index]) > max_length:
            pieces.append(pieces[index][max_length:])
            pieces[index] = pieces[index][:max_length]
        else:
            index += 1
    return pieces
5aaf241f63f05a4bd0194fa536f9a0bfb53c7aa1
90,187
def mean(data):
    """Calculate the arithmetic mean of a list of numbers.

    Parameters:
        *data* a list of numbers whose mean to calculate
    """
    total = float(sum(data))
    return total / len(data)
ecc8809dc3a33e4bb451b892b4c7348b87a895b3
90,193
def generate_ordered_sequences(waypoint_lists):
    """
    Given an ordered list of lists of possible choices, generate all
    possible ordered sequences of one choice per list.
    """
    if not waypoint_lists:
        return []
    if len(waypoint_lists) == 1:
        return waypoint_lists[0]

    sequences = []
    # recurse on the tail once, then prepend every head choice
    tails = generate_ordered_sequences(waypoint_lists[1:])
    for head in waypoint_lists[0]:
        for tail in tails:
            tail_part = tail if type(tail) == list else [tail]
            sequences.append([head] + tail_part)
    return sequences
8bb0fe184b1c98bbfc769b90473b77719e59eec0
90,199
import typing from typing import OrderedDict import ast import itertools def _build_grid_search_commands(script: str, params: typing.Iterable[str]) -> typing.Iterable[typing.List[str]]: """ Build all grid search parameter configurations. :param script: String of command prefix, e.g. ``emloop train -v -o log``. :param params: Iterable collection of strings in standard **emloop** param form, e.g. ``'numerical_param=[1, 2]'`` or ``'text_param=["hello", "cio"]'``. """ param_space = OrderedDict() for arg in params: assert '=' in arg name = arg[:arg.index('=')] options = arg[arg.index('=') + 1:] options = ast.literal_eval(options) assert isinstance(options, list), options param_space[name] = options param_names = param_space.keys() commands = [] for values in itertools.product(*[param_space[name] for name in param_names]): command = str(script).split() for name, value in zip(param_names, values): command.append(str(name) + '="' + str(value) + '"') commands.append(command) return commands
b8ab61b8babdba187f2f3a6e010dbd03fe3e9160
90,200
import json


def load_json(json_path):
    """Load a json file as a dictionary.

    Args:
        json_path (str): path to json file

    Returns:
        dict: dictionary representation of json file
    """
    with open(json_path, "r") as handle:
        contents = json.load(handle)
    return contents
25a9a23335c9be66cd00eb66c081e2d75a02e350
90,204
import random


def randbytes(count) -> bytes:
    """Return *count* uniformly random bytes."""
    values = [random.randint(0, 255) for _ in range(count)]
    return bytes(values)
0101e3f584c01f229471543d7bab0b051b56be6d
90,210
def apply_color_matrix(im, m):
    """Apply a 3x3 color matrix to every color vector in an array.

    Parameters
    ==========
    im : array of shape (...,3)
        Image or flat array whose last dimension is a 3-channel color.
    m : array of shape (3,3)
        Color matrix to apply.

    Returns
    =======
    array of shape (...,3)
        Each input color vector multiplied by m.
    """
    # For each color (row) vector v we want m @ v.  Since
    # m @ v == (v @ m.T) for 1-D v, right-multiplying by m.T lets numpy
    # broadcast one big matmul over all leading dimensions — generally
    # much faster than the equivalent np.einsum('ij, ...j', m, im).
    return im @ m.T
8332429207cfc6b025a0cbccf57ce63680d3a3c2
90,217
def pre_hash(triple, contains_iter=True):
    """Normalise a (profile, positive, negative, ...) tuple for hashing.

    Every element is converted to str. When ``contains_iter`` is true the
    first element is treated as an iterable and becomes a sorted tuple of
    strings; any elements past the third are dropped.

    Example:
        ([1, 2], 3, 4, 5)  ->  (('1', '2'), '3', '4')
        (1, 2, 3) with contains_iter=False  ->  ('1', '2', '3')
    """
    positive = str(triple[1])
    negative = str(triple[2])
    if not contains_iter:
        return (str(triple[0]), positive, negative)
    profile = tuple(sorted(str(item) for item in triple[0]))
    return (profile, positive, negative)
05c29793fd8b67432ba88786c2fbf4c78efb1e5e
90,222
def natural_size(num: float, unit: str = "B", sep: bool = True) -> str:
    """
    Convert a number to a human readable string with decimal prefix.

    :param float num: Value in the given unit.
    :param unit: Unit suffix.
    :param sep: Whether to separate unit and value with a space.
    :returns: Human readable string with decimal prefixes.
    """
    gap = " " if sep else ""
    for prefix in ("", "K", "M", "G"):
        if abs(num) < 1000.0:
            return f"{num:3.1f}{gap}{prefix}{unit}"
        num /= 1000.0
    # anything still >= 1000 G lands in tera
    return f"{num:.1f}{gap}T{unit}"
403f88bce8ababc860ea12f38ed6878406d9426d
90,224
def _parse_angl_line( line ): """Parse an AMBER frcmod ANGL line and return relevant parameters in a dictionary. AMBER uses angle and force constant, with the factor of two dropped. Here we multiply by the factor of two before returning. Units are degrees and kilocalories per mole.""" tmp = line.split() params = {} params['smirks'] = tmp[0] params['k'] = str(2*float(tmp[1])) params['angle'] = tmp[2] return params
4c9dfddd5709ccb3e3e68f891ff388b188e8e51d
90,225
import base64


def b64(inputStr):
    """
    Encode a str (or bytes) to its base64 value.

    Returns the base64-encoded bytes; str input is utf-8 encoded first.
    """
    # The original tested `inputStr is bytes`, which compares against the
    # *type object* and is never true for actual byte strings, so bytes
    # input crashed on `.encode`. isinstance is the correct check.
    if isinstance(inputStr, bytes):
        return base64.b64encode(inputStr)
    return base64.b64encode(inputStr.encode('utf-8'))
724acb851b71caeb5c3fb9408936ba22ed616a58
90,229
def get_TD_error(new_value, value, reward, has_finished, discount):
    """Return the temporal-difference error for one transition.

    Args:
        new_value: Value at next state.
        value: Value at current state.
        reward: Reward in transitioning to next state.
        has_finished: If the game has finished.
        discount: Discount factor.

    Returns:
        The TD error.
    """
    # At a terminal state the reward is the whole return — don't bootstrap.
    bootstrap = 0 if has_finished else discount * new_value
    return reward + bootstrap - value
401a888e3da2ddd43b32989bc5c5a58b45d994e5
90,232
def histogram(s):
    """
    Returns a histogram (dictionary) of the # of letters in string s.

    The letters in s are keys, and the count of each letter is the value.
    If the letter is not in s, then there is NO KEY for it in the histogram.

    Example: histogram('') returns {},
    histogram('all') returns {'a':1,'l':2}
    histogram('abracadabra') returns {'a':5,'b':2,'c':1,'d':1,'r':2}

    Parameter s: The string to analyze
    Precondition: s is a string (possibly empty).
    """
    # Single O(n) pass; the original called s.count(x) for every
    # character, which is O(n^2) in the length of s.
    result = {}
    for x in s:
        result[x] = result.get(x, 0) + 1
    return result
2580040d031ae9520c458714787031cba17badd2
90,241
def _fix_floating_point(a): """Iterate through an array of dicts, checking for floats and rounding them.""" def get_type(thing): try: return thing['type'] except KeyError: return 'message' for thing in a: if get_type(thing) not in ['cbg', 'smbg']: for key, val in thing.items(): if isinstance(val, float): thing[key] = round(val, 3) return a
379d280bf56eca386e940623d6450f1c3f1824ef
90,243
def remove_suffix(input_string, suffix):
    """Return input_string with *suffix* stripped from its end, if present."""
    if not suffix or not input_string.endswith(suffix):
        return input_string
    return input_string[:-len(suffix)]
207b872b4b8a41f843df245a2cfcb756f750e508
90,244
def chop_text(dc, text, max_size):
    """
    Chops the input `text` if its size does not fit in `max_size`, by
    cutting the text and adding ellipsis at the end.

    :param `dc`: a `wx.DC` device context;
    :param `text`: the text to chop;
    :param `max_size`: the maximum size in which the text should fit.
    """
    # first check if the text fits with no problems
    x, y = dc.GetMultiLineTextExtent(text)
    if x <= max_size:
        return text

    # find the longest prefix that, with an ellipsis appended, still fits
    last_good_length = 0
    for i in range(len(text)):
        s = text[:i] + '...'
        x, y = dc.GetTextExtent(s)
        if x > max_size:
            # Return the last prefix that fit. (The original returned
            # text[:i-1]; for i == 0 that produced text[:-1] + "...",
            # which is *longer* than the input.)
            return text[:last_good_length] + "..."
        last_good_length = i

    return '...'
c36b6869da00701ccc7da96464d6f99a2ad2cb4a
90,247
def getColor(cnvtype, pathOrBen):
    """Return the RGB shade string for a CNV given its type and
    pathogenicity; unknown combinations map to black ("0,0,0")."""
    shades = {
        ("copy_number_loss", "Pathogenic"): "180,3,16",
        ("copy_number_loss", "Benign"): "238,146,148",
        ("copy_number_gain", "Pathogenic"): "17,44,138",
        ("copy_number_gain", "Benign"): "122,165,211",
    }
    return shades.get((cnvtype, pathOrBen), "0,0,0")
f9f90c05788d9f18bed3f837b71b71a9126e5024
90,248
def radio_state(button):
    """Radio state.

    Parameters
    ----------
    button : QtGui.QRadioButton
        the button whose state to read

    Returns
    -------
    response : bool
        True (if Yes selected), False (if No selected)
    """
    # isChecked() already returns the boolean we need; the original
    # if/else around it was redundant.
    return button.isChecked()
1246bceae2bd286658789a1357e709fab066995b
90,257
import json


def read_coef(configfile="config_dbzhtorate.json"):
    """
    Read Z-R and Z-S conversion coefficients from file.

    Keyword arguments:
    configfile -- json file containing coefficients

    Return:
    coef -- dictionary containing coefficients
    """
    with open(configfile, "r") as jsonfile:
        coef = json.load(jsonfile)['coef']
    return coef
e27f5ee3b51387ae03b2b97cf1f02da7bfc7c483
90,259
import math


def truncate(number, digits) -> float:
    """Truncate `number` to `digits` decimal digits (toward zero)."""
    scale = 10.0 ** digits
    return math.trunc(number * scale) / scale
1087d62b0ae92071cd3a9b12ddf4dea596f13c60
90,262
def get_prop(obj, prop, mytype='str'):
    """Fetch obj[prop] while tolerating missing keys and None values.

    In 'str' mode the value is returned utf-8 encoded (with '' as the
    fallback); any other mode returns the raw value or None.
    """
    present = prop in obj and obj[prop] is not None
    if mytype == 'str':
        if present:
            return obj[prop].encode('utf-8')
        return ''
    return obj[prop] if present else None
fd8caae164d81588fbb3c6989aee80ea869f3688
90,266
def set_attr(object, attribute, value):
    """
    Set the named attribute on the given object to the specified value
    and return the object. ``set_attr(x, 'y', v)`` is ``x.y = v`` plus
    returning ``x``.

    :param object: The object
    :type object: ```Any```

    :param attribute: The attribute
    :type attribute: ```str```

    :param value: The value
    :type value: ```Any```
    """
    setattr(object, attribute, value)
    return object
1598fcee59da9fa74870295d7080b26950563c60
90,270
def str_digit_to_int(chr):
    """
    Converts a string character to a decimal number.
    Where "A"->10, "B"->11, "C"->12, ...etc

    Args:
        chr(str): A single character in the form of a string.

    Returns:
        The integer value of the input string digit.
    """
    # plain decimal digits map directly
    if chr in ("0", "1", "2", "3", "4", "5", "6", "7", "8", "9"):
        return int(chr)
    code = ord(chr)
    # uppercase A-Z sit below ord 91: 'A' (65) -> 10
    if code < 91:
        return code - 55
    # lowercase a-z (or higher): 'a' (97) -> 36
    return code - 61
63751c3f4828a23be44c44073e75817d7e885d94
90,271
def getadjacent(graph, vertex):
    """Return the sorted list of nodes adjacent to *vertex*.

    `vertex` is a node index (1, 2, 3, ... 8); every edge touching it
    contributes its other endpoint. For example:
        getadjacent(graph, 1) -> [2, 4, 6]
    """
    neighbours = [b for a, b in graph['edges'] if a == vertex]
    neighbours += [a for a, b in graph['edges'] if b == vertex]
    return sorted(neighbours)
bb53103806daee7f641e1df31df8ad000ab10fde
90,284
def power3(n):
    """Return the cube (third power) of the given number."""
    return n ** 3
5ab23c802d4da2281b9db1b45fa2061e31961798
90,289
def from_key_to_line_number(key):
    """
    Extract the line number embedded in a key.

    :param key: The key to parse (line number is the second dot-field)
    :return: line number, or -1 when the field is not numeric
    """
    token = key.split(".", 2)[1]
    # Some keys carry a redundant trailing "l" ("Q005624.1l"); drop it.
    if token[-1] == "l":
        token = token[:-1]
    return int(token) if token.isdigit() else -1
a9876d9779284e1c7882cd3ac669da294e835919
90,293
def calc_canopy_evaporation(pet, wetFrac):
    """
    Evaporation from canopy interception storage.

    Parameters
    ----------
    pet : int or float
        Potential evapotranspiration [mm day^-1]
    wetFrac : float
        Wetted fraction of the canopy

    Returns
    -------
    float
        Evaporation from canopy interception storage [mm day^-1]
    """
    return pet * wetFrac
9a8387692feb1dc81ba3574fc55b7faad08c4705
90,295
def transpose_list(lst):
    """
    Transpose a list of equal-length lists so that result[j][i] == lst[i][j].
    A flat (non-nested) list is returned unchanged.
    """
    if not isinstance(lst[0], list):
        return lst
    return [list(column) for column in zip(*lst)]
c93c648a4c7a992a8df952be08639f9402e63fa5
90,297
def filter_features(features, geom_type='Polygon'):
    """Keep only the GeoJSON-like features whose geometry type matches."""
    kept = []
    for feature in features:
        if feature['geometry']['type'] == geom_type:
            kept.append(feature)
    return kept
e1a374c2090850a9662880653a282add0896c513
90,300
def mph(value):
    """ Convert kilometers per hour to miles per hour """
    miles = value * 0.6214
    return round(miles, 1)
9c16e8f9d78697f2e2a14b84ef25f91903c0f9d2
90,304
def get_value(lst, row_name, idx):
    """
    :param lst: data list, each entry is another list with whitespace separated data
    :param row_name: name of the row to find data
    :param idx: numeric index of desired value
    :return: value at `idx` in the first matching row, or None
    """
    for row in lst:
        if not row:
            continue
        if row[0] != row_name:
            continue
        try:
            return row[idx]
        except Exception:
            # index out of range (or similar): log context and give up
            print(row_name, idx)
            print(lst)
            return None
    return None
7b9c59a4e27fcdb4e2168cfbed8cf97f9c7dd2e6
90,306
def register_to_signed_int(x, base=16):
    """
    Interpret an unsigned `base`-bit register value as a signed integer
    (two's complement). Modbus registers are 16-bit, hence the default;
    other widths work via `base`.
    """
    sign_bit = 1 << (base - 1)
    return x - (1 << base) if x & sign_bit else x
e3aca2004a7fc2e75c08bd8477f50cc2399c70e8
90,309
def pad_box(bounding_box, padding, image_shape):
    """
    Expand a (min_row, min_col, max_row, max_col) bounding box by
    `padding` pixels on every side, clamped to the image boundaries.
    Returns the new box as a 4-tuple.
    """
    min_row, min_col, max_row, max_col = bounding_box
    return (
        max(min_row - padding, 0),
        max(min_col - padding, 0),
        min(max_row + padding, image_shape[0]),
        min(max_col + padding, image_shape[1]),
    )
c08f560d8cbe451a7c649136ea39b5c42b082188
90,312
def db_url(request):
    """
    Database URL used in sqlalchemy.create_engine

    Use ``--test-db-url`` pytest parameter or override this fixture in
    your test to provide your desired test database url. For valid urls
    see: http://docs.sqlalchemy.org/en/latest/core/engines.html

    Defaults to SQLite memory database.

    .. warning::

        Ensure you are providing test database url since data will be
        deleted for each test function and schema will be recreated on
        each test run.
    """
    option = request.config.getoption('TEST_DB_URL')
    return option
a6e0e6175e29af7c32d438568a65ee4055b86865
90,313
def strip_string(phrase: str):
    """
    Remove leading and trailing whitespace from a string.

    :param phrase: string to clean
    :return: the stripped string
    """
    return phrase.strip()
c64dbba80fd62f1ede764a396129687c7c13d692
90,318
def get_month_order(current_month):
    """
    Build the column ordering for the long-term data sheets, starting at
    `current_month` (1=Jan, 12=Dec).

    The result is the 12 months beginning at `current_month`, with a
    "Ticker" column prepended, a "4 Month" average column after the first
    four months, and an "Empty" spacer column (placed right after the
    "4 Month" column when the current month is September, otherwise right
    after "Dec (12)").

    :param: current_month An integer representing the current month
    :return: list of column labels as described above
    """
    labels = ["Jan (1)", "Feb (2)", "Mar (3)", "Apr (4)", "May (5)",
              "Jun (6)", "Jul (7)", "Aug (8)", "Sep (9)", "Oct (10)",
              "Nov (11)", "Dec (12)"]
    # two years back-to-back so any 12-month window is a simple slice
    ordered = (labels * 2)[current_month - 1:current_month + 11]

    ordered.insert(4, "4 Month")
    ordered.insert(0, "Ticker")

    if current_month == 9:
        ordered.insert(6, "Empty")
    else:
        ordered.insert(ordered.index("Dec (12)") + 1, "Empty")
    return ordered
df15b290449b2cde5bc605a6cb470e3b60273875
90,320
def Btu_hftR2kJ_hmK(x):
    """Btu/(hr-ft-R) -> kJ/(h-m-K)"""
    return x * 6.231
a60b398292e5d883737339e6d7343656295b556c
90,321
def split_data(data):
    """
    Split a dataframe into features and target.

    Drops the target plus unused columns ('education', 'fnlwgt',
    'capital_gain', 'capital_loss') from the feature set.

    Parameters
    ----------
    data - a pandas dataframe, including the target column

    Return
    -------
    A tuple of (features, target)
    """
    unused = ['target', 'education', 'fnlwgt', 'capital_gain', 'capital_loss']
    return data.drop(columns=unused), data['target']
feade16e82d74bd601000ad1073c9b06116f9c16
90,322
from typing import Optional
from typing import Dict
from typing import Iterable


def stripped_by_keys(dictionary: Optional[Dict], keys_to_keep: Iterable) -> Dict:
    """Return a copy of `dictionary` restricted to the keys in `keys_to_keep`.

    A None `dictionary` yields an empty dict.

    Parameters
    ----------
    dictionary : Optional[Dict]
        Dictionary to strip down by keys.
    keys_to_keep : Iterable
        Which keys of the dictionary to keep.

    Returns
    -------
    Dict
        Dictionary containing only keys from `keys_to_keep`.
    """
    if dictionary is None:
        return {}
    return {key: value for key, value in dictionary.items() if key in keys_to_keep}
bcc2a511e8ad46d442a45d43c60ef3637a439ed9
90,328
def insertion_sort(lst):
    """
    Sort a list of integers from lowest to highest, in place, using the
    insertion sort method, and return the sorted list.
    """
    for position in range(1, len(lst)):
        current = int(lst[position])  # values coerced to int as before
        gap = position
        # shift larger elements one slot right until `current` fits
        while gap > 0 and current < lst[gap - 1]:
            lst[gap] = lst[gap - 1]
            gap -= 1
        lst[gap] = current
    return lst
4b4b2a6e281b6a1f00c8ab5a889153cc0f516d6a
90,330
def compute_metric_deltas(m2, m1, metric_names):
    """Return {name: m2[name] - m1[name]} for each metric name;
    missing metrics count as 0."""
    return {name: m2.get(name, 0) - m1.get(name, 0) for name in metric_names}
b76d29159ad5e221500131446294d76141b90615
90,331
def indent_sql(query: str) -> str:
    """
    Indents an SQL query based on opening and closing brackets.
    """
    out_lines = []
    level = 0
    for line in query.split("\n"):
        out_lines.append(" " * level + line)
        # adjust the level *after* emitting, keyed on the line's last char
        if line.endswith("("):
            level += 4
        elif line.endswith(")"):
            level -= 4
    return "\n".join(out_lines)
bcb74f535154684ba43ffa9130aadbbf567e0a22
90,332
import math


def zscore(observed, expected, N):
    """
    Computes the z-score of observed vs. expected occurrence counts
    under a normal approximation.

    :param observed: number of occurrences
    :param expected: number of occurrences
    :param N: sample size
    :return: the z-score
    """
    n = float(N)
    p_observed = float(observed) / n
    p_expected = float(expected) / n
    std_err = math.sqrt(p_expected * (1.0 - p_expected) / n)
    return (p_observed - p_expected) / std_err
ee8e1da3df7e04287c0f8cc9cf9239b705b6c989
90,335
from typing import Callable
import functools


def rename(new_name: str) -> Callable[[Callable], Callable]:
    """Decorator factory: rename the decorated function to *new_name*.

    The wrapper preserves the wrapped function's metadata (docstring,
    module, __wrapped__, ...) via functools.wraps — the original version
    discarded everything except __name__.
    """
    def _decorator(func: Callable) -> Callable:
        @functools.wraps(func)
        def _wrapper(*args, **kwargs):
            return func(*args, **kwargs)
        # override the copied name (and qualname) with the requested one
        _wrapper.__name__ = new_name
        _wrapper.__qualname__ = new_name
        return _wrapper
    return _decorator
904300c2b3a2606b2c4b2fbfdbac4b9d252597e5
90,336
def walk(predicate, cursor):
    """Lazily yield every preorder AST node for which *predicate* is true."""
    return filter(predicate, cursor.walk_preorder())
2682a3c70a54cdb058c46b99ee77a1fb6de8e071
90,337
def _init_well_info(well_info, wname): """Initialize well info dictionary for well """ if wname not in well_info.keys(): winfo = dict() winfo['welltype'] = 'Producer' winfo['phase'] = 'Oil' well_info[wname] = winfo return well_info
7525f5c13bf964e4e77b0aa80bae5b049469cbf3
90,341
import csv


def csv_headers(csv_filepath, encoding="utf-8", delimiter=','):
    """
    Return the header row of a CSV file as a list of strings.

    The first row of the CSV is assumed to contain the headers; None is
    returned for an empty file.
    """
    with open(csv_filepath, encoding=encoding) as csv_file:
        first_row = next(csv.reader(csv_file, delimiter=delimiter), None)
    return first_row
e1b0d205010fb770c8fe6b201eac91f2894c0d6b
90,344
def display_error_output(output: dict):
    """
    Transform an error output into a human readable str

    :param output: dict containing the error output
    :return str: a human readable string for the terminal
    """
    header = f"{output['ename']} : {output['evalue']}\n"
    return header + "".join(output["traceback"])
c5756ab4fe81c8f829162b203ec6748015c75d8c
90,345
from typing import Optional
import re


def get_patch_notes_line_number(commentBody: str) -> Optional[int]:
    """
    Return the first integer found on the first line of the comment body,
    or None when that line holds no digits.
    """
    first_line = commentBody.partition("\n")[0]
    match = re.search(r"\d+", first_line)
    return int(match.group()) if match else None
2dbf9dfa6aa927da35a884b3022255029f12e6ff
90,350
def validate_birth_year(passport: dict) -> bool:
    """
    Return True if the birthyear in a passport is between 1920 and 2002.

    A missing or non-numeric 'byr' value is treated as invalid.

    DOCTEST
    >>> validate_birth_year(passport={"ecl": "gry", "byr": "1937"})
    True
    >>> validate_birth_year(passport={"ecl": "gry", "byr": "1919"})
    False
    >>> validate_birth_year(passport={"ecl": "gry", "byr": "2003"})
    False
    >>> validate_birth_year(passport={"ecl": "gry"})
    False
    >>> validate_birth_year(passport={"ecl": "gry", "byr": "19x7"})
    False
    """
    birth_year = passport.get("byr")
    if not birth_year:
        return False
    try:
        # Bug fix: previously a non-numeric value raised ValueError
        # instead of failing validation.
        return 1920 <= int(birth_year) <= 2002
    except ValueError:
        return False
72ebf610ba16f0750cb9959a66c2a99f6affde6e
90,351
def is_variant(record):
    """
    Return True if the record carries at least one true alternate allele.

    Standard pyvcf `.is_snp` does not work correctly for GATK's GVCF format,
    where '.' and '<NON_REF>' placeholders appear among the ALT alleles.

    :param record: pyvcf record
    :return: bool
    """
    non_variant = {".", "<NON_REF>"}
    return any(str(alt) not in non_variant for alt in record.ALT)
3053583de63f4f01d3ff551cf292796dfc4a5d59
90,352
def all_conditions_nominal(conditions):
    """Check whether every condition is nominal.

    An empty (or None) condition collection is considered nominal.
    """
    if not conditions:
        return True
    for condition in conditions:
        if not condition.nominal:
            return False
    return True
0d969b65476b9260fdbb0a2cf09b1b7c54f79c5f
90,359
def get_metadata() -> dict:
    """
    Return a bunch of metadata (description, units, long_name, etc.) for the
    fields of the output dataset of a GPM / ground-radar volume-matching run.

    Returns:
    ========
    metadata: dict
        Mapping of output variable name -> attribute dict (units, long_name,
        description / projection).
    """
    metadata = {
        # FMIN quality ratios.
        "fmin_gpm": {
            "units": "",
            "long_name": "fmin_gpm",
            "description": "FMIN ratio for GPM.",
        },
        "fmin_gr": {
            "units": "",
            "long_name": "fmin_gr",
            "description": "FMIN ratio for GR.",
        },
        # Reflectivity fields (GPM and ground radar).
        "refl_gpm_raw": {
            "units": "dBZ",
            "long_name": "GPM_reflectivity",
            "description": "GPM reflectivity volume-matched to ground radar.",
        },
        "refl_gpm_grband": {
            "units": "dBZ",
            "long_name": "GPM_reflectivity_grband_stratiform",
            "description": "GPM reflectivity converted to ground radar frequency band.",
        },
        "refl_gr_raw": {
            "units": "dBZ",
            "long_name": "reflectivity",
            "description": "Ground radar reflectivity volume matched using a `normal` average.",
        },
        "refl_gr_weigthed": {
            "units": "dBZ",
            "long_name": "reflectivity",
            "description": "Ground radar reflectivity volume matched using a distance-weighted average.",
        },
        "std_refl_gpm": {
            "units": "dB",
            "long_name": "standard_deviation_reflectivity",
            "description": "GPM reflectivity standard deviation of the volume-matched sample.",
        },
        "std_refl_gr": {
            "units": "dB",
            "long_name": "standard_deviation_reflectivity",
            "description": "Ground radar reflectivity standard deviation of the volume-matched sample.",
        },
        # Sample-size bookkeeping for the volume matching.
        "sample_gpm": {
            "units": "1",
            "long_name": "sample_size",
            "description": "Number of GPM bins used to compute the volume-matched pixels at a given points",
        },
        "reject_gpm": {
            "units": "1",
            "long_name": "rejected_sample_size",
            "description": "Number of GPM bins rejected to compute the volume-matched pixels at a given points",
        },
        "sample_gr": {
            "units": "1",
            "long_name": "sample_size",
            "description": "Number of ground-radar bins used to compute the volume-matched pixels at a given points",
        },
        "reject_gr": {
            "units": "1",
            "long_name": "rejected_sample_size",
            "description": "Number of ground-radar bins rejected to compute the volume-matched pixels at a given points",
        },
        "volume_match_gpm": {
            "units": "m^3",
            "long_name": "volume",
            "description": "Volume of the GPM sample for each match points.",
        },
        "volume_match_gr": {
            "units": "m^3",
            "long_name": "volume",
            "description": "Volume of the ground radar sample for each match points.",
        },
        # Geometry (Azimuthal Equidistant projection centred on the radar).
        "x": {
            "units": "m",
            "long_name": "projected_x_axis_coordinates",
            "projection": "Azimuthal Equidistant from ground radar.",
        },
        "y": {
            "units": "m",
            "long_name": "projected_y_axis_coordinates",
            "projection": "Azimuthal Equidistant from ground radar.",
        },
        "z": {
            "units": "m",
            "long_name": "projected_z_axis_coordinates",
            "projection": "Azimuthal Equidistant from ground radar.",
        },
        "r": {
            "units": "m",
            "long_name": "range",
            "description": "Range from ground radar.",
        },
        "timedelta": {
            "units": "ns",
            "long_name": "timedelta",
            "description": "Maximum time delta between ground radar and GPM volumes.",
        },
        "elevation_gr": {
            "units": "degrees",
            "long_name": "elevation",
            "description": "Ground radar reference elevation.",
        },
        "ntilt": {
            "units": "1",
            "long_name": "ground_radar_tilt_number",
            "description": "Number of ground radar tilts used for volume matching.",
        },
        "nprof": {
            "units": "1",
            "long_name": "gpm_profile_number",
            "description": "Number of GPM profiles (nrays x nscan) used for volume matching.",
        },
        # Path-integrated reflectivity.
        "pir_gpm": {
            "units": "dB m-1",
            "long_name": "GPM_path_integrated_reflectivity",
            "description": "Path integrated GPM reflectivity volume-matched.",
        },
        "pir_gr": {
            "units": "dB m-1",
            "long_name": "GR_path_integrated_reflectivity",
            "description": "Path integrated GR reflectivity volume-matched.",
        },
    }
    return metadata
d0c74f6b69fe4344d97ee5e20bd4e537b56d253f
90,360
import mpmath


def pdf(x, mu=0, sigma=1):
    """
    Normal distribution probability density function.

    Parameters
    ----------
    x : number
        Point at which to evaluate the density.
    mu : number, optional
        Mean of the distribution (default 0).
    sigma : number, optional
        Standard deviation of the distribution (default 1).

    Returns
    -------
    mpmath value of the normal PDF at *x*.
    """
    # Defined here for consistency, but this is just mpmath.npdf
    return mpmath.npdf(x, mu, sigma)
d1ebc4e29437b3171ad928f702b8be97fcfb7bd4
90,364
def enum_value(value, text=None, is_default=False):
    """Create a dictionary representing one value in an enumeration of
    possible values for a command parameter.

    Parameters
    ----------
    value: scalar
        Enumeration value
    text: string, optional
        Text representation for the value in the front end; defaults to
        ``str(value)`` when omitted.
    is_default: bool, optional
        Flag indicating whether this is the default value for the list

    Returns
    -------
    dict
        Keys: 'value', 'isDefault', 'text'.
    """
    # `x is not None` instead of the anti-idiom `not x is None`.
    return {
        'value': value,
        'isDefault': is_default,
        'text': text if text is not None else str(value),
    }
de53f2565a71f9bf1e9d8ce9b67e1266f2bb300f
90,367
def ice_to_freshwater(icevol, rho_ice=900, rho_water=1000):
    """Convert a volume of glacial ice (km3) to the equivalent volume of
    fresh water (liters).

    Arguments:
        icevol: volume of ice to convert, in km3
        rho_ice: density of glacial ice (default 900 kg/m3)
        rho_water: density of freshwater (default 1000 kg/m3)
    """
    # 1 km3 == 1e12 liters.
    water_volume_km3 = icevol * rho_ice / rho_water
    return water_volume_km3 * 1E12
9d9be462cdad380cd270f3d040f9468b5f9c6c6f
90,370
def align_on_left(txt: str) -> str:
    """
    Remove all leading/trailing spaces (not tabs) from each line of *txt*.

    Newline structure is preserved; only literal space characters are
    stripped from both ends of every line.
    """
    # rstrip(" ").lstrip(" ") is exactly strip(" "); join a generator
    # instead of building an intermediate list with append.
    return "\n".join(line.strip(" ") for line in txt.split("\n"))
c64c5cdb8aab74596ffe2b9ae57f07d206f1b4c0
90,376
def setTime(date, hour, minute, second):
    """Return a copy of *date* with the time fields set as specified.

    Args:
        date (Date): The starting date.
        hour (int): The hours (0-23) to set.
        minute (int): The minutes (0-59) to set.
        second (int): The seconds (0-59) to set.

    Returns:
        Date: A new date, set to the appropriate time (microseconds zeroed).
    """
    new_time = {'hour': hour, 'minute': minute, 'second': second, 'microsecond': 0}
    return date.replace(**new_time)
159cf4a5dfe3d521c942ce32df239cd686c94106
90,378
def check_blanks(plaintext: list, ciphertext: list) -> int:
    """Check if the ciphertext can fit in plaintext.

    Compare the number of blank lines in **plaintext** to the number of lines
    in **ciphertext**; a blank line is an empty string entry.

    Args:
        plaintext (list): Paragraphs of a fake message in a list of strings.
        ciphertext (list): Paragraphs of an encrypted message in a list of
            strings.

    Returns:
        Number of extra blank lines needed to fit **ciphertext** in
        **plaintext**; ``0`` means it already fits.
    """
    shortfall = len(ciphertext) - plaintext.count('')
    return max(0, shortfall)
0aa5b2a6eaa1bfa1af388be0c70c6545287dd7a4
90,379
def sortNTopByVal(tosort, top, descending=False):
    """
    Sort a dictionary by its values and return the *top* entries.

    Args:
        tosort (dict): mapping to sort.
        top (int): number of leading (key, value) pairs to return.
        descending (bool): sort from highest to lowest value when True.

    Returns:
        list of (key, value) tuples.
    """
    # dict.items() already yields (key, value) tuples; the previous list
    # comprehension was a redundant copy.
    return sorted(tosort.items(), key=lambda item: item[1], reverse=descending)[:top]
7f989b9234abc4a3c83100db1eb8532a7d7b5006
90,383
def create_headers(bearer_token: str):
    """
    Build the HTTP headers sent along with a request to the API.

    :param bearer_token: the authentication token
    :return: dictionary with authentication information
    """
    auth_value = f"Bearer {bearer_token}"
    return {"Authorization": auth_value}
12f1484b938341c92fcaa024d2f9597a3ddfa74a
90,386
def pandas_df_to_records(df):
    """Convert a Pandas DF to a list of tuples representing rows ('records')."""
    records = df.to_records(index=False)
    return records.tolist()
c0c1b80dde31ff50e9db980d99e208cb006af0d3
90,391
def pg_base_connection_string(request, pg_db_name):
    """
    A Postgres connection string without the database name at the end.

    eg. postgresql+asyncpg://virtool:virtool@localhost

    NOTE(review): *pg_db_name* is accepted (fixture dependency) but not used
    to build the string.
    """
    option_name = "postgres_connection_string"
    return request.config.getoption(option_name)
8d11234cbcb80a438e071a6d3948c31def456b5d
90,396
def trim_exams(image_channels):
    """
    Trim every exam to the length of the shortest one.

    Receives an arbitrary list of (num_frames, height, width) arrays as
    input; each channel is truncated along the frame axis so all channels
    share the dimensionality of the smallest exam.
    """
    frame_counts = (channel.shape[0] for channel in image_channels)
    shortest = min(frame_counts)
    return [channel[:shortest] for channel in image_channels]
af466a19cae3ccf066ad5bb93dda112d81f19c6d
90,402
import logging
import csv
import json


def convert_csv_file_to_json_file(csv_filename, json_filename=None, seperator=","):
    """
    Purpose:
        Convert .csv File to .json
    Args:
        csv_filename (String): .csv file to convert to .json
        json_filename (String): filename for the resulting .json; derived
            from csv_filename when omitted.
        seperator (String): String seperator of fields in the .csv
    Return:
        json_filename (String): filename for the resulting .json
    Raises:
        Exception: when csv_filename does not end in ".csv".
    """
    if not csv_filename.endswith(".csv"):
        error_msg = "File does not look like .csv, not converting"
        logging.error(error_msg)
        raise Exception(error_msg)

    if not json_filename:
        json_filename = csv_filename.replace(".csv", ".json")

    logging.info(f"Converting {csv_filename} to {json_filename}")

    with open(csv_filename, 'r') as filec, open(json_filename, 'w') as filej:
        # Bug fix: the header line must be stripped of its trailing newline,
        # otherwise the last field name (and hence every JSON key for that
        # column) ends with '\n'.
        field_names = filec.readline().rstrip("\r\n").split(seperator)
        reader = csv.DictReader(filec, fieldnames=field_names)
        filej.write(
            json.dumps(
                [row for row in reader],
                sort_keys=True,
                indent=4,
                separators=(',', ': '),
            )
        )

    return json_filename
78b49c316c4143a37ab8ef6f57ac6627ca68d200
90,404
def get_node_wrapped_tensor_info(meta_graph_def, path):
    """Get the Any-wrapped TensorInfo for the node from the meta_graph_def.

    Args:
      meta_graph_def: MetaGraphDef containing the CollectionDefs to extract
        the node name from.
      path: Name of the collection containing the node name.

    Returns:
      The Any-wrapped TensorInfo for the node retrieved from the
      CollectionDef.

    Raises:
      KeyError: There was no CollectionDef with the given name (path).
      ValueError: The any_list in the CollectionDef with the given name did
        not have length 1.
    """
    collections = meta_graph_def.collection_def
    if path not in collections:
        raise KeyError('could not find path %s in collection defs. meta_graph_def '
                       'was %s' % (path, meta_graph_def))
    values = collections[path].any_list.value
    if len(values) != 1:
        raise ValueError(
            'any_list should be of length 1. path was %s, any_list was: %s.' %
            (path, values))
    return values[0]
46cb8693e8ff59a6f403fbfa1142cba582c434ea
90,406
def make_message(obj, context=None):
    """Build a message by merging *obj* into a JSON-LD context.

    Parameters
    ----------
    obj : :obj:`dict`
        A dictionary containing the non-context information of a message
        record (keys must be strings).
    context : :obj:`dict`, optional
        Dictionary with the link to the context file or containing a JSON-LD
        context. Defaults to the pydra schema context.

    Returns
    -------
    dict
        The message with the context.
    """
    default_context = {
        "@context": "https://raw.githubusercontent.com/nipype/pydra/master/pydra/schema/context.jsonld"
    }
    base = context if context is not None else default_context
    message = dict(base)
    message.update(**obj)
    return message
1cfa94a10e45eb39c90b476ec299cad23b4cca8c
90,408
import logging


def logger(name, level=None):
    """
    Create (or fetch) a logger with a console handler and the given level.

    Args:
        name: logger name passed to ``logging.getLogger``.
        level: optional level name (e.g. "DEBUG"); defaults to INFO.

    Returns:
        logging.Logger: the configured logger.
    """
    ret = logging.getLogger(name)
    # Bug fix: previously a new StreamHandler was appended on EVERY call,
    # so repeated lookups of the same logger duplicated each log line.
    if not ret.handlers:
        ret.addHandler(logging.StreamHandler())
    ret.setLevel(getattr(logging, level) if level else logging.INFO)
    return ret
9d4454f56b3489e05d10ecdd1b8c43f7de7c9399
90,409
def _target_to_test_name(target: str, test_suite_path: str) -> str: """Get test_name from `suite_name_test_name__tf__backend_name`.""" return target.split('__')[0].replace(f'{test_suite_path}_', '')
900d289009c49fa8c1244a997cc45906c63a09c8
90,414
def is_commanddictnode_defined(node):
    """
    A child node is defined if it has either a help_text/callback/summary.
    If a node's 'callback' key is present but None, the callback alone does
    not make the node defined.
    """
    # `.get(...) is not None` covers both "key missing" and "value is None",
    # replacing the `'callback' in node and not node['callback'] is None`
    # anti-idiom.
    return (node.get('callback') is not None
            or 'help_text' in node
            or 'summary' in node)
9f53bdfe4b0c25333ebe50f27aed249fedc736e2
90,415
def diffusion(grid, ivar, alpha):
    """Compute the diffusion terms of the variable tagged "ivar".

    Arguments
    ---------
    grid : grid object
        Grid containing data for a given stencil.
    ivar : string
        Name of the grid variable to be operated on.
    alpha : float
        Diffusion coefficient.

    Returns
    -------
    D : numpy.ndarray
        Diffusion terms (alpha times the 5-point Laplacian of the interior
        cells) as an array of floats.
    """
    f = grid[ivar][0, 0, :, :]
    dx, dy = grid.dx, grid.dy
    center = f[1:-1, 1:-1]
    # Second differences along each axis over the interior cells.
    lap_x = (f[1:-1, 2:] - 2 * center + f[1:-1, :-2]) / dx**2
    lap_y = (f[2:, 1:-1] - 2 * center + f[:-2, 1:-1]) / dy**2
    return alpha * (lap_x + lap_y)
340eef3606db1a5246a59b56859835d41f9cfe4e
90,417
def loop(conn):
    """The main loop: keep processing the connection until interrupted."""
    while True:
        try:
            conn.Process(1)
        except KeyboardInterrupt:
            break
18dfa4d35c4260af87f40870c129c4ab9912edbf
90,423
def wordfreq(text):
    """Return a dictionary mapping each whitespace-separated word in
    *text* to its number of occurrences."""
    tally = {}
    for word in text.split():
        if word in tally:
            tally[word] += 1
        else:
            tally[word] = 1
    return tally
63f00ee230b5f26e82dfdccd78026cdae02c645c
90,429
def cshock_dissipation_time(shock_vel, initial_dens):
    """Calculate the dissipation time of a C-type shock.

    Use to obtain a useful timescale for your C-shock model runs. Velocity
    of ions and neutrals equalizes at dissipation time and full cooling
    takes a few dissipation times.

    Args:
        shock_vel (float): Velocity of the shock in km/s
        initial_dens (float): Preshock density of the gas in cm$^{-3}$

    Returns:
        float: The dissipation time of the shock in years
    """
    PARSEC_CM = 3.086e18       # parsec in cgs
    SECONDS_PER_YEAR = 3.15569e7
    dissipation_length = 12.0 * PARSEC_CM * shock_vel / initial_dens
    # km/s -> cm/s conversion is the 1.0e-5 factor.
    crossing_time_s = dissipation_length * 1.0e-5 / shock_vel
    return crossing_time_s / SECONDS_PER_YEAR
b97a184a60ccc1587eee40bad6325131a1348c98
90,432
def errors(form):
    """Render non field errors for a form."""
    context = dict(form=form)
    return context
3800fb8a03113832776bf4db54ef3d8e0b9acea2
90,438
import json


def get_es_config(file_path):
    """
    Read a JSON file containing the Elasticsearch credentials and URL.

    The file is expected to have 'url', 'username', 'password' and 'host'
    keys (note: 'host' is also read, although the original summary only
    mentioned the first three).

    Returns:
        tuple: (url, (username, password), host)
    """
    with open(file_path) as stream:
        credentials = json.load(stream)
    return (credentials['url'], (credentials['username'], credentials['password']), credentials['host'])
cdf3e7a3566604445cff1257b57486072c11a469
90,439
import json
import hashlib


def dict_hash(a_dict):
    """Return a stable MD5 hash for a dict.

    Keys are sorted before serialization so equal dicts hash equally.
    See https://stackoverflow.com/a/22003440.
    """
    serialized = json.dumps(a_dict, sort_keys=True, default=str)
    digest = hashlib.md5(serialized.encode('utf8'))
    return digest.hexdigest()
6c526fe4e2d62a59e2330e1c84ee4fa72e34f35b
90,441
def lcs_analysis(Nb, Ne, Mb, Me, lcs, identical, equivalent, different):
    """Classify two files from their LCS snake list.

    Derived from lcs_to_diff: instead of writing out the diff based on the
    snake list, it analyses the snake list and establishes whether the
    reference file and the data file are:

    - identical:  there are no differences between the two files at all.
    - equivalent: there are only tolerated differences.
    - different:  there are some non-tolerated differences.

    The values for the three results are taken from the argument list.

    Args:
        Nb, Ne: begin/end line numbers of the reference file.
        Mb, Me: begin/end line numbers of the data file.
        lcs: list of snakes (xi2, yj2, xi3, yj3, itype) where itype is
            1 = exact match, 2 = tolerated difference, 3 = non-tolerated.
        identical, equivalent, different: values to return for each verdict.
    """
    # NOTE: the original implementation contained a large unreachable block
    # after its first `return analysis`; only the reachable logic is kept.
    if not lcs:
        # No snakes at all: if either file still has content, they differ;
        # two empty ranges are identical.
        if Nb <= Ne or Mb <= Me:
            return different
        return identical
    # The verdict is governed by the worst (highest) snake type present.
    mxtype = max(snake[4] for snake in lcs)
    if mxtype == 2:
        return equivalent
    if mxtype == 3:
        return different
    # Only exact matches (itype 1, or unexpected values, as before).
    return identical
e85f06d3733acd5e20db40c44c8d479e2ecd68b9
90,442
from typing import Iterable


def crc16(*datas: Iterable[int], initial: int = 0) -> int:
    """Compute a CRC-16 checksum over a series of byte sources.

    Parameters
    ----------
    datas : Iterable[int]
        A list of data sources, each of which produces a series of bytes.
    initial : int, optional
        The initial CRC sum to start computing from. Can be used to chain
        CRC sums together.

    Returns
    -------
    int
        The CRC-16 checksum of the input bytes.
    """
    POLYNOMIAL = 0x1021
    crc = initial
    for source in datas:
        for byte in source:
            crc ^= byte << 8
            for _ in range(8):
                shifted = crc << 1
                crc = (shifted ^ POLYNOMIAL) if crc & 0x8000 else shifted
    return crc & 0xFFFF
a47044063259f0e7a6160e30a4869a30ca874979
90,443
def image_similarity_hash(im):
    """
    Calculate the average hash ("aHash") of an image, as defined in
    http://www.hackerfactor.com/blog/index.php?/archives/432-Looks-Like-It.html

    Args:
        im (PIL.Image.Image): source image; any mode convertible to
            grayscale ('L') is accepted.

    Returns:
        list: hash as a list of 100 booleans (one per pixel of the 10x10
        thumbnail); True means the pixel is at or above mean brightness.
    """
    # 10x10 instead of the canonical 8x8 to be stricter when determining
    # similarity, as measured during testing. resample=0 is nearest-neighbor.
    im = im.resize((10, 10), resample=0)
    # 8-bit grayscale so pixel values are plain ints.
    im = im.convert(mode='L')
    data = list(im.getdata())
    avg = sum(data) / len(data)
    # One bit per pixel: brighter-than-average or not.
    image_hash = [i >= avg for i in data]
    return image_hash
b67f1b1020fe34ca7a0f4e828ca7e111aa0f4561
90,445