content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def get_H_O_index_list(atoms):
    """Return two lists with the indices of hydrogen and oxygen atoms.

    Parameters
    ----------
    atoms : iterable
        Sequence of atoms where ``atom[0]`` is the element symbol.

    Returns
    -------
    tuple of (list, list)
        ``(H_list, O_list)`` with the indices of 'H' and 'O' atoms.
    """
    # Bug fix: the original `H_list = O_list = []` bound both names to the
    # SAME list object, so every hydrogen and oxygen index ended up in both
    # returned lists.
    H_list = []
    O_list = []
    for index, atom in enumerate(atoms):
        if atom[0] == 'H':
            H_list.append(index)
        elif atom[0] == 'O':
            O_list.append(index)
    return H_list, O_list
337d6809e3953172601b7a698b7fb28b96bc3720
36,754
def _one_or_both(a, b): """Returns f"{a}\n{b}" if a is truthy, else returns str(b). """ if not a: return str(b) return f"{a}\n{b}"
c206b64654817b91962a3583d08204cc02b8c003
36,756
def update_figure_data_dropdowns(_df1, _df2):
    """Build the dropdown-menu options for the season selector.

    :param _df1: regular season DataFrame values
    :param _df2: play-off DataFrame values
    :return: list of ``{'label': ..., 'value': ...}`` dicts for drop downs
    """
    options = []
    if len(_df1) > 0:
        options.append({'label': 'Regular season', 'value': 'Regular season'})
    if len(_df2) > 0:
        options.append({'label': 'Play-off', 'value': 'Play-off'})
    return options
f50fdd29946cd639b42d8883fc4c08b65d87293a
36,757
def load_inspection_page():
    """Open inspection_page.html and return its contents as a string.

    The purpose of this function is to keep the user from having to make
    repeated requests at the KCHI API which has a tendency to not like
    the attention.

    NOTE(review): the original docstring claimed both a bytes version and
    a string version are returned, but only the decoded string is —
    confirm no caller expects bytes.
    """
    with open('inspection_page.html') as f:
        content = f.read()
    return content
6f3b9331c082ec2607803976ec93fe33e90e921e
36,758
def total_loss_at_strike(call_df, put_df, expiry_price):
    """Calculate the combined option-writer loss at a given expiry price.

    Parameters
    ----------
    call_df, put_df : pandas.DataFrame
        Option chains with 'strikePrice' and 'openInterest' columns.
    expiry_price : float
        Price of the underlying at expiry.

    Returns
    -------
    float
        Total loss of in-the-money call and put writers.
    """
    # .copy() fixes pandas SettingWithCopyWarning: the original added loss
    # columns directly onto a slice of the caller's frame.
    # All call options with strike below the expiry price lose for writers.
    in_money_calls = call_df[call_df['strikePrice'] < expiry_price][["openInterest", "strikePrice"]].copy()
    in_money_calls["CE loss"] = (expiry_price - in_money_calls['strikePrice']) * in_money_calls["openInterest"]
    # All put options with strike above the expiry price lose for writers.
    in_money_puts = put_df[put_df['strikePrice'] > expiry_price][["openInterest", "strikePrice"]].copy()
    in_money_puts["PE loss"] = (in_money_puts['strikePrice'] - expiry_price) * in_money_puts["openInterest"]
    return in_money_calls["CE loss"].sum() + in_money_puts["PE loss"].sum()
5b3233c149392cbd3c92a277f296c4d26018276d
36,759
def to_bdc(number: int) -> bytes:
    """Encode an integer as packed 4-bit BCD (Binary Coded Decimal).

    Example: decimal 30 is encoded as the single byte 0x30 (0b0011_0000).
    """
    digits = str(number)
    if len(digits) % 2:
        # Left-pad with a zero so the digit string maps onto whole bytes.
        digits = "0" + digits
    return bytes.fromhex(digits)
948da3d4e50e6348a3174826c8bd9ec5c78b559c
36,760
import math


def p_value(z):
    """Approximate standard normal cumulative probability.

    Uses formula 26.2.16 from Abramowitz and Stegun, accurate to within
    10^(-5): P(z) = 1 - d(z)*(a1*t + a2*t^2 + a3*t^3) for z >= 0, where
    d(z) is the standard normal density, t = 1/(1 + p*z), p = 0.33267,
    a1 = 0.4361836, a2 = -0.1201676, a3 = 0.9372980.
    For z < 0, symmetry gives P(z) = 1 - P(-z).

    NOTE(review): `t` is computed from the ORIGINAL z before the sign is
    flipped in the z < 0 branch; the formula requires t = 1/(1 + p*|z|),
    so the negative branch looks wrong — confirm against the reference.
    NOTE(review): the final `p_val = 2*(1-p_val)` in the negative branch
    suggests a two-sided p-value, inconsistent with the z >= 0 branch
    returning the one-sided cumulative — confirm the intended contract.
    """
    p = 0.33267
    a1 = 0.4361836
    a2 = -0.1201676
    a3 = 0.9372980
    t = 1/(1+(p*z))
    pi = 3.141592653589793238
    # y is the standard normal density d(z); symmetric in z.
    y = (1/(math.sqrt(2*pi)))* math.exp(-(z**2)/2)
    if z >= 0:
        p_val = 1-(y*((a1*t) + a2*(math.pow(t,2)) + a3*(math.pow(t,3))))
    else:
        z = z*(-1)
        p_val = (y*((a1*t) + a2*(math.pow(t,2)) + a3*(math.pow(t,3))))
        p_val = 2*(1-p_val)
    return p_val
1e0fadb166603ff381eb7a240bab5536b792a491
36,761
def get_counters(cursor):
    """Fetch the counter values for the different database categories.

    Parameters
    ----------
    cursor : database cursor
        Cursor on a database with a ``counters`` table holding
        ``category`` and ``count`` columns.

    Returns
    -------
    dict
        Maps result keys (plural forms) to the integer count stored for
        the corresponding category.
    """
    # (result key, stored category name) — some keys differ from the stored
    # names (e.g. "vertices" vs "vertex"), so map them explicitly. This
    # replaces six copy-pasted execute/fetchone stanzas.
    categories = [
        ("genes", "genes"),
        ("transcripts", "transcripts"),
        ("vertices", "vertex"),
        ("edges", "edge"),
        ("datasets", "dataset"),
        ("observed", "observed"),
    ]
    counter = {}
    for key, category in categories:
        # Same literal-SQL style as the original; the category names are a
        # fixed, trusted set, so interpolation is safe here.
        cursor.execute('SELECT "count" FROM "counters" WHERE "category" = "%s"' % category)
        counter[key] = int(cursor.fetchone()[0])
    return counter
761337a42b13f1e65bc21f0bf9f815a72fd9bef5
36,762
import re


def reformat_page2(soup):
    """Apply page-2-specific cleanups to a BeautifulSoup tree.

    Renames a duplicated label, fills in a missing column label, and
    prefixes GPA / application-requirement row labels. Mutates and
    returns *soup*.
    """
    # Rename the duplicate Entrance Difficulty label on the Overview page.
    difficulty_th = soup.find('th', string='Entrance Difficulty')
    if difficulty_th:
        difficulty_th.string = 'Entrance Difficulty, Description'

    # Add the missing column label to the first column of 'Examinations'.
    exam_caption = soup.find('caption', string='Examinations')
    if exam_caption:
        exam_caption.find_next('thead').td.string = 'Requirement'

    # Prepend labels to GPA row labels.
    gpa_caption = soup.find('caption', string=re.compile('Grade Point Average'))
    if gpa_caption:
        gpa_table = gpa_caption.parent
        gpa_table.tbody.th.string = 'GPA, Average'
        for row_th in gpa_table.find_all('th')[1:]:
            row_th.string = 'GPA, ' + row_th.string

    # Prepend labels to rows of 'Other Application Requirements'.
    req_caption = soup.find('caption', string='Other Application Requirements')
    if req_caption:
        for row_th in req_caption.parent.find_all('th'):
            row_th.string = 'Application Requirements, ' + row_th.get_text(' ', strip=True)

    return soup
bc10248859add62d5c8da30779ca68d8dc7bcfb3
36,763
def is_arn_filter_match(arn: str, filter_arn: str) -> bool:
    """Return True when *arn* matches the colon-separated filter pattern.

    Every field of the filter must equal the corresponding arn field; an
    empty filter field acts as a wildcard for that position.

    Examples:
        arn=abc:def:ghi filter_arn=abc:def:ghi  -> True (exact match)
        arn=abc:def:ghi filter_arn=abc::ghi     -> True (wildcarded match)
    """
    arn_fields = arn.split(":")
    filter_fields = filter_arn.split(":")
    # ARNs with differing field counts can never match.
    if len(arn_fields) != len(filter_fields):
        return False
    # Empty filter fields are wildcards; non-empty ones must match exactly.
    return all(not pattern or pattern == field
               for field, pattern in zip(arn_fields, filter_fields))
e94cf5852f4993386b1cbe8d6ada25c6100fb348
36,765
def calc_delay2(freq, freqref, dm, scale=None):
    """Calculate the delay in seconds due to dispersion.

    Parameters
    ----------
    freq : array or float
        Frequencies to compute the delay at.
    freqref : float
        Reference frequency the delay is relative to.
    dm : float
        Dispersion measure.
    scale : float, optional
        Linear prefactor; defaults to 4.1488e-3 (reproducing results for
        rtpipe<=1.54 requires 4.2e-3).
    """
    # Bug fix: test `is None` instead of truthiness so an explicit
    # scale of 0 is honored rather than silently replaced by the default.
    if scale is None:
        scale = 4.1488e-3
    return scale*dm*(1./freq**2 - 1./freqref**2)
7475f140f593692ece4976795a0fd34eba93cffd
36,766
def c_terminal_proline(amino_acids):
    """Return True when the right-most (C-terminal) amino acid is proline.

    Parameters
    ----------
    amino_acids : str or sequence of str
        Amino-acid sequence in one-letter codes.

    Returns
    -------
    bool
        True when the last residue is "P". An empty sequence now returns
        False instead of raising IndexError.
    """
    return bool(amino_acids) and amino_acids[-1] == "P"
f54563ff59973d398e787186a1c390da03ff8999
36,768
import os
import csv


def get_data():
    """Load all rows from ``data/data_file.csv``.

    Returns
    -------
    list of list of str
        Every row of the CSV file.
    """
    # newline='' lets the csv module handle line endings itself — the
    # documented way to open files for csv.reader. The commented-out debug
    # prints from the original were removed.
    with open(os.path.join('data', 'data_file.csv'), 'r', newline='') as fin:
        return list(csv.reader(fin))
59376dda3341b54d99e456ade4d5bcd7fdf715a1
36,769
import base64


def base64encode(data):
    """Encode binary data as base64 text.

    :param bytes data: Binary data to encode.
    :return: Base64 encoded text
    :rtype: str
    """
    encoded = base64.b64encode(data)
    return encoded.decode('utf-8')
455ba57e13980d8d144b1a319d3d85872648e0af
36,770
from typing import List def _parse_md_table_header(md_table_header: str) -> List[str]: """Return a list of column names.""" return [key.strip() for key in md_table_header.strip().split("|") if key]
7250116e00cb03c7fe333ee9d235ec3d2e8930d8
36,771
def add_message_to_session(request, message):
    """Store *message* in the session under the first free numeric key.

    Used by server-side code; marks the session as modified so the change
    is persisted. Returns the (mutated) request.
    """
    if 'messages' not in request.session:
        request.session['messages'] = dict()
    messages = request.session['messages']
    # Bug fix: the original probed string keys (`str(i)`) but stored under
    # the int key `i`, so every call clobbered slot 0. Keys are now
    # consistently strings (they survive JSON session serialization).
    i = 0
    while str(i) in messages:
        i += 1
    messages[str(i)] = message
    request.session.modified = True
    return request
5f91db668ee69112c0d215d3d7b9990e31e72c05
36,772
import platform import os def _get_c_library_filename(): """Return the filename of the CODA shared library depending on the current platform. """ if platform.system() == "Windows": return "coda.dll" if platform.system() == "Darwin": library_name = "libcoda.dylib" else: library_name = "libcoda.so" # expand symlinks (for conda-forge, pypy build) dirname = os.path.dirname(os.path.realpath(__file__)) # look in different directories based on platform for rel_path in ( "..", # pyinstaller bundles "../../..", # regular lib dir "../../../../lib", # on RHEL the python path uses lib64, but the library might have gotten installed in lib ): library_path = os.path.normpath(os.path.join(dirname, rel_path, library_name)) if os.path.exists(library_path): return library_path
d5e3c8b50b08a3f7ee71cfde12abed2c648015ee
36,773
def format_channels(channels, maxdevices=4, numperhat=2):
    """Convert channel objects into per-hat slot lists.

    Turns e.g. channels [0, 1, 3] into [[0, 1], [1], [None], [None]]:
    one sub-list per hat holding 0/1 slot indices, with [None] marking
    hats that have no channels in use.

    Args:
        channels (list): Objects with a ``channel`` attribute.
        maxdevices (int): Maximum number of hats on the device.
        numperhat (int): Total number of channels per hat.

    Returns:
        chans (list): List of lists describing channels in correct format.
    """
    chans = [[None] for _ in range(maxdevices)]
    for ch in channels:
        if ch.channel % 2 == 0:
            hat, slot = ch.channel // numperhat, 0
        else:
            hat, slot = (ch.channel - 1) // numperhat, 1
        chans[hat].append(slot)
        # Drop the placeholder once the hat has a real entry.
        if None in chans[hat]:
            chans[hat].remove(None)
    return chans
40b8fe9f37be0a5e900b756687589d49d86b60fb
36,774
from pathlib import Path


def uhome() -> Path:
    """Return the current user's home directory."""
    home = Path.home()
    return home
6b4e9a3ce0174a30d3e4dc2a0845908eb8552c1c
36,775
def no_name():
    """Return the placeholder no-name tuple (label, black hex color)."""
    label, color = "no_name", "#000000"
    return (label, color)
a5ceb83147e980341d9dbd277d8bc57425b538fa
36,776
def count_mills(board, player):
    """Count the completed mills belonging to *player*.

    A mill is complete when all three places it spans hold the player's
    piece.

    :param board: board object with ``MILLS`` (triples of places) and
        ``points`` (place -> occupant mapping)
    :param player: current player as a string
    :return: count of mills as integer
    """
    total_mill_count = 0
    for mill in board.MILLS:
        occupied = sum(1 for place in mill if board.points[place] == player)
        if occupied == 3:
            total_mill_count += 1
    return total_mill_count
2b82adaf63e629f3f2906f806381aebf65c2d17e
36,777
def test_curve_2d_plot(well):
    """Tests mpl image of curve as VD display.

    ``well`` is a test fixture; ``well.data['GR']`` is a curve object
    exposing ``plot_2d`` and supporting arithmetic.
    """
    curve = well.data['GR']
    # plot 2D curve
    curve.plot_2d()
    # subtract curve values from 200
    curve2 = 200-curve
    # plot a curve with clipped colored mask
    # NOTE(review): lw=-.5 is a negative linewidth — possibly a typo for
    # 0.5; confirm against the plotting API before changing.
    fig = curve2.plot_2d(cmap='viridis', curve=True, lw=-.5, edgecolor='k').get_figure()
    return fig
55ebb34b2fdf9f11565883b8f04035afcace17ec
36,778
def fscore_from_sed_eval_metrics(sed_eval_metrics):
    """Extract class-wise and averaged f-scores, precisions and recalls
    from a sed_eval metrics object.

    Args:
        sed_eval_metrics: sed_eval metrics object

    Returns:
        fscore (dict of float): fscore values
        precision (dict of float): precision values
        recall (dict of float): recall values
    """
    fscore, precision, recall = {}, {}, {}

    def _store(key, f_measure_dict):
        # All three result dicts share the same keys.
        fscore[key] = f_measure_dict['f_measure']
        precision[key] = f_measure_dict['precision']
        recall[key] = f_measure_dict['recall']

    classwise = sed_eval_metrics.results_class_wise_metrics()
    for key in classwise:
        _store(key, classwise[key]['f_measure'])
    _store('macro_average',
           sed_eval_metrics.results_class_wise_average_metrics()['f_measure'])
    _store('micro_average',
           sed_eval_metrics.results_overall_metrics()['f_measure'])
    return fscore, precision, recall
20beb5153182ad305a42b943da264f19e7694522
36,779
def concat(*args):
    """Join all positional string arguments into a single string.

    :param args: strings to concatenate
    :return: concatenated string
    """
    result = ''.join(args)
    return result
49080bd237610788c90d6aca2d6616c43de5144e
36,780
import datetime as _datetime def _get_last_day(datetime): """Return the start of the day before 'datetime', e.g. _get_last_day(April 1st) will return March 31st """ datetime = datetime - _datetime.timedelta(days=1) return _datetime.datetime( year=datetime.year, month=datetime.month, day=datetime.day, tzinfo=datetime.tzinfo )
01fbabcbac0d3dc4bab7cb93fa1fde39b0d16b8f
36,781
def find_default_route(routes):
    """Return the default route with the lowest metric.

    Routes lacking a 'metric' key are normalized in place to metric 0
    (same side effect as the original implementation).

    :param routes: iterable of route dicts
    :return: the best default route, or {} when none exists
    """
    defaults = []
    for route in routes:
        if route.get('destination') != 'default':
            continue
        route.setdefault('metric', 0)
        defaults.append(route)
    if not defaults:
        return {}
    defaults.sort(key=lambda route: int(route['metric']))
    return defaults[0]
4f88427991e82ca72a19d0457d99f16623e8f036
36,782
def player_bs_l(list, value, step=0, silent=False):
    """Player (B)inary (S)earch by (L)ast name.

    Recursively bisect a list sorted by the ``.last`` attribute and
    return a matching object once found.

    :param list: Set to look for (sorted ascending by ``.last``)
    :param value: Value to match
    :param step: Number of steps the search has already taken
    :param silent: Keep this function from printing the steps
    :return: None if not found, the matching object otherwise
    """
    step += 1
    if not silent:
        print("Step count: " + str(step))
    # Bug fix: the original required len(list) > 1 and recursed on
    # list[middle:] (including the probed element), so a single-element
    # list — and the upper element of a two-element split — could never
    # match. Recurse on strictly smaller slices and terminate on empty.
    if not list:
        return None
    middle = len(list) // 2
    if list[middle].last == value:
        return list[middle]
    if list[middle].last < value:
        return player_bs_l(list[middle + 1:], value, step, silent)
    return player_bs_l(list[:middle], value, step, silent)
31e8ea8943fcc37e7c00a58ce526b982ca57a942
36,784
import logging


def get_logger(name):
    """Get or create a logger with the specified name.

    Recommended usage: ``get_logger(__name__)``.
    """
    logger = logging.getLogger(name)
    return logger
d28588d8bc2a370a1711e81ccc42c51a81a1c8b2
36,785
def solve(roads, houses):
    """Return the minimal total cost: the sum of the cheapest
    ``houses - 1`` roads.

    :param roads: list of road costs in the problem
    :param houses: how many houses are in the problem
    :return: summed cost as the score

    Improvements over the original: the input list is no longer mutated
    (it was sorted in place and drained with O(n) pop(0) calls, O(n^2)
    overall), and fewer roads than ``houses - 1`` no longer raises
    IndexError.
    """
    return sum(sorted(roads)[:max(houses - 1, 0)])
1c532206784805cd137781b3181964015c96eee2
36,787
def kl_gauss(x, y, sig2=1.):
    """Kullback-Leibler divergence between Gaussian distributions with
    means *x*, *y* and common variance *sig2*."""
    diff = x - y
    return diff * diff / (2 * sig2)
caf6912e347192e85e3f4d6336d4b6bcb9d6468f
36,789
def get_profile(sdf):
    """Gets the field profiles of the specified Spark dataframe.

    String columns map to their distinct values ordered by descending
    frequency (ties broken by value, descending); non-string columns map
    to the placeholder list [1.0].

    :param sdf: Spark dataframe.
    :return: Dictionary of column name -> profile list.
    """
    dtypes = {k: v for k, v in sdf.dtypes}
    # Count ((column, value), 1) pairs for string columns, regroup the
    # counts per column, sort each column's (value, count) pairs by
    # (count, value) descending, then keep just the ordered values.
    cat_types = sdf.rdd \
        .map(lambda r: r.asDict()) \
        .flatMap(lambda r: [((k, r[k]), 1) for k, v in dtypes.items() if v == 'string']) \
        .reduceByKey(lambda a, b: a + b) \
        .map(lambda tup: (tup[0][0], {tup[0][1]: tup[1]})) \
        .reduceByKey(lambda a, b: {**a, **b}) \
        .map(lambda tup: (tup[0], [(k, v) for k, v in tup[1].items()])) \
        .map(lambda tup: (tup[0], sorted(tup[1], key=lambda t: (t[1], t[0]), reverse=True))) \
        .map(lambda tup: (tup[0], [t[0] for t in tup[1]])) \
        .collect()
    cat_types = {tup[0]: tup[1] for tup in cat_types}
    con_types = {k: [1.0] for k, v in dtypes.items() if v != 'string'}
    all_types = {**cat_types, **con_types}
    return all_types
a08ce5ce5410319a719394aa8aa18b95f73458c5
36,790
from datetime import datetime
from dateutil import parser


def liquid_date(base, fmt):
    """Format a date/datetime.

    "now" and "today" resolve to the current moment; any other *base*
    string is parsed with dateutil.
    """
    if base == "now":
        moment = datetime.now()
    elif base == "today":
        moment = datetime.today()
    else:
        moment = parser.parse(base)
    return moment.strftime(fmt)
4d67c9cfeaaf1dfcca5f5cdb246fba7d6374323f
36,791
from pathlib import Path
from typing import List


def dir_files(target_dir: Path, *, ext: str = "") -> List[str]:
    """Return the names of files in *target_dir*, optionally filtered by
    extension.

    :param target_dir: directory to list
    :param ext: keep only names ending with this suffix ("" keeps all)
    """
    # Bug fix: the original tested `(target_dir / f).is_file()`, but
    # iterdir() already yields paths prefixed with target_dir, so the
    # extra join doubled the prefix and every relative-path lookup failed.
    return [
        entry.name
        for entry in target_dir.iterdir()
        if entry.is_file() and entry.name.endswith(ext)
    ]
dd81a8df6d9405540a7409d321607e7bd9115bab
36,793
def _list2pair(s_list): """Convert a list into pair.""" return s_list.pair
06849caa28ee42e04505e2bfe86d10159c722084
36,794
def non_single_character_dividers():
    """Get a list of all the "blacklisted" dividers.

    These dividers are not made of a single character, for example:

    * Divider 18 - :code:`( ͡° ͜ʖ ͡°)`
    * Divider 33 - :code:`^,^,^,^,^,^,`
    * Divider 34 - :code:`&*&*&*&*&*&*`

    :return: A list of divider IDs.
    :rtype: List[int]
    """
    blacklist = (18, 19, 22, 33, 34, 35, 222, 223, 224, 226, 233, 234, 242, 244)
    return list(blacklist)
7d5d09f82d8bda82068a022e6d044ef42e56efb9
36,795
from typing import Union
from pathlib import Path
from typing import List


def second(filename: Union[str, Path], num_rounds: int = 10_000_000) -> int:
    """Part 2: play the crab-cups game with one million cups.

    The cup labels read from *filename* are extended up to 1,000,000,
    the circle is simulated for *num_rounds* moves, and the product of
    the two cup labels following cup 1 is returned.
    """
    # Labels are stored zero-based so they double as list indices.
    vals: List[int] = []
    with open(filename, "rt") as infile:
        for cup in infile.read().strip():
            vals.append(int(cup) - 1)
    vals.extend(range(max(vals) + 1, 1_000_000))
    num_vals = 1_000_000
    # val_to_next encodes the circle as a linked list:
    # val_to_next[v] is the cup clockwise of cup v.
    val_to_next = [0] * num_vals
    for val, next_val in zip(vals[:-1], vals[1:]):
        val_to_next[val] = next_val
    val_to_next[vals[-1]] = vals[0]
    cur_val = vals[0]
    for _ in range(num_rounds):
        # Pick up the three cups clockwise of the current cup.
        removed_vals = [
            val_to_next[cur_val],
            val_to_next[val_to_next[cur_val]],
            val_to_next[val_to_next[val_to_next[cur_val]]],
        ]
        # Destination: current label minus one, skipping picked-up cups
        # (modular arithmetic wraps below zero).
        next_cup = (cur_val - 1) % num_vals
        while next_cup in removed_vals:
            next_cup = (next_cup - 1) % num_vals
        # Splice the three removed cups back in after the destination cup.
        val_to_next[cur_val] = val_to_next[removed_vals[-1]]
        new_next = val_to_next[next_cup]
        val_to_next[next_cup] = removed_vals[0]
        val_to_next[removed_vals[-1]] = new_next
        cur_val = val_to_next[cur_val]
    # +1 converts the zero-based labels back to cup numbers.
    return (val_to_next[0] + 1) * (val_to_next[val_to_next[0]] + 1)
65974f0e5f73df15d8a9c62d192a86164a13155a
36,796
def parse_fasta(filename: str):
    """Parse a FASTA file.

    Reads the file and returns the header and sequence of each block;
    blocks are introduced by a '>' header line.

    :param filename: path to the FASTA file
    :return: (headers, sequences) — two parallel lists of strings

    Fixes over the original: the file handle is closed (context
    manager); a header line without a trailing newline no longer loses
    its last character (the original sliced ``line[1:-1]``); and an
    empty file now yields ``([], [])`` instead of a dangling empty
    sequence with no header.
    """
    headers: list = []
    sequences: list = []
    seq = ""
    have_record = False
    with open(filename, 'r') as handle:
        for line in handle:
            if line.startswith('>'):
                if have_record:
                    sequences.append(seq)
                headers.append(line[1:].rstrip())
                seq = ""
                have_record = True
            elif have_record:
                seq += line.rstrip()
    if have_record:
        sequences.append(seq)
    return headers, sequences
93f785e6059e24c75139ffb0118ce3a3b3c1f793
36,797
def get_meals(v2_response, building_id):
    """Extract meals into old format from a DiningV2 JSON response.

    Walks the first day's dayparts for *building_id* and rebuilds the
    legacy ``txt*``/``tbl*`` structure expected by older clients.
    """
    result_data = v2_response["result_data"]
    meals = []
    day_parts = result_data["days"][0]["cafes"][building_id]["dayparts"][0]
    for meal in day_parts:
        stations = []
        for station in meal["stations"]:
            items = []
            for item_id in station["items"]:
                item = result_data["items"][item_id]
                new_item = {}
                new_item["txtTitle"] = item["label"]
                # Legacy fields with no DiningV2 equivalent stay empty.
                new_item["txtPrice"] = ""
                new_item["txtNutritionInfo"] = ""
                new_item["txtDescription"] = item["description"]
                new_item["tblSide"] = ""
                new_item["tblFarmToFork"] = ""
                attrs = [{"description": item["cor_icon"][attr]} for attr in item["cor_icon"]]
                # Legacy format quirk: one attribute is a dict, several are
                # a list, none is an empty string.
                if len(attrs) == 1:
                    new_item["tblAttributes"] = {"txtAttribute": attrs[0]}
                elif len(attrs) > 1:
                    new_item["tblAttributes"] = {"txtAttribute": attrs}
                else:
                    new_item["tblAttributes"] = ""
                # Some responses encode "no options" as an empty list;
                # normalize to a dict before probing for "values".
                if isinstance(item["options"], list):
                    item["options"] = {}
                if "values" in item["options"]:
                    # NOTE(review): each iteration overwrites tblSide, so
                    # only the last side survives — confirm intended.
                    for side in item["options"]["values"]:
                        new_item["tblSide"] = {"txtSideName": side["label"]}
                items.append(new_item)
            stations.append({"tblItem": items, "txtStationDescription": station["label"]})
        meals.append({"tblStation": stations, "txtDayPartDescription": meal["label"]})
    return meals
636e8ccb9af8fd32326bc629ebeb965fb2e310f7
36,798
def merge_components(local_component, target_component):
    """Merge the first (local) component into the second (target).

    The result keeps the target's parent identifier; bounds are the
    union box, sizes are summed, and the center is the size-weighted
    average of the two centers (so it is not guaranteed to lie inside
    the component, e.g. for a "C" shape).
    """
    l_bounds, l_center, l_size, _ = local_component
    t_bounds, t_center, t_size, t_parent = target_component
    merged_bounds = [
        min(l_bounds[0], t_bounds[0]), max(l_bounds[1], t_bounds[1]),
        min(l_bounds[2], t_bounds[2]), max(l_bounds[3], t_bounds[3]),
        min(l_bounds[4], t_bounds[4]), max(l_bounds[5], t_bounds[5]),
    ]
    merged_size = l_size + t_size
    merged_center = (l_center * l_size + t_center * t_size) / merged_size
    return merged_bounds, merged_center, merged_size, t_parent
bdb9b437d0981f46b676c427034b7716c8727560
36,799
def get_number_of_ratings(ratings):
    """Gets the total number of ratings represented by the given ratings
    object.

    Args:
        ratings: dict. Maps the keys '1', '2', '3', '4', '5' to
            nonnegative integer frequency counts.

    Returns:
        int. The total number of ratings given.
    """
    total = 0
    for count in ratings.values():
        total += count
    return total
f6096357bbb380760c5b0087d1f63d27d958de30
36,801
def get_tweet_data(media_id):
    """Return tweet data for *media_id*.

    Placeholder implementation: the tweet data lives in the database but
    nothing is fetched yet, so an empty dict is always returned.
    """
    tweet_data = {}
    return tweet_data
23c19c66b73ea7277bbc4320e498c8d766f7a494
36,802
def validate(a, b) -> bool:
    """Determine whether *a* is valid against *b* by delegating to
    ``a.__validate__``."""
    result = a.__validate__(b)
    return result
af6d08f373f63000df4141066e4a69492cd8f0e8
36,803
import os


def samepath(p1, p2):
    """Return True if p1 and p2 refer to the same path.

    Both arguments (strings or os.PathLike objects) are compared via
    their absolute, symlink-resolved, case-normalized representation.
    """
    def canonical(path):
        return os.path.normcase(os.path.realpath(os.path.abspath(path)))

    return canonical(p1) == canonical(p2)
78ff71e78cfd3e1c353ce7d5a8035546610332fa
36,804
def families_quadrupoles():
    """Return quadrupole family names."""
    families = [
        'QF2L', 'QD2L', 'QF3L',
        'QD1', 'QF1',
        'QD2A', 'QF2A', 'QF2B', 'QD2B',
        'QF3', 'QD3', 'QF4', 'QD4',
    ]
    return families
a11c25e2554ffabe89615a6a12a70c9b78c8aa93
36,805
def tag(*tags):
    """Select a (list of) tag(s)."""
    return {"tag": list(tags)}
29b48da2ed4b56ff20feb1bc0fd0455cfbc7f8dc
36,807
def mat333mult(a, b):
    """Multiply a 3x3 matrix by a 3-vector.

    Parameters
    ----------
    a : tuple of tuple of float
        3x3 matrix.
    b : sequence of float
        Length-3 vector. (The original docstring described it as a
        nested "3x1 matrix", but the code indexes it as ``b[j]``.)

    Returns
    -------
    res : list of float
        The 3-element product vector.
    """
    r3 = range(3)
    # Generator feeds sum directly; no intermediate list needed.
    return [sum(a[i][j] * b[j] for j in r3) for i in r3]
f37803b028453f209b0a2e86f8b1b372962af47d
36,808
from typing import List
import re


def natural_sort(in_list: List[str]) -> List[str]:
    """Sort strings the way a human would, comparing numeric runs as
    numbers.

    :param in_list: list of strings which have to be sorted like humans do
    :return: sorted list of strings
    """
    def natural_key(text: str):
        parts = re.split('([0-9]+)', text)
        return [int(part) if part.isdigit() else part.lower() for part in parts]

    return sorted(in_list, key=natural_key)
7a5c0e2f9755fce0a14c29829096be850942935e
36,809
def zero(_):
    """Always return 0.

    :param _: anything (ignored)
    :return: 0
    """
    result = 0
    return result
f43560651de15f3aa1633ef31d5eed425f30015c
36,810
def module_defaults(module_config):
    """Apply default settings to a module configuration, filling in any
    keys it is missing.

    Missing keys get: rooms=[], params={}, enabled=True, path=None.
    Mutates and returns *module_config*.
    """
    module_config.setdefault('rooms', [])
    module_config.setdefault('params', {})
    module_config.setdefault('enabled', True)
    module_config.setdefault('path', None)
    return module_config
7f6fc2c3986357557df64af01c8ae277137e3886
36,812
import torch


def mse(y_pred, y_true, masks=None):
    """Compute mean square error (MSE) loss, optionally masked.

    Parameters
    ----------
    y_pred : :obj:`torch.Tensor`
        predicted data
    y_true : :obj:`torch.Tensor`
        true data
    masks : :obj:`torch.Tensor`, optional
        binary mask the same size as the data; zero entries remove the
        corresponding dimensions from the loss, so they contribute no
        parameter updates

    Returns
    -------
    :obj:`torch.Tensor`
        mean square error computed across all dimensions
    """
    squared_error = (y_pred - y_true) ** 2
    if masks is not None:
        squared_error = squared_error * masks
    return torch.mean(squared_error)
03354acc9538ebae85d83d28fc9c46a726c1dc92
36,813
def sort_state(state, key_name):
    """Return a copy of *state* whose rows are sorted into decreasing
    order by the named key (ties broken by student id, also decreasing).
    """
    key_col = state.names.index(key_name)
    ranked = sorted(
        ((state.data[stu][key_col], stu) for stu in state.students),
        reverse=True,
    )
    stu_order = [stu for _, stu in ranked]
    new_state = state.copy()
    new_state.data = [new_state.data[stu] for stu in stu_order]
    return new_state
b7d88d91b82027aa3ce05c97f7d8035aadc0fed5
36,816
import inspect


def remove_kwargs(model_fn, kwargs):
    """Drop entries of *kwargs* that ``model_fn`` cannot consume.

    Compares the kwargs against the function's signature and pops any
    key that is not a named parameter. Mutates and returns *kwargs*.

    Bug fix: the original pruned when the function DID declare a
    ``**kwargs`` catch-all (``varkw != None``) — but a catch-all can
    consume any keyword, so pruning is only needed when it is absent.
    Also uses the idiomatic ``is None`` instead of ``!= None`` and
    accounts for keyword-only parameters.
    """
    arg_spec = inspect.getfullargspec(model_fn)
    if kwargs and arg_spec.varkw is None:
        allowed = set(arg_spec.args) | set(arg_spec.kwonlyargs)
        for extra in set(kwargs) - allowed:
            kwargs.pop(extra)
    return kwargs
b88bda9bf253f2ff536ac614859f4999844f5cf6
36,817
def set_target(dataframe, target):
    """Split a dataset into predictors and the classification column.

    :param dataframe: Full dataset
    :param target: Name of classification column
    :return x: Predictors dataset
    :return y: Classification dataset
    """
    y = dataframe[target]
    x = dataframe.drop(columns=[target])
    return x, y
767b212d54295ad8681731583a849c48ca766400
36,818
def project_editors_primary_ips(cursor, query):
    """Run *query* against the recentchanges table and collect the rows.

    :param cursor: database cursor
    :param query: SQL selecting at least four columns
        (e.g. user_id, user_name, primary ip, plus one more)
    :return: list of 4-element lists, one per result row

    Note: the original docstring mentioned three fields, but the code
    has always collected four (row[0]..row[3]); the four-field behavior
    is kept. Slicing replaces the hand-written indexing — raise the
    slice bound to generalize to more fields.
    """
    cursor.execute(query)
    return [list(row[:4]) for row in cursor.fetchall()]
37cc3c33f5ca14d95fa90239b754d8b4f32498bf
36,819
import os
import fnmatch


def files(match='*.gms', ext='rst'):
    """Return all input files in ../message_ix/model matching *match*
    and their associated output files with the given extension.
    """
    pth = os.path.join('..', 'message_ix', 'model')
    ins = []
    for dirname, _, filenames in os.walk(pth):
        for filename in fnmatch.filter(filenames, match):
            ins.append(os.path.join(dirname, filename))
    outs = []
    for inf in ins:
        parent, base = os.path.split(inf)
        stem = os.path.splitext(base)[0]
        outs.append(os.path.join(
            'source', 'model', parent[len(pth) + 1:],
            '{}.{}'.format(stem, ext)))
    return ins, outs
91479a1b272dcb5f18a53330505fcc48fc7a23fa
36,820
def ticker_price_for_dest_amount(side: str, start_amount: float, dest_amount: float):
    """Compute the order price converting *start_amount* into
    *dest_amount*, considering the order's side.

    :param side: "buy" or "sell" (case-insensitive)
    :param start_amount: amount spent
    :param dest_amount: amount received
    :return: the price, or False for an unrecognized side
    :raises ValueError: on zero amounts or when side is None
    """
    if dest_amount == 0 or start_amount == 0:
        # Typo fix in the error message: "ot" -> "or".
        raise ValueError("Zero start or dest amount")
    if side is None:
        raise ValueError("RecoveryManagerError: Side not set")
    side = side.lower()
    if side == "buy":
        return start_amount / dest_amount
    if side == "sell":
        return dest_amount / start_amount
    # Preserved legacy behavior: unknown sides return False, not raise.
    return False
c9ed70b7b62df29f47e873baae49e60fcb368523
36,821
import torch


def alpha_estimation_loss(pred_alpha, gt_alpha, input_trimap_argmax):
    """L1 alpha-matte loss restricted to the trimap's unknown region.

    input_trimap_argmax encoding (per the original comment):
        0: background
        1: unknown
        2: foreground

    NOTE(review): despite that encoding, the mask compares against
    128/255 (a gray pixel value), not against class index 1 — presumably
    the tensor holds normalized pixel values rather than argmax indices;
    confirm against the data pipeline.
    """
    # mask = torch.zeros(input_trimap_argmax.shape).cuda()
    # mask[input_trimap_argmax == 1] = 1.
    mask = input_trimap_argmax.eq(128 / 255).type(torch.FloatTensor)
    mask = mask.unsqueeze(dim=1)
    # transforms.ToPILImage()(mask[0, :, :, :]).save("temp_pics/mask.png")
    mask = mask.cuda()  # requires a CUDA device
    # 1e-12 keeps the gradient of abs() defined at zero difference;
    # the +1. in the denominator guards against an all-zero mask.
    diff = (pred_alpha - gt_alpha + 1e-12).mul(mask)
    return torch.abs(diff).sum() / (mask.sum() + 1.)
1eaf486a8f2a8d20ea536fb00593ec682cf08538
36,822
def reverse_string(input: str) -> str:
    """Return the reversed input string.

    The original recursive implementation copied a slice per character
    (O(n^2) time) and hit Python's recursion limit near 1000 characters;
    a reverse slice is O(n) and works for any length.
    """
    return input[::-1]
78ee7be0f20855b35b3ee541170555d6d3c4ab2a
36,823
def render_profile_list(profile_list):
    """Inclusion tag for rendering a list of profiles.

    Context::

        Profile List

    Template::

        accounts/profile_list.html
    """
    return {'profile_list': profile_list}
7c3673f719ec061e4b9cade34195178c39b252c5
36,824
import math


def isprime_ver2(n):
    """Return True if n is prime, False otherwise.

    Trial division only needs to check candidates up to sqrt(n): any
    composite N = A * B must have at least one factor <= sqrt(N),
    because if both A and B exceeded sqrt(N) then A*B would exceed N.
    """
    if n < 2:
        return False
    limit = math.sqrt(n)
    candidate = 2
    while candidate <= limit:
        if n % candidate == 0:
            return False
        candidate += 1
    return True
093a8298ee1b5a38a87abdf4e5d6c7814039377f
36,825
def _kann_nicht(message, groups): """Return a certain quip referring to a specific user. """ return [ {'text': 'kann-nicht wohnt in der will-nicht-straße,' ' {}'.format(message.sender)} ]
6dd93959c199d1919bcd208f9a76fb721c79db87
36,826
import re def _remove_command(text, command): """Removes '\\command{*}' from the string 'text'. Regex expression used to match balanced parentheses taken from: https://stackoverflow.com/questions/546433/regular-expression-to-match-balanced-parentheses/35271017#35271017 """ return re.sub(r'\\' + command + r'\{(?:[^}{]+|\{(?:[^}{]+|\{[^}{]*\})*\})*\}', '', text)
b5a4284aac7fc28e5eb4f57295fd078f1931a849
36,827
def aerocom_n(x, bc, oc, so2, nh3):
    """ERFari linear in emissions including nitrate.

    Inputs
    ------
    x : obj:`numpy.array`
        Time series of aerosol emissions; rows 0..3 are BC, OC, SO2, NH3
    bc : float
        Radiative efficiency of black carbon, W m**-2 (TgC yr**-1)**-1
    oc : float
        Radiative efficiency of organic carbon, W m**-2 (TgC yr**-1)**-1
    so2 : float
        Radiative efficiency of sulfate (expressed as SO2 emissions),
        W m**-2 (TgSO2 yr**-1)**-1
    nh3 : float
        Radiative efficiency of nitrate (expressed as NH3 emissions),
        W m**-2 (TgSO2 yr**-1)**-1

    Returns
    -------
    res : obj:`numpy.array`
        Time series of ERFari
    """
    efficiencies = (bc, oc, so2, nh3)
    return sum(eff * x[i] for i, eff in enumerate(efficiencies))
7a5b69a0b00a3840f4d974d47443af353bcfa06e
36,828
def perm(n, m):
    """Permutation count: nPm.

    >>> perm(5, 2)  # 5*4
    20
    """
    result = 1
    for k in range(m):
        result *= n - k
    return result
5152906fa75bfef6141d59ca7e7d5d51ddd6b4dd
36,830
def get_item(dict, key):
    """Get an item from a mapping (None when the key is absent)."""
    value = dict.get(key)
    return value
ba7e54f68c788319890deef6428f8969b042aed9
36,831
from functools import reduce


def countWrong(L, tolerance):
    """Return the number of elements of L with an absolute value above
    the specified tolerance.

    (In the original, the docstring was split across two string
    literals, so the second half was a dead statement rather than
    documentation; the reduce-over-map construction is also replaced by
    the idiomatic generator sum.)
    """
    return sum(1 for x in L if abs(x) > tolerance)
006fbd91593de2f8b7c67e13865f1e766bc78b37
36,833
import hashlib


def get_gravatar_for_email(email: str):
    """Return an 80px Gravatar URL for the given email address.

    Async friendly.
    """
    digest = hashlib.md5(email.encode("utf-8").lower()).hexdigest()
    return "https://www.gravatar.com/avatar/{}.jpg?s=80&d=wavatar".format(digest)
b88e6a29f11cbdc56666c0dff02398382f29be8a
36,834
def renduMonnaieDynaIterDetail(montant, systeme, verbose = False):
    """Bottom-up dynamic-programming coin change.

    Builds, for every amount up to ``montant``, the minimal number of coins
    from ``systeme`` needed, then walks the table backwards to reconstruct
    the actual coins handed back.

    Args:
        montant: amount to give back.
        systeme: coin denominations, assumed sorted ascending.
            NOTE(review): the outer loop steps by ``systeme[0]``, so amounts
            are only all filled when systeme[0] == 1 — confirm with callers.
        verbose: when True, print the full DP table.

    Returns:
        List of coin values summing to ``montant``.
    """
    # pieces_min[m][i] = [min #coins for amount m using coins[0..i],
    #                     value of the last coin used for that optimum]
    pieces_min = [ [[0, 0] for _ in range(len(systeme))] for _ in range(montant + 1)]
    for m in range(1, montant + 1, systeme[0]):
        # Base column: only the smallest coin is available.
        if m % systeme[0] == 0:
            pieces_min[m][0] = [m // systeme[0], systeme[0]]
        else:
            pieces_min[m][0] = [0, systeme[0]]
        for indexpiece in range(1, len(systeme)):
            if systeme[indexpiece] <= m:
                p = m - systeme[indexpiece]
                if 1 + pieces_min[p][indexpiece][0] < pieces_min[m][indexpiece - 1][0]:
                    # minimal number of coins returned for amount m
                    # using coins with index <= indexpiece
                    pieces_min[m][indexpiece][0] = 1 + pieces_min[p][indexpiece][0]
                    # value of the last coin returned
                    pieces_min[m][indexpiece][1] = systeme[indexpiece]
                else:
                    pieces_min[m][indexpiece] = pieces_min[m][indexpiece - 1]
            else:
                pieces_min[m][indexpiece] = pieces_min[m][indexpiece - 1]
    if verbose:
        print(pieces_min)
    nbpieceMin = pieces_min[montant][-1][0]  # NOTE(review): computed but never used
    dernierePiece = pieces_min[montant][-1][1]
    # Reconstruct the answer by repeatedly peeling off the recorded last coin.
    reste = montant - dernierePiece
    rendu = [dernierePiece]
    while reste > 0:
        dernierePiece = pieces_min[reste][-1][1]
        reste = reste - dernierePiece
        rendu.append(dernierePiece)
    return rendu
9392263c054fbfe7cb9b3c90b2d75fe3e60300d2
36,835
def filter_headers(http_obj, filtered):
    """Return a dict of http_obj.headers with the names in `filtered` removed."""
    kept = {}
    for name, val in http_obj.headers.items():
        if name not in filtered:
            kept[name] = val
    return kept
5a44e28d0d0a367790be357d93285467006bb3a7
36,836
import os


def pull_screenshot():
    """Capture a device screenshot via adb into the first free "N.png" name.

    Scans 1..99 for a filename not present in the working directory, takes
    the screenshot on the device and pulls it locally.  Returns the filename,
    or None when all 99 names are taken.
    """
    for n in range(1, 100):
        name = "%d.png" % (n)
        if os.path.exists(name):
            continue
        os.system('adb shell screencap -p /sdcard/%s' % name)
        os.system('adb pull /sdcard/%s .' % name)
        return name
    return
1ad2564b92fab43dd2293769675c3b96ee5d0510
36,837
def agg_tsmonthly(ts, **kwargs):
    """
    Parameters
    ----------
    ts : pandas.DataFrame
        time series of a point
    kwargs : dict
        any additional keyword arguments that are given to the ts2img object
        during initialization

    Returns
    -------
    ts_agg : pandas.DataFrame
        aggregated time series; each column becomes a layer in the image,
        so all outputs must share the same length
    """
    # Simple example aggregation: sample the series at month-end timestamps.
    monthly = ts.asfreq("M")
    return monthly
db6d27d589f6be65c7c10882e176fd2f29d88d7e
36,838
def scale_vector(v, factor):
    """Scale vector v in place by float `factor` and return the same object."""
    v.x *= factor
    v.y *= factor
    v.z *= factor
    return v
6b5ded0fb7b52c39d33f1750bddda19cf6fd74b4
36,841
def extract_nothing(fileobj, keywords, comment_tags, options):
    """Pseudo extractor that does not actually extract anything, but
    simply returns an empty list.
    """
    del fileobj, keywords, comment_tags, options  # unused by design
    return []
db941abdf86c344863c56acf12312f97261e383f
36,842
def getToken(signature, token, end_char):
    """Return the substring of `signature` between `token` and the
    following `end_char`."""
    begin = signature.find(token) + len(token)
    finish = signature.find(end_char, begin)
    return signature[begin:finish]
876e7e4f7d3878c98899cdfcaac9dadd4847815c
36,843
def mod_inverse(a, m):
    """Return a^-1 mod m (modular inverse).

    Uses the iterative extended Euclidean algorithm, which (unlike the
    previous recursive version) cannot hit the recursion limit for large
    inputs and handles negative `a` correctly.

    Raises:
        ValueError: if gcd(a, m) != 1, i.e. no inverse exists.
    """
    # Invariant: old_r == old_s*a (mod m) throughout the loop.
    old_r, r = a, m
    old_s, s = 1, 0
    while r != 0:
        q = old_r // r
        old_r, r = r, old_r - q * r
        old_s, s = s, old_s - q * s
    if old_r != 1:
        raise ValueError("No modular inverse for: a={:d}, m={:d}".format(a, m))
    return old_s % m
765d84ddaa2416f62d42ac45ecfff360a0579fa8
36,844
import re


def parseExcludeAgentCases(spec):
    """
    Parses "exclude-agent-cases" from the spec into a list of pairs of
    agent pattern and case pattern list.

    Each glob ('*' wildcard, literal '.') is compiled into an anchored regex.
    Returns [] when the key is absent.
    """
    # Fixed: dict.has_key() is Python 2 only; use the `in` operator.
    if "exclude-agent-cases" not in spec:
        return []

    def _glob_to_regex(glob):
        # Translate a simple glob into an anchored regular expression.
        return re.compile("^" + glob.replace('.', r'\.').replace('*', '.*') + "$")

    pats1 = []
    for agent_glob, case_globs in spec["exclude-agent-cases"].items():
        case_pats = [_glob_to_regex(z) for z in case_globs]
        pats1.append((_glob_to_regex(agent_glob), case_pats))
    return pats1
a0af6e00d1ff5fe099aa5a4e70beccc4ed72a1fa
36,846
import argparse


def get_arguments(argv=None):
    """Parse all the arguments.

    Args:
        argv: optional list of argument strings; defaults to sys.argv[1:]
            (the new parameter is backward compatible and makes the parser
            testable without touching the process arguments).

    Returns:
        A namespace of parsed arguments.
    """
    def _str2bool(value):
        # argparse's `type=bool` treats ANY non-empty string (including
        # "False") as True; parse the common spellings explicitly instead.
        if isinstance(value, bool):
            return value
        lowered = value.lower()
        if lowered in ("true", "t", "yes", "y", "1"):
            return True
        if lowered in ("false", "f", "no", "n", "0"):
            return False
        raise argparse.ArgumentTypeError("Boolean value expected, got %r" % value)

    parser = argparse.ArgumentParser(description="TF2 Semantic Segmentation")
    parser.add_argument("--input_size", type=str, default='128,128',
                        help="Input shape: [H, W]")
    # data config
    parser.add_argument("--img_path", type=str, default='./data/nuclei_data',
                        help="Path to the directory containing the cityscapes validation images.")
    parser.add_argument("--num_classes", type=int, default=2,
                        help="Number of classes to predict.")
    # model config
    parser.add_argument("--ckpt_path", type=str, default='float/',
                        help="Path to the save the trained weight file.")
    parser.add_argument("--resume_file", type=str, default=None,
                        help="resume the h5 file.")
    # others
    parser.add_argument("--gpus", type=str, default='0',
                        help="choose gpu devices.")
    parser.add_argument("--batch_size", type=int, default=32,
                        help="batch size for per-iteration training")
    parser.add_argument("--learning_rate", type=float, default=1e-3,
                        help="base learning rate")
    parser.add_argument("--epochs", type=int, default=100,
                        help="training epochs")
    # quantization config
    parser.add_argument("--quantize", type=_str2bool, default=False,
                        help="whether do quantize or not.")
    parser.add_argument("--quantize_output_dir", type=str, default='./quantized/',
                        help="directory for quantize output files.")
    parser.add_argument("--dump", type=_str2bool, default=False,
                        help="whether do dump or not.")
    parser.add_argument("--dump_output_dir", type=str, default='./quantized/',
                        help="directory for dump output files.")
    return parser.parse_args(argv)
c83ff2f11e67500fc14c36a3a924ac0fb6719dba
36,849
def construct_parameters(**kwargs):
    """Translates data to a format suitable for Zabbix API

    Args:
        **kwargs: Arguments passed to the module.

    Returns:
        A dictionary of arguments in a format that is understandable
        by Zabbix API.
    """
    mappings = kwargs['mappings']
    if mappings is None:
        return {'name': kwargs['name']}
    translated = [
        {'value': mapping['value'], 'newvalue': mapping['map_to']}
        for mapping in mappings
    ]
    return {'name': kwargs['name'], 'mappings': translated}
2e92756a61325932e57179e1b0df624a90b6dda8
36,850
from typing import Union
import re


def compile_regex(regex: Union[str, re.Pattern], flags: int = 0) -> re.Pattern:
    """Compile the regex string/object into a pattern object with the given flags.

    A pre-compiled pattern is returned unchanged when its flags already
    match; otherwise its source is recompiled with the requested flags.

    Note: uses `re.Pattern` — `typing.Pattern` was deprecated in 3.8 and
    removed in Python 3.12, so the previous isinstance check broke there.
    """
    if isinstance(regex, re.Pattern):
        if regex.flags == flags:
            return regex
        regex = regex.pattern
    return re.compile(regex, flags)
b6aaad538bd7c802d9969e38580c4feb349657a8
36,852
def __diff_strings(str1, str2):
    """ Compare two strings and return the substrings where they differ
    (e.g. "ABC/def" and "ABC/ddd" would return "ef" and "dd")
    """
    len1 = len(str1)
    len2 = len(str2)
    minlen = min(len1, len2)
    diff = None
    for idx in range(minlen):
        if str1[idx] != str2[idx]:
            diff = idx
            break
    if diff is not None:
        # Fixed off-by-one: slicing previously started at diff-1, which
        # included one still-matching character ("def"/"ddd" instead of the
        # documented "ef"/"dd") and disagreed with the length-mismatch
        # branch below, which slices from the first differing index.
        return str1[diff:], str2[diff:]
    if len1 == len2:
        return "", ""
    return str1[minlen:], str2[minlen:]
a831513e14aa703d4e15859b493772c484863437
36,853
def reason_is_ne(field: str, expected, got) -> str:
    """
    Create a string that is describes two values being unequal

    Args:
        field: the name of the mismatched field
        expected: the expected value
        got: the actual value
    """
    return "{} mismatch: expected {}, got {}".format(field, expected, got)
2b2a641e97d5e48db1b6b90a9ae501157912c95b
36,854
from typing import Optional


def safe_language_tag(name: Optional[str]) -> str:
    """Convert language names to tags that are safe to use for identifiers
    and file names.

    Args:
        name: Name to convert to a safe name. Can be `None`.

    Returns:
        A safe string to use for identifiers and file names.
    """
    if name is None:
        return ""
    lowered = name.lower()
    special_cases = {"c++": "cpp", "objective-c": "objc"}
    return special_cases.get(lowered, lowered)
ef128910a8b17d41f165147e5ac7eea82677a1d5
36,855
def neighbor8(board: str, index, stone):
    """Compute the 8-neighbor bit pattern around ``index`` for ``stone``.

    Each of the eight cells surrounding ``index`` on a 13-column board sets
    one bit in the result when it holds ``stone``.  (The original docstring
    claimed the result is None when a stone sits in the middle, but the code
    never inspects board[index] — NOTE(review): confirm callers pre-check
    the center cell.)

    Parameters
    ----------
    board:
        Flat board string; '/' separators, spaces and newline characters
        must already be stripped, leaving a 13-wide grid.
    """
    num = 0
    col = index % 13   # column within the 13-wide row
    row = index // 13  # row number
    # northwest
    if 0 <= index-14 and board[index-14] == stone and col != 0 and row != 0:
        num += 0b00001000
    # north
    if 0 <= index-13 and board[index-13] == stone and row != 0:
        num += 0b00000100
    # northeast
    if 0 <= index-12 and board[index-12] == stone and col != 12 and row != 0:
        num += 0b00000010
    # west
    if 0 <= index-1 and board[index-1] == stone and col != 0:
        num += 0b00010000
    # east
    if index + 1 < len(board) and board[index+1] == stone and col != 12:
        num += 0b00000001
    # southwest
    if index + 12 < len(board) and board[index+12] == stone and col != 0 and row != 12:
        num += 0b00100000
    # south
    if index + 13 < len(board) and board[index+13] == stone and row != 12:
        num += 0b01000000
    # southeast
    if index + 14 < len(board) and board[index+14] == stone and col != 12 and row != 12:
        num += 0b10000000
    return num
84e9b2fa22ddb3181ff654a518297e5a6b3b2390
36,856
def filter_connections(connections, annotations):
    """
    Keep connections if they were assigned the 'Equal' label or if they
    were not annotated.

    :param list connections: List of candidate connections.
    :param list annotations: List of annotations from the prodigy db-out command.
    :returns: Filtered connections.
    :rtype: list
    """
    annotated_idxs = {ann["_input_hash"] for ann in annotations}
    # 1 corresponds to the equal label.
    equal_idxs = [ann["_input_hash"] for ann in annotations
                  if ann["answer"] == "accept" and 1 in ann["accept"]]
    not_annotated_idxs = [i for i in range(len(connections))
                          if i not in annotated_idxs]
    keep_idxs = equal_idxs + not_annotated_idxs
    return [connections[i] for i in keep_idxs]
36bb0f6b5c99062c9334a0b1ef14d18f18b2737b
36,857
def select_workflow(gi, folder_id, workflow_names, sample, run, lh):
    """
    Select a workflow (either single or paired) based on the number of
    datasets contained in the current data library folder.
    """
    folder_info = gi.folders.show_folder(folder_id)
    num_datasets = folder_info['item_count']
    workflow_name = None
    if num_datasets == 1:
        workflow_name = workflow_names['SINGLE']
    elif num_datasets == 2:
        workflow_name = workflow_names['PAIRED']
    if workflow_name:
        lh.write('Selected workflow named %s for sample %s of run %s\n' % (workflow_name, sample, run))
    return workflow_name, num_datasets
e5e659ab8b2dabd53456d01ed82c2430f4543da4
36,860
import inspect


def is_exception(obj):
    """Check if an object is an exception class."""
    if not inspect.isclass(obj):
        return False
    return issubclass(obj, Exception)
b6e84dbef8b55740d4c7caf13cfdc5efb9c4e47a
36,861
def getNounChunks(row):
    """Collect the text of each spaCy noun_chunk in the row's 'textDOC'."""
    sent = row['textDOC']
    return [chunk.text for chunk in list(sent.noun_chunks)]
e4dbf4e99aac02313d522f07052dcefa8409a767
36,862
def _createModule(module_name, source):
    """Create and return a module-like object with the given name from
    the given source code.  This module will not appear in sys.modules.

    If the source code has a syntax error, an exception will be raised.
    """
    class ModuleWrapper:
        pass
    module = ModuleWrapper()
    module.__dict__ = {}
    module.__name__ = module_name
    # The previous implementation silently ignored both arguments; compile
    # and execute the source so the documented contract actually holds
    # (compile() raises SyntaxError on malformed source).
    # NOTE: exec of arbitrary source — callers must only pass trusted code.
    code = compile(source, module_name, "exec")
    exec(code, module.__dict__)
    return module
901416a5bdd112cf7ab8b9c54a5ab36fb20e46d8
36,863
def _default_command_dict(name, error_path, output_path, working_dir,
                          h_rt='99:99:99', s_rt='99:99:99'):
    """Default qsub command dict"""
    return {
        'command': None,
        'N': name,
        'V': '',
        'l h_rt': h_rt,
        'l s_rt': s_rt,
        'wd': working_dir,
        'e': error_path,
        'o': output_path,
    }
e70ae1e84060c09c413912bf61126c35a1ac39c0
36,864
import re


def process_value(row):
    """Cleanup the value field.

    Appends ' ohms' to bare numeric resistor values (reference starting
    with 'R'); values already carrying a multiplier/unit are left alone.
    Mutates and returns the same row.
    """
    if row['reference'].upper()[0] == 'R':
        # Raw string for the regex (the unescaped '\d' previously emitted
        # an invalid-escape DeprecationWarning on modern Python).
        if re.match(r'\d*\.*\d$', row['value']):
            row['value'] = row['value'] + ' ohms'
    return row
b7d63894f51459db851746681b484b218a5ef51f
36,865
import aiohttp


async def request_url(url: str, session: aiohttp.ClientSession) -> dict:
    """
    requests a abuseipdb api url and returns its data

    :param url: str, abuseipdb api url
    :param session: aiohttp.ClientSession, client session with api key in header
    :return: dict, data about an api (empty dict on any non-200 status)
    """
    async with session.get(url) as response:
        if response.status != 200:
            return {}
        return await response.json(encoding="utf-8")
d47e7610a81690e3ed737a7c895104ecf18f4a06
36,866
def prefix(s1, s2):
    """ Return the length of the common prefix of s1 and s2,
    with s1 treated as repeating cyclically. """
    n1 = len(s1)
    for idx, ch in enumerate(s2):
        if s1[idx % n1] != ch:
            return idx
    return len(s2)
ea8766c65e8640e7d0c25003389a62058b7117f9
36,867
import re


def FirstTextMatch(node_list, search_regex):
    """Find the first Node in node_list which matches search_regex.

    Args:
        node_list: A container of Node objects.
        search_regex: A regular expression to match with the Node object.

    Returns:
        The first Node object in node_list whose .text matches
        search_regex, or None if none were found.
    """
    pattern = re.compile(search_regex)
    return next((node for node in node_list if pattern.search(node.text)), None)
eb773eb0b5ce783f4df589ecf785c520a33d4750
36,868
def _parse_propertyobj_section(self, section):
    """Injected into :class:`sphinx.ext.napoleon.docstring.GoogleDocstring`
    to transform a `Properties` section and add `.. propertyobj` directives
    (monkeypatching is done in conf.py)
    """
    lines = []
    field_type = ':Properties:'
    padding = ' ' * len(field_type)  # NOTE(review): unused, as is `multi` below
    fields = self._consume_fields()
    multi = len(fields) > 1
    lines.append(field_type)
    for _name, _type, _desc in fields:
        field_block = []
        field_block.append(f'.. propertyobj:: {_name}')
        if _type:
            field_block.extend(self._indent([f':type: {_type}'], 3))
        # Map the declared type onto the matching pydispatch property class.
        prop_cls = 'Property'
        if _type == 'dict':
            prop_cls = 'DictProperty'
        elif _type == 'list':
            prop_cls = 'ListProperty'
        field_block.extend(self._indent([f':propcls: {prop_cls}'], 3))
        # field_block.append(f'.. propertyobj:: {_name} -> :class:`~pydispatch.properties.{prop_cls}`(:class:`{_type}`)')
        field_block.append('')
        # Re-emit the field description indented beneath the directive.
        field = self._format_field('', '', _desc)
        field_block.extend(self._indent(field, 3))
        field_block.append('')
        lines.extend(self._indent(field_block, 3))
    return lines
622ceaa1f5f0fca1bb4bd9a83ac0c841a58ffcab
36,869
import numpy


def check_diagonal_coulomb(mat: numpy.ndarray) -> bool:
    """Look at the structure of the two body matrix and determine
    if it is diagonal coulomb

    Args:
        mat (numpy.ndarray) - input two-body Hamiltonian elements,
            shape (dim, dim, dim, dim)

    Returns:
        (bool) - whether mat is diagonal Coulomb
    """
    dim = mat.shape[0]
    assert mat.shape == (dim, dim, dim, dim)
    # Diagonal Coulomb means every element outside the i==k, j==l slots is
    # exactly zero.  Build a boolean mask of the allowed slots and check that
    # everything else vanishes — a vectorized replacement for the original
    # O(dim**4) Python loop, with identical results.
    eye = numpy.eye(dim, dtype=bool)
    allowed = eye[:, None, :, None] & eye[None, :, None, :]
    return bool(numpy.all(mat[~allowed] == 0))
18b56049b76c61025298a81b27b078e8d1f78442
36,871
def _filter_out_existing_packages(deps_list):
    """Filter out packages supplied by a public AWS layer"""
    ignored_packages = ['numpy', 'scipy']
    kept = []
    for dep in deps_list:
        if not any(ignored in dep for ignored in ignored_packages):
            kept.append(dep)
    return kept
28f4b9fbb303cd427fe99ba90ac080a1a1a99b49
36,872
from functools import reduce
import operator


def sum_num(lst):
    """Fold the elements of lst together with the + operator.

    >>> sum_num([1, 2, 3])
    6
    """
    add = operator.add
    return reduce(add, lst)
54b936324a69e8473f22933b1aaade0d10b86727
36,874
def flatten_single(x, begin_axis=1):
    """
    Flatten a tensor in all dimensions from @begin_axis onwards.

    Args:
        x (torch.Tensor): tensor to flatten
        begin_axis (int): which axis to flatten from

    Returns:
        y (torch.Tensor): flattened tensor
    """
    kept_dims = list(x.size()[:begin_axis])
    kept_dims.append(-1)
    return x.reshape(*kept_dims)
53899640bf8ee5e6b732e4e58257ae33b8285466
36,876
import re


def get_base_point(word, page):
    """Count case-insensitive whole-word occurrences of `word` in `page`.

    Every non-letter character in the lowercased page acts as a delimiter.
    """
    tokens = re.sub('[^a-z]', '.', page.lower()).split('.')
    target = word.lower()
    return sum(1 for token in tokens if token == target)
badf001051faf2995063e1968bdeba49246cb07e
36,877
def staff_action_message(row_update: int) -> str:
    """Message for staff in admin panel after some action"""
    if row_update != 1:
        return f'{row_update} rows were updated'
    return '1 row was updated'
c6a1dc100bfb3e74c21efc65684a6028b547e63e
36,879