Dataset columns: content (string, 39 to 14.9k chars), sha1 (string, 40 chars), id (int64, 0 to 710k).
import math


def cosine_decay_restarts(
    steps_for_cycle,
    max_epochs,
    increase_restart_interval_factor=2,
):
    """Learning rate factors for cyclic schedule with restarts."""
    lr_factors = []
    step = 0
    cycle = 0
    for _ in range(0, max_epochs + 1):
        step += 1
        completed_fraction = step / steps_for_cycle
        cosine_decayed = 0.5 * (1 + math.cos(math.pi * completed_fraction))
        lr_factors.append(cosine_decayed)
        if completed_fraction == 1:
            step = 0
            cycle += 1
            steps_for_cycle = steps_for_cycle * increase_restart_interval_factor
    return lr_factors
0cb40cdbbd733f08d94afa4c810bf04e2701efa2
68,982
def filter_optimized_results(df, err_param_name, score_name, is_higher_score_better):
    """Removes suboptimal rows from the dataframe, returning only the best ones.

    Args:
        df (pandas.DataFrame): A dataframe containing all the data returned by the runner.
        err_param_name (str): The name of the error parameter by which the data will be grouped.
        score_name (str): The name of the score type we want to optimize.
        is_higher_score_better (bool): If true, then only the highest results are returned.
            Otherwise the lowest results are returned.

    Returns:
        pandas.DataFrame: A dataframe containing the optimized results.
    """
    if is_higher_score_better:
        df_ = df.loc[df.groupby(err_param_name, sort=False)[score_name].idxmax()].reset_index(drop=True)
    else:
        df_ = df.loc[df.groupby(err_param_name, sort=False)[score_name].idxmin()].reset_index(drop=True)
    df_.name = df.name
    return df_
6e744c97a243c8dd5e8d1dfe1f5fa7e85f6d6fee
68,986
def find_home_listing_urls(soup):
    """
    Finds all the relative individual house data links on a landing page.

    :param soup: a bs4 Soup object
    :return: list of strings. Each string is a relative URL representing one house listing.
    """
    listing_url_list = []
    for url in soup.find_all("a", class_="cover-all"):
        listing_url_list.append(url['href'])
    return listing_url_list
9b4072e2f9167bc1c59eae7618a07826d001ef42
68,987
from datetime import datetime


def validateTimecode(timecode):
    """Checks if a string is a timecode of format [%M:%S.%f]"""
    try:
        x = datetime.strptime(timecode, '[%M:%S.%f]')
        return True
    except ValueError:
        return False
2c363f9b0145ed50a8d9ffdb589d89a01335c394
68,989
def compute_overlap(mapping, box1, box2):
    """Computes overlap between the two boxes.

    Overlap is defined as % atoms of box1 in box2. Note that overlap is
    not a symmetric measurement.
    """
    atom1 = set(mapping[box1])
    atom2 = set(mapping[box2])
    return len(atom1.intersection(atom2)) / float(len(atom1))
fc612767e6011883b3d2dddaf7530ae379fb32e5
68,990
def speech_bubble(msg_lines):
    """Wraps the lines of a message in an ASCII speech bubble."""
    msg_width = max([len(x) for x in msg_lines])
    lines = []
    lines.append(" " + (msg_width + 2) * "_" + " ")
    lines.append(" /" + (msg_width + 2) * " " + "\\")
    for line in msg_lines:
        lines.append(" | " + line.center(msg_width) + " |")
    lines.append(" \\" + (msg_width + 2) * "_" + "/")
    return lines
b30a73c87714048a0b190e65816ad0d1dcac8d46
68,994
import functools
import time


def timeit(text):
    """Measure function execution time and print it."""
    def decorator_timeit(func):
        @functools.wraps(func)
        def timed(*args, **kw):
            ts = time.time()
            result = func(*args, **kw)
            te = time.time()
            timeresult = te - ts
            print(f"{text}::time spent {timeresult:.2f} seconds")
            return result
        return timed
    return decorator_timeit
782220e20ded0fdd3efe249cc4642bf0d50e6282
68,996
def stem(word):
    """Stem word to primitive form"""
    return word.lower().rstrip(",.!:;'-\"").lstrip("'\"")
362a1d1c826c90627e5a05a40c973abc43ce4d76
69,000
def short_to_decimal(code):
    """Convert an ICD9 code from short format to decimal format."""
    if len(code) <= 3:
        return code.lstrip("0")
    else:
        return code[:3].lstrip("0") + "." + code[3:]
900ab58dcb59a49b5475d71d08e5720657e03f9e
69,002
def HumanizeBytes(totalBytes, precision=1, suffix=None):
    """
    Convert a number of bytes into the appropriate pretty kiB, MiB, etc.

    Args:
        totalBytes: the number to convert
        precision: how many decimal numbers of precision to preserve
        suffix: use this suffix (kiB, MiB, etc.) instead of automatically determining it

    Returns:
        The prettified string version of the input
    """
    if totalBytes is None:
        return "0 B"
    converted = float(totalBytes)
    suffix_index = 0
    suffix_list = ['B', 'kiB', 'MiB', 'GiB', 'TiB']
    while abs(converted) >= 1000:
        converted /= 1024.0
        suffix_index += 1
        if suffix_list[suffix_index] == suffix:
            break
    return "{0:.{1}f} {2}".format(converted, precision, suffix_list[suffix_index])
cf1d1f7cb2a3c401cfbd3bd88cd286fee2e7a22d
69,003
def _parse_setup_lines(lines):
    """Return a list of the setup names"""
    setups = []
    for l in lines:
        if 'Setup' in l:
            tsetup = l.split()[1].strip()
            # Remove any lingering colon
            if tsetup[-1] == ':':
                setup = tsetup[:-1]
            else:
                setup = tsetup
            setups.append(setup)
    return setups
630d5004d2864a87d3b2e80d706ea20a4932dad3
69,004
def unicodeToByteArray(u):
    """Simple conversion of unicode to bytearray: utf8 encoding"""
    return bytearray(u.encode("utf-8"))
67bb13a4210388ee50addf6ead4fccfccf247118
69,005
def aligned(source: tuple, target: tuple) -> bool:
    """True if positions are aligned orthogonally or diagonally."""
    row, col = source
    target_row, target_col = target
    if row == target_row or col == target_col:
        return True
    # test diagonals
    delta = row - target_row
    if (col + delta == target_col) or (col - delta == target_col):
        return True
    return False
c1a0c2cdb3b0eb399e30921e0e86d02c0b7dab05
69,006
import calendar


def datetime_to_ts(dt):
    """Convert internal DB timestamp to unixtime."""
    if dt:
        return calendar.timegm(dt.utctimetuple())
b186dfdcf62174be08380425f24b3c251d44933f
69,008
def strip_none_keys(args):
    """Useful function to interact with coreapi.

    While it's ok to pass function arguments with default equal to None,
    it is not allowed to pass it over coreapi. So we have to strip keys
    with None values.
    """
    return {k: v for k, v in args.items() if v is not None}
03927757877becb4440a7c76e458cfa813db3f59
69,009
def check_distortion(list_data):
    """
    Check if the distortion is significant or not. If the number of dots
    having the residual greater than 1 pixel is greater than 15% of the
    total number of dots, there's distortion.

    Parameters
    ----------
    list_data : array_like
        List of [radius, residual] of each dot.

    Returns
    -------
    bool
    """
    check = False
    res_list = list_data[:, 1]
    perc_err = 1.0 * len(res_list[res_list > 1.0]) / len(res_list)
    if perc_err > 0.15:
        check = True
    return check
84f67aea2fb2c0dd807be198b4aa9e33755ce116
69,014
def is_complete_v4_key(v4_key):
    """Returns True if a key specifies an ID or name, False otherwise.

    Args:
        v4_key: an entity_v4_pb.Key

    Returns:
        True if the key specifies an ID or name, False otherwise.
    """
    assert len(v4_key.path_element_list()) >= 1
    last_element = v4_key.path_element(len(v4_key.path_element_list()) - 1)
    return last_element.has_id() or last_element.has_name()
5931b18e45e8febe1edffdbe90eb0bb19f98c819
69,018
def _to_name(tool) -> str:  # type: ignore
    """Gives the tool name for a function by taking the function name
    and replacing underscores with hyphens."""
    return tool.__name__.replace("_", "-")
00cbe8833bdaee8dd36dd1b7b9f202c28d732710
69,026
import hmac
import hashlib


def gen_signature(api_key_secret, timestamp, verb, path, body=""):
    """Signs the request payload using the api key secret.

    api_key_secret - the api key secret
    timestamp - the unix timestamp of this request e.g. int(time.time()*1000)
    verb - Http verb - GET, POST, PUT or DELETE
    path - path excluding host name, e.g. '/v1/withdraw'
    body - http request body as a string, optional
    """
    payload = "{}{}{}{}".format(timestamp, verb.upper(), path, body)
    message = bytearray(payload, 'utf-8')
    signature = hmac.new(bytearray(api_key_secret, 'utf-8'), message,
                         digestmod=hashlib.sha512).hexdigest()
    return signature
db98ab54b59caccb11f638c2574c81808d32f9a9
69,032
def consistent_dims(value_items):
    """
    Get dimensions used consistently across all value objects.
    Returns None if dimensions are omitted or added for some value object(s).
    """
    used = set(k for k in value_items[0] if k != "value")
    for vo in value_items:
        if used != set(k for k in vo if k != "value"):
            return None
    return used
2cbf103076e9b8acf72e942368aba7e5da2f4788
69,038
def mapping_wid2tid(doc):
    """
    create mapping from w_id to t_id

    :param lxml.etree._ElementTree doc: XML document loaded by lxml.etree

    :rtype: dict
    :return: w_id -> t_id

    :raises: Exception when a w_id maps to more than one t_id
    :raises: Exception when a t_id maps to more than one w_id
    """
    wid2tid = {}
    for term_el in doc.xpath('terms/term'):
        t_id = term_el.get('id')
        target_els = term_el.findall('span/target')
        assert len(target_els) == 1, f'expecting one target el per term, found {len(target_els)}'
        for target_el in term_el.xpath('span/target'):
            w_id = target_el.get('id')
            if w_id in wid2tid:
                raise Exception(f'{w_id} maps to two or more t_ids')
            else:
                wid2tid[w_id] = t_id
    return wid2tid
9a4d800a193b47c19ea47d7df9a4d1490e0c8d93
69,048
from typing import Optional


def default_primitive_deserializer(obj: object, cls: Optional[type] = None, **kwargs) -> object:
    """
    Deserialize a primitive: it simply returns the given primitive.

    :param obj: the value that is to be deserialized.
    :param cls: not used.
    :param kwargs: not used.
    :return: ``obj``.
    """
    return obj
40dd35631e3260f5abd883c56a8c342c785315d2
69,052
def encode_powerset_labels(y):
    """Encode the array y as a collection of concatenated label strings
    including each stance where the stance is 1. The label format is
    suitable for feeding to FastText."""
    return ['__label__' + '_'.join(map(str, y[i, :])) for i in range(y.shape[0])]
711d42f572f9527b89667be7f3bce84081eb0b2a
69,055
def four_gametes_test(bases1: list, bases2: list):
    """
    Whether 2 loci are incompatible by computing a 4-gametes test
    on 2 informative polymorphisms.

    Parameters:
        bases1 (list)
        bases2 (list)

    Output:
        boolean: the 2 loci are incompatible
    """
    assert len(bases1) == len(bases2), "Not the same number of elements."
    c = []
    for i in range(len(bases1)):
        c.append((bases1[i], bases2[i]))
    return len(set(c)) > 3
b5906ba1eef5db2070e27a0f45f5d5911c4a68cc
69,056
import tempfile


def get_spooled_file_object(s3_client, bucket, key):
    """Get a temporary spooled file object for an S3 object

    :param s3_client: Boto s3_client object to apply
    :param bucket: Bucket to download from
    :param key: key identifying the object within the bucket
    """
    result = tempfile.SpooledTemporaryFile()
    s3_client.download_fileobj(bucket, key, result)
    result.seek(0)
    return result
51d3478d798fb0e3e6692ef304343f65ce1466a8
69,064
def thr(x):
    """
    Returns the third item of a collection.

    Examples
    --------
    >>> thr([0, 1, 2])
    2
    """
    return x[2]
dc167289ecf4a39999757bc2f2525e39cba186f5
69,065
def prepare_feed_dict(model, features, labels=None, is_training=None):
    """Prepares a feed_dict for sess.run() given a batch of features and labels.

    Args:
        model: An instance of AstroModel.
        features: Dictionary containing "time_series_features" and "aux_features".
            Each is a dictionary of named numpy arrays of shape [batch_size, length].
        labels: (Optional). Numpy array of shape [batch_size].
        is_training: (Optional). Python boolean to feed to the model.is_training
            Tensor (if None, no value is fed).

    Returns:
        feed_dict: A dictionary of input Tensor to numpy array.
    """
    feed_dict = {}
    for feature, tensor in model.time_series_features.items():
        feed_dict[tensor] = features["time_series_features"][feature]
    for feature, tensor in model.aux_features.items():
        feed_dict[tensor] = features["aux_features"][feature]
    if labels is not None:
        feed_dict[model.labels] = labels
    if is_training is not None:
        feed_dict[model.is_training] = is_training
    return feed_dict
a16137ac3a9f1cde691f2d1e24d825af9044bd0f
69,067
def mocked_api(mocker):
    """Mock object that patches the channels API"""
    return mocker.patch("search.tasks.api")
5fcf23040b73621253a7729cbf9ce2945aa1a3c1
69,068
def _escape_censys_value(value):
    """Escapes necessary characters for a censys search"""
    escape_strings = ["+", "-", "=", "&", "|", ">", "<", "!", "(", ")",
                      "{", "}", "[", "]", "^", "\"", "~", "*", "?", ":",
                      "\\", "/"]
    escape_dict = {}
    for escape_string in escape_strings:
        escape_dict[escape_string] = "\\" + escape_string
    return value.translate(str.maketrans(escape_dict))
6d026b9764e59a445f68eb4f6b002ce939203bf2
69,070
import torch


def rot180(input):
    """Rotate a tensor image or a batch of tensor images 180 degrees"""
    return torch.flip(input, [-2, -1])
e2dcf8020e70d0be059083e7a509d9c1a738e483
69,071
def graph_filename_url(site_id, base_graph_name):
    """This function returns a two-tuple: graph file name, graph URL. The graph
    file name is used to save the graph to the file system; the graph URL is
    used in an HTML site report to load the graph into an image tag.

    Parameters:
    'site_id': the Site ID of the site this graph is related to.
    'base_graph_name': a graph file name, not including the Site ID and not
        including the 'png' extension. For example: 'eco_g1', which will produce
        a graph file name of 'ANSBG1_eco_g1.png' assuming the Site ID is ANSBG1.
    """
    fn = 'output/images/{}_{}.png'.format(site_id, base_graph_name)
    url = 'images/{}_{}.png'.format(site_id, base_graph_name)
    return fn, url
237b9bf109847ea99279e395c9ad5b23d6b96cd3
69,073
import requests


def get_n_annotations(url):
    """Get the number of playbills results annotations on the server."""
    r = requests.get(url)
    r.raise_for_status()
    n_annotations = r.json()['total']
    return n_annotations
0dd402dbc928be462ee21ae6f4a6cba9d0e8fd03
69,076
import re


def camel_to_snake_case(camel_case_name: str) -> str:
    """
    Converts a name with Capital first letters to lower case with '_' as separators.
    For example, CamelToSnakeCase -> camel_to_snake_case.
    """
    return (camel_case_name[0] + re.sub(r'([A-Z])', r'_\1', camel_case_name[1:])).lower()
99af1a2e2f91bb7334419964d31064d0d9f27e3a
69,081
def hinge_loss(predicted_value: float, real_label: float) -> float:
    """
    Computes hinge loss for given predicted value, given the real label.

    :param predicted_value: inner product of data sample and coefficient vector,
        possibly corrected by intercept
    :param real_label: Real label
    :return: Hinge loss of datapoint
    """
    return max(0, 1 - real_label * predicted_value)
2eadb28690b5ceab49547e69f60d6fc0edf77938
69,092
def get_selector_root(sel):
    """Return lxml Element for a Selector"""
    if not hasattr(sel, 'root'):
        # scrapy.Selector, scrapy < 1.1
        return sel._root
    # parsel.Selector or scrapy.Selector, scrapy >= 1.1
    return sel.root
1cd94f9a50128a79b8df37caacc50ed006bee730
69,097
def basic_mesa_f_files(mesa_dir):
    """
    Returns list of selected MESA .f90 files & standard_run_star_extras.
    These ones provide a nice skeleton of each timestep.
    """
    basic_files = ['public/star_lib.f90', 'job/run_star.f90', 'job/run_star.f',
                   'job/run_star_support.f90', 'private/evolve.f90']
    basic_files = [mesa_dir + '/star/' + x for x in basic_files]
    basic_files.append(mesa_dir + '/include/standard_run_star_extras.inc')
    return basic_files
c05aaf66ddbedbb8fc776050240b5eeefc15eeac
69,100
def _GetClearedFieldsForFaultAbort(fault_abort, field_prefix):
    """Gets a list of fields cleared by the user for HttpFaultAbort."""
    cleared_fields = []
    if not fault_abort.httpStatus:
        cleared_fields.append(field_prefix + 'httpStatus')
    if not fault_abort.percentage:
        cleared_fields.append(field_prefix + 'percentage')
    return cleared_fields
6cbdabc103720f861e7417ef39a7f0f05404bab2
69,101
def remove_nulls(n):
    """Swaps nulls with zeros."""
    if n == 'null':
        return 0
    return n
10206d6db6a03e31fc66fc995250d4753263af84
69,111
def add_fit_args(parser):
    """
    parser : argparse.ArgumentParser
    return a parser added with args required by fit
    """
    args = parser.add_argument_group('Training', 'model training')
    args.add_argument('--network', type=str,
                      help='the neural network to use')
    args.add_argument('--dataset', type=str, default='NTU',
                      help='select dataset to evaluate')
    args.add_argument('--start-epoch', default=0, type=int,
                      help='manual epoch number (useful on restarts)')
    args.add_argument('--max-epoches', type=int, default=150,
                      help='max number of epochs to run')
    args.add_argument('--lr', type=float, default=0.1,
                      help='initial learning rate')
    args.add_argument('--lr-factor', type=float, default=0.1,
                      help='the ratio to reduce lr on each step')
    args.add_argument('--weight-decay', '--wd', type=float, default=1e-4,
                      help='weight decay (default: 1e-4)')
    args.add_argument('--print-freq', '-p', type=int, default=10,
                      help='print frequency (default: 10)')
    args.add_argument('-b', '--batch-size', type=int, default=256,
                      help='mini-batch size (default: 256)')
    args.add_argument('--num-classes', type=int, default=11,
                      help='the number of classes')
    args.add_argument('--case', type=int, default=0,
                      help='select which case')
    args.add_argument('--train', type=int, default=1,
                      help='train or test')
    args.add_argument('--workers', type=int, default=2,
                      help='number of data loading workers (default: 2)')
    args.add_argument('--monitor', type=str, default='val_acc',
                      help='quantity to monitor (default: val_acc)')
    args.add_argument('--seg', type=int, default=20,
                      help='number of segments')
    return args
4211682a85cb1198f065102b880bb80a7553fed7
69,112
def avg_colour(pixel):
    """Returns the average of RGB values of that corresponding pixel."""
    return (pixel.red + pixel.blue + pixel.green) / 3
86bc8ee7286b13b18ccb1e0754c0f0d5c44a743a
69,114
def PILColorToRGB(pil_color):
    """convert a PIL-compatible integer into an (r, g, b) tuple"""
    hexstr = '%06x' % pil_color
    # reverse byte order
    r, g, b = hexstr[4:], hexstr[2:4], hexstr[:2]
    r, g, b = [int(n, 16) for n in (r, g, b)]
    return (r, g, b)
631995a200251e0534b3c982292c6e5b71ac867b
69,115
def correct_braces(string):
    """
    Check if string has correct braces "{" and "}"

    :param string: String with braces to be checked
    :return: true if braces are correct, otherwise false
    """
    if string is None:
        raise ValueError("String to check correct braces was None")
    braces_count = 0
    quotation = False
    for character in string:
        if character == '{' and not quotation:
            braces_count += 1
        if character == '}' and not quotation:
            braces_count -= 1
        if character == '"':
            quotation = not quotation
        if braces_count < 0:
            return False
    return braces_count == 0
34ad73816724c618bcdbd797dab75e55e705575b
69,118
def create_standard(i):
    """
    create a standard matrix where all entries except the diagonal are 0, diagonals are 1

    :param i: number of rows and columns
    :return: list of lists of numerical values
    """
    output_matrix = [[0 for j in range(i)] for b in range(i)]
    for a in range(i):
        output_matrix[a][a] = 1
    return output_matrix
5c3902e6c34d1199fe02ace375c42e248abc7f47
69,119
def reconstruct_lcs_solution(table, string_a, string_b):
    """
    to reconstruct the solution (actual string)
    1. start at table[-1][-1] for max length
    2. if string_a[i] == string_b[j], chars match, then the current position we are at
       is associated with table[i-1][j-1] (as to make the table we extend the max sub seq
       by 1 when they matched), so update our current position and add the common character
       we were at to output; string_a[i] is added to output
    3. if not a match, then the number would have come from max(left, top),
       so move there and update our position; nothing is added to the LCS

    continue until reaching 0 (where table[i][j] = 0 or i or j, boundary case) and we are done

    Time Complexity to make solution: O(a+b) where a is len(string_a) and b is len(string_b);
    this is because we loop until i or j = 0 and each pass decrements one of i or j or both,
    so the worst bound is when only one of i or j decrements each pass, which is O(i + j)

    :return: longest common subsequence
    """
    solution = []
    i = len(string_a)
    j = len(string_b)
    # remember table has i columns indexed from 0 to i
    while table[i][j] > 0:
        if string_a[i-1] == string_b[j-1]:
            # remember need to index-1 for strings, only the table has + 1 row, col
            solution.append(string_a[i-1])  # add common element to solution
            i -= 1  # decrement i and j (go to top left diagonal)
            j -= 1
        else:
            # no match
            top = table[i-1][j]
            left = table[i][j-1]
            if top >= left:
                i -= 1  # decrement i to update our position
            else:  # top < left
                j -= 1  # decrement j to update our position
    return solution[::-1]
9b14a89089bf49c2b6286a5823b2463b7ccd09e5
69,121
def color_palette(color=1, shade=1):
    """
    Return a color palette from a selected color and shade level.

    :param int color: (Optional.) 0=Blue, 1=Deep Orange, 2=Green, default is 1.
    :param int shade: (Optional.) 0=light, 1=regular, 2=dark, default is 1.
    :rtype: str
    :return: Hex color code.
    """
    palette = [
        ['#90CAF9', '#2196F3', '#1565C0'],  # Blue
        ['#FFAB91', '#FF5722', '#D84315'],  # Deep Orange
        ['#A5D6A7', '#4CAF50', '#2E7D32'],  # Green
    ]
    return palette[color][shade]
55298a85f212e939aa673f7e15d872daac921940
69,124
def clean_line(line):
    """Strip whitespace off a line and separate out the comment, if any."""
    if "//" in line:
        line, _sep, comment = line.partition("//")
    elif "/*" in line:
        line, _sep, comment = line.partition("/*")
        comment, _sep, _trash = comment.partition("*/")
    else:
        comment = ""
    return line.strip(), comment.strip()
0fd4759d5e12b14cb89d2bd2db11fbec09a471d9
69,129
import json


def index_by_rev_id(s):
    """Given a json string of a dict with a rev_id key, pair the rev_id with the string"""
    return (json.loads(s)["rev_id"], s)
00489cea75ca699c569e9e3747b085cabfe6a4f8
69,130
def strip_unneeded(text: str) -> str:
    """Get rid of unneeded characters in text and return it."""
    text = text.strip().replace("+", "").replace(",", "").replace(" ", "")
    if not text:
        text = "0"
    return text
1fed2b249f2ba1969242f847ec1636a655508d46
69,133
import socket


def IsIP6Addr(addr):
    """Returns true for valid IPv6 addresses."""
    try:
        socket.inet_pton(socket.AF_INET6, str(addr))
    except socket.error:
        return False
    return True
bb1ddf4c75ed7e4fdc72d9f27d1f0eb2118b5626
69,135
def get_attribute(metainfo, name, multiple=False):
    """
    Return the value for the attribute ``name`` in the ``metainfo`` mapping,
    pkginfo object or email object. Treat the value as a list of multiple
    values if ``multiple`` is True. Return None or an empty list (if multiple
    is True) if no value is found or the attribute ``name`` does not exist.
    Ignore case (but return the value for the original case if present).
    """
    # note: the approach for this function is to be used with the various
    # metainfo objects and dictionaries we use that can be a
    # pkginfo.Distribution, an email.message.EmailMessage or a dict.
    # Because of that, the key can be obtained as a plain named attribute,
    # either as-is or lowercased (and with dash replaced by dunder) or we
    # can use a get on dicts of emails.

    def attr_getter(_aname, default):
        _aname = _aname.replace('-', '_')
        return (
            getattr(metainfo, _aname, default)
            or getattr(metainfo, _aname.lower(), default)
        )

    def item_getter(_iname, getter, default):
        getter = getattr(metainfo, getter, None)
        if getter:
            return getter(_iname, default) or getter(_iname.lower(), default)
        return default

    if multiple:
        return (
            attr_getter(name, [])
            or item_getter(name, 'get_all', [])
            or item_getter(name, 'get', [])
            or []
        )
    else:
        return (
            attr_getter(name, None)
            or item_getter(name, 'get', None)
            or None
        )
3ecc8f8e956428baf204b994bb6cfc37e487ddd0
69,138
def collect_letters(task: dict) -> str:
    """
    Collect all letters used in the task.

    The letters are sorted into the optimal solving order: first come the
    letters needed on the right-hand side of the sums. Once the first letters
    are known, further positions can be found by calculation.

    :param task: The task.
    :return: All letters.
    """
    result = []
    # From the right, the letters in the summands
    for index in reversed(range(len(task["calculate_letter_sum"]))):
        for summand in task["summands"]:
            c = summand[index]
            if c.isalpha() and c not in result:
                result.append(c)
    # Then the letters in the sum
    for c in task["calculate_letter_sum"]:
        if c.isalpha() and c not in result:
            result.append(c)
    return "".join(result)
67d4c4546a49c6219ce6a370c47bbd49ce7cb292
69,141
import torch


def swish(x):
    """Swish act. fn. -> smoothing fn. that nonlinearly interpolates between linear and ReLU fn."""
    return x * torch.sigmoid(x)
879ab2ed62721247d3cbabee7c1cfb3b273d3d5f
69,147
def delf(text):
    """remove carriage-returns and single line-feeds from text
    - leaves double line-feeds (paragraph breaks)
    - also removes trailing spaces
    - source must be a string
    """
    # remove trailing spaces
    lines = [line.rstrip() for line in text.split('\n')]
    cleantext = '\n'.join(lines)
    # remove carriage returns and single line feeds
    return cleantext.replace('\r', '').replace('\n\n', '\r').replace('\n', ' ').replace('\r', '\n\n')
6943f77d1c00ed2e52088953700fa09e2ad221d4
69,148
def getFoodNames(cursor):
    """Get dictionary from food id to food name."""
    cursor.execute("SELECT id, name FROM Food")
    return dict([v.values() for v in cursor.fetchall()])
e3bbd6d655747cbb1350bc294e5ce86c0931b209
69,149
def zpad(x, l):
    """Left zero pad value `x` at least to length `l`."""
    return b"\x00" * max(0, l - len(x)) + x
34b221980eb4dc29a0aa40adb8c3e23403e87c45
69,151
import random


def randomProblem(rows=10, columns=10, max=1000):
    """
    Generate a random matrix, with the specified number of rows and columns.
    Each number is distributed uniformly at random between zero and the
    specified maximum.
    """
    result = []
    for i in range(rows):
        resultRow = []
        for j in range(columns):
            resultRow.append(random.randint(0, max))
        result.append(resultRow)
    return result
ad4471072c0fc1784b839aa95e150122ce9e01a1
69,155
def str2bool(string):
    """
    Converts a string to a boolean

    :param string: String to convert
    :type string: `str`
    :return: Boolean equivalent
    :rtype: `bool`
    """
    return not (string in ["False", "false", 0, "0"])
85e99286da881301b65e83704616ab815f79012c
69,156
from typing import Tuple

import torch


def get_last_layer(sequence_outputs: Tuple[torch.Tensor]) -> torch.Tensor:
    """Returns the last tensor of a list of tensors."""
    return sequence_outputs[-1]
ece4e23cf8f7edb6565927053d9fe95dbbd22f5c
69,157
def test_for_blank_line(source: str) -> bool:
    """
    Returns True if 'source' is effectively a blank line, either "\n", " ", or "",
    or any combination thereof. Returns False, otherwise.
    """
    return not bool(source.strip())
dff2ee6f16be8beaf7e4e09bc68dfc73a5df7d0a
69,158
import string


def punctuation_free(reference):
    """Function takes a caption and outputs punctuation free and lower cased caption"""
    text = reference.split()
    x = [''.join(c.lower() for c in s if c not in string.punctuation) for s in text]
    return x
e13a8866526294d342cef0779d62cb2cbc6dc562
69,160
def outgroup_reformat(newick, outgroup):
    """
    Move the location of the outgroup in a newick string to be at the end of the string

    Inputs:
    newick --- a newick string to be reformatted
    outgroup --- the outgroup
    """
    # Replace the outgroup and comma with an empty string
    newick = newick.replace(outgroup + ",", "")
    newick = newick[:-2] + "," + outgroup + ");"
    return newick
944b0e71558c63d51f5773387441d766f0b31eea
69,165
def _shard_kwargs(shard_idx: int, kwargs: dict) -> dict:
    """Return a copy of the input kwargs but with only one shard"""
    # Having lists of different sizes makes sharding ambiguous, raise an error in this case
    # until we decide how to define sharding without ambiguity for users
    lists_lengths = {key: len(value) for key, value in kwargs.items() if isinstance(value, list)}
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
                + "\nTo fix this, check the dataset script 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            )
        )
    return {key: [value[shard_idx]] if isinstance(value, list) else value for key, value in kwargs.items()}
b4db851cfda4eb8b1e4bda73f95e822363230970
69,167
def get_row_pct(row_sizes):
    """Get the span of each chromosome as a percentage of the span of the largest row

    :param list row_sizes: List of lists of chromosome sizes by row
    :returns: List of lists of chromosome sizes as percentages by row
    """
    biggest_row = max([sum(row) for row in row_sizes])
    row_pcts = [[float(val) / biggest_row for val in row] for row in row_sizes]
    return row_pcts
cd27566154b3d8b29063b06b66a9acc1381d9d4e
69,168
def filter_layer_collections_by_object(layer_collections, obj):
    """Returns a subset of collections that contain the given object."""
    return [lc for lc in layer_collections if obj in lc.collection.objects.values()]
1d51557fba7e2c207f512673dfdb890af6ab53a8
69,170
import time
import logging


def is_even(n: int) -> bool:
    """
    Checks if the number 'n' is even or odd

    :param n: The number to check
    :return: Whether (True) or not (False) the number is even
    """
    time.sleep(0.1)
    if n % 2 == 0:
        logging.info(f"{n} - is even")
        return True
    else:
        logging.info(f"{n} - is odd")
        return False
25fb81ad343aba92308fbb3c4f8878df786bb2fe
69,175
import glob


def load_videos(videos_dir: str):
    """
    Return video file paths from provided directory

    :param videos_dir:
    :return:
    """
    videos = []
    for x in glob.glob(videos_dir + "*"):
        videos.append(x.replace('\\', '/'))
    return videos
1724109bcccc4e0daaac6bdebf3689444753d5ed
69,178
from datetime import timezone


def create_utc_datetime(datetime):
    """Creates TZAware UTC datetime object from unaware object."""
    assert datetime.tzinfo is None
    return datetime.replace(tzinfo=timezone.utc)
a0f465b95b25a04823d43985edb4a416672277f8
69,180
def get_model_output(context, full_output, tokenizer):
    """
    Given the context and the full model output (context + generated),
    extract just the generated tokens.
    Remove the last token if it is <|endoftext|>
    """
    ret = full_output[len(context):]
    if ret[-1] == tokenizer.eos_token_id:
        ret = ret[:-1]
    return ret
944bf5c9ac2f8bee3e6f491f1fdba57fb2f913a5
69,181
def validateLongText(article, minLength=2, maxLength=500):
    """
    Validate the length of an article.

    :param article: the article text
    :param minLength: minimum length (default: 2)
    :param maxLength: maximum length (default: 500)
    :return: bool

    A simple example:

    Example
    -------
    >>> validator.validateLongText("飞翔吧,骄傲的海鸥", minLength=2, maxLength=500)
    True
    """
    strLen = len(article.encode('utf-8').decode("utf-8"))
    if strLen < minLength or strLen > maxLength:
        return False
    else:
        return True
bd3af4f77aca868697e40f522e04a62cf01c79c7
69,182
def fval_function(sN, weight):  # IMPLEMENT
    """
    Provide a custom formula for f-value computation for Anytime Weighted A star.
    Returns the fval of the state contained in the sNode.

    @param sNode sN: A search node (containing a SokobanState)
    @param float weight: Weight given by Anytime Weighted A star
    @rtype: float
    """
    fval = sN.gval + weight * sN.hval
    return fval
4447e2b64d287a2d86b754267e674f074757d007
69,183
import math


def distance(a, b) -> float:
    """Distance between point a and point b"""
    x, y = 0, 1
    return math.sqrt((a[x] - b[x])**2 + (a[y] - b[y])**2)
c5ad556750ae3f8a94bd3a503d033641ef0218a4
69,184
def _get_caption(table):
    """
    Get the caption of the associated wiki table.

    Parameters
    ----------
    table: BeautifulSoup

    Returns
    -------
    str
    """
    caption_text = None
    caption = table.find("caption")
    if caption:
        caption_text = caption.text.strip()
    return caption_text
552925afb8e788e2f6cf03ff84ca1a4062f085ff
69,189
def determine_pubmed_xml_type(xmlstr):
    """
    Returns string "type" of pubmed article XML based on presence of expected strings.

    Possible returns:
        'article'
        'book'
        'unknown'

    :param xmlstr: xml in any data type (str, bytes, unicode...)
    :return typestring: (str)
    :rtype: str
    """
    if type(xmlstr) == bytes:
        xmlstr = xmlstr.decode()
    if '<PubmedBookArticle>' in xmlstr:
        return 'book'
    elif '<PubmedArticle>' in xmlstr:
        return 'article'
    return 'unknown'
dfe6b8c05f02c2dda81c6b33a8ac6ee6ac5ad93e
69,191
def find_unit_clause(clauses, model):
    """Find a unit clause: a clause with only 1 variable that is not bound in the model.
    Arguments are expected to be in integer representation.

    >>> find_unit_clause([{1, 2, 3}, {2, -3}, {1, -2}], {1: True})
    (2, False)
    """
    bound = set(model) | {-sym for sym in model}
    for clause in clauses:
        unbound = clause - bound
        if len(unbound) == 1:
            p = unbound.pop()
            if p < 0:
                return -p, False
            else:
                return p, True
    return None, None
607a1b26b44dace13c06ab029850b9e9ca8f20b1
69,192
def compatible(cluster_a, value_a, cluster_b, value_b):
    """
    Checks compatibility of clusters of variables.
    Compatibility means that values agree on common variables.
    """
    for node in list(cluster_a):
        position_a = cluster_a.index(node)
        if node in list(cluster_b):
            position_b = cluster_b.index(node)
            if value_a[position_a] != value_b[position_b]:
                return False
    return True
4291faca23e5d48e0b26f566cfb15e5b9a14a1ea
69,195
import re


def is_a_define_statement(match, body):
    """See if the matching line begins with #define"""
    # This does not yet help with multi-line defines
    m = re.search(r"^#define.*{}$".format(match.group(0)), body[:match.end()], re.MULTILINE)
    return m is not None
8b564af89bd3c289368ffeffe6f902deda947fef
69,196
from typing import Counter


def gc_content(text: str) -> float:
    """Return the GC content of the given text

    Arguments:
        text {str} -- DNA string

    Returns:
        float -- GC content rounded to 5 decimal places

    Example:
    >>> gc_content("ACAACTATGCATACTATCGGGAACTATCCT")
    40.0
    """
    counts = Counter(text.upper())
    gc_count = int(counts['G'] + counts['C'])
    gc_content = round(gc_count / len(text) * 100, 5)
    return gc_content
49d08bdeeb249fa73d5a21172af3e509170af021
69,197
from typing import OrderedDict


def specification_values_dict(instance):
    """
    Returns a dictionary suitable for outputting the specification field values.
    """
    groups = OrderedDict()
    for field in instance.fields.select_related("field__group"):
        groups.setdefault(field.group, []).append(
            (field.name, field.get_value_display())
        )
    return groups
fdb6a6ad3ca15226e1f9fc0a87b136cff566245b
69,198
import operator


def dot(u, v):
    """Returns u . v - the dot product of vectors u and v."""
    return sum(map(operator.mul, u, v))
9ebfa6eae13293c5b2e91648969fb264c4ca712b
69,200
import yaml


def _load_yaml(filename: str) -> dict:
    """Yaml filename loader helper.

    Parameters
    ----------
    filename (str)
        Yaml file to load

    Returns
    -------
    dict
        YAML content as Pythonic dict
    """
    with open(filename) as yaml_file:
        return yaml.full_load(yaml_file)
253b4bd63d6c97dc30f566af1800bde976562945
69,201
def model_variables(price_model, hlcm, elcm, wah_lcm, developer_access_cols):
    """
    Returns a set of the columns/variables used by various models.
    """
    return set().union(*[
        price_model.columns_used(),
        hlcm.columns_used(),
        elcm.columns_used(),
        wah_lcm.columns_used(),
        developer_access_cols
    ])
4056754e8ca70a9b8938f6d06aa821378ee57f9f
69,202
def get_scope(fname):
    """
    Return the scope of the res file so that results on the same dataset can be paired.
    """
    city = fname[:2]
    if 'targetbase' in fname:
        return city, 'base'
    elif 'targetcategory' in fname:
        return city, 'category'
    else:
        raise ValueError('%s does not have a scope' % fname)
d93567b3ee40db126714eb41c43a3945838307ba
69,206
def vector_sum(vector1, vector2, coeff=1.0):
    """Sums the vectors.

    This function computes the result of the vector operation
    :math:`\\overline{v}_{1} + c * \\overline{v}_{2}`, where
    :math:`\\overline{v}_{1}` is ``vector1``,
    :math:`\\overline{v}_{2}` is ``vector2`` and :math:`c` is ``coeff``.

    :param vector1: vector 1
    :type vector1: list, tuple
    :param vector2: vector 2
    :type vector2: list, tuple
    :param coeff: multiplier for vector 2
    :type coeff: float
    :return: updated vector
    :rtype: list
    """
    summed_vector = [v1 + (coeff * v2) for v1, v2 in zip(vector1, vector2)]
    return summed_vector
bbff7101cbb85f35148d1cd91057c2e43f9df7de
69,207
import logging


def yaml_from_csv(csv_dict):
    """
    Defines what is necessary to retrieve a value (given by field name) from a dict.

    Ex (in YAML file): my_key: !from_csv field_name
    """
    def _yaml_loader(loader, node, csv_dict=csv_dict):
        # If the value does not exist, log the error
        if csv_dict.get(node.value, None) is None:
            logging.error(
                "YAML file CSV reference '%s' missing. Can be given with option "
                "'--extra-config=<YAML>'. YAML content example: '%s: <value>'",
                node.value, node.value)
            # We don't replace value because we can't...
            return node.value
        else:
            # No error, we return the value
            return csv_dict[node.value]
    return _yaml_loader
7ea0ef2b632241ed0f351a813d54e0efaaa7eeee
69,217
def convert_epa_unit(df, obscolumn="SO2", unit="UG/M3"):
    """Converts ppb to ug/m3 for SO2 in aqs and airnow datasets.

    See 40 CFR Part 50.5, Appendix A-1 to Part 50, Appendix A-2 to Part 50.
    To convert from ppb to ug/m3, multiply by 2.6178.
    Also will convert from ug/m3 to ppb.

    Parameters
    ----------
    df : pandas dataframe
        self.df attribute from aqs or airnow class.
    obscolumn : string
        name of column with SO2 data in it.
    unit : string
        either 'UG/M3' or 'PPB' (not case sensitive); will convert data to this unit.

    Returns
    -------
    df : pandas dataframe
        returns dataframe identical to original but with data converted to new unit.
    """
    factor = 2.6178
    ppb = "ppb"
    ugm3 = "ug/m3"
    if unit.lower() == ugm3:
        df = df[df["units"] == ppb]  # find rows with units of 'ppb'
        df["units"] = unit.upper()
        df[obscolumn] = df[obscolumn] * factor
    elif unit.lower() == ppb:
        df = df[df["units"] == ugm3]  # find rows with units of 'ug/m3'
        df[obscolumn] = df[obscolumn] / factor
    return df
4af9811c3ae465904b3320cc6d5dd0e29f1ff598
69,221
def open_doors(digest):
    """Returns a list of 4 booleans representing whether doors UDLR are open."""
    return [x in 'bcdef' for x in digest[:4]]
82e8abd146aabc16efa0eb0f5e5b7678e75f3fca
69,222
from pathlib import Path
from typing import List
from typing import Tuple
from typing import Dict

import yaml


def find_configs(path: Path) -> List[Tuple[Dict, Path]]:
    """Returns the parsed content and paths of qaboard.yaml files that should be loaded
    for a (sub)project at the `path`.
    Returns a tuple (configs, paths). Each element is a list - the root qaboard.yaml
    is first and the subproject's is last.
    """
    configsxpaths = []
    # We need a full path to iterate on the parents
    path = path.resolve()
    # We look for qaboard.yaml configuration files in the path folder and its parents
    parents = [path, *list(path.parents)]
    for parent in parents:
        qatools_config_path = parent / 'qaboard.yaml'
        if not qatools_config_path.exists():
            qatools_config_path = parent / 'qatools.yaml'  # backward compatibility
            if not qatools_config_path.exists():
                continue
        with qatools_config_path.open('r') as f:
            qatools_config = yaml.load(f, Loader=yaml.SafeLoader)
        if not qatools_config:  # support empty files that just mark subprojects
            qatools_config = {}
        configsxpaths.append((qatools_config, qatools_config_path))
        if qatools_config.get('root'):
            break
    configsxpaths.reverse()
    return configsxpaths
e7007eff5e986933f082c530351dc7e9fda5e27a
69,225
def gen_call_expr(test_case, prefix):
    """Generate call expression based on method name and parameters and method prefix, e.g. str"""
    prefix_as_list = [prefix] if prefix else []
    call_expr_parts = ['data'] + prefix_as_list + ['{}({})'.format(test_case.name, test_case.params)]
    return '.'.join(call_expr_parts)
28e8617d63b1a55602d3912ec4d49768c3c4759b
69,228
def lerp(v0, v1, t):
    """
    returns a value lerped between v0 and v1, according to t
    t == 0 maps to v0, t == 1 maps to v1
    """
    return v0 + ((v1 - v0) * t)
f8699f61925f60e649e98bdf18a8b4970afa6e12
69,229
def hexd(n):
    """Return hex digits (strip '0x' at the beginning)."""
    return hex(n)[2:]
7370225183ffb7ebcad1c2622917f06fdca83bbb
69,231
from pkg_resources import (
    DistributionNotFound,
    VersionConflict,
    RequirementParseError,
)
import pkg_resources
from pathlib import Path
import warnings


def is_requirements_installed(
    requirements_file: 'Path', show_warning: bool = False
) -> bool:
    """Return True if requirements.txt is installed locally

    :param requirements_file: the requirements.txt file
    :param show_warning: if to show a warning when a dependency is not satisfied
    :return: True or False if not satisfied
    """
    try:
        with requirements_file.open() as requirements:
            pkg_resources.require(requirements)
    except (DistributionNotFound, VersionConflict, RequirementParseError) as ex:
        if show_warning:
            warnings.warn(str(ex), UserWarning)
        return False
    return True
f38c7037745d131e7a89e7100d9563976f87369a
69,235
def get_template_user_keys(template):
    """
    Finds the keys in a template that relate to the HumanUser entity.

    :param template: Template to look for HumanUser related keys.
    :returns: A list of key names.
    """
    # find all 'user' keys in the template:
    user_keys = set()
    if "HumanUser" in template.keys:
        user_keys.add("HumanUser")
    for key in template.keys.values():
        if key.shotgun_entity_type == "HumanUser":
            user_keys.add(key.name)
    return user_keys
cfeca37d39775a63b69f0ee0d582a0f4e03bb44e
69,236
import logging


def getlogger(log, name):
    """Gets a logger given a logger and a package.

    Will return the given logger if the name is not generated from the current
    package, otherwise generate a logger based on __name__.

    :param log: Logger to start with.
    :type log: logging.Logger
    :param name: The __name__ of the caller.
    :type name: str
    """
    return (
        log
        if isinstance(log, logging.Logger)
        and not log.name.startswith(name.partition('.')[0])
        else logging.getLogger(name))
ad9e5b2d30001ea98c7d79f2f789a548e21a60bc
69,239
def pump(fluid, pfinal, eta):
    """Adiabatically pump a fluid to pressure pfinal, using a pump with
    isentropic efficiency eta."""
    h0 = fluid.enthalpy_mass()
    s0 = fluid.entropy_mass()
    fluid.set(S=s0, P=pfinal)
    h1s = fluid.enthalpy_mass()
    isentropic_work = h1s - h0
    actual_work = isentropic_work / eta
    h1 = h0 + actual_work
    fluid.set(H=h1, P=pfinal)
    return actual_work
b77219de64b9793da00fcc7c6a832e8c3942edc5
69,244
import re


def _sanitizeline(line):
    """Sanitizes input asm lines by removing all whitespace and comments"""
    line = re.sub(r'\s', '', re.split(r'//', line)[0])
    return line
9996d7e56b1aed3917dfa2cab287b253734f031b
69,248
def split_infiles(infiles):
    """
    breaks the infile string with space-delimited file names and creates a list
    """
    infileList = infiles.strip("\'").strip('\"').split(" ")
    if len(infileList) == 1:
        infileList = infileList[0].split(";")
    return infileList
4dca5c2edc80f4c05cbfd293982f2c89236d3c51
69,249
def find_char(char, word):
    """
    find the position of a character in a word, exactly the same thing as
    str.index but with error handling
    """
    assert len(char) == 1 and type(word) is str
    i = 0
    while i < len(word):
        if word[i] == char:
            break
        else:
            i += 1
    if i == len(word):
        return None
    return i
23ba373c978dfb129f505875544050936db3b152
69,250
def get_key(dict, value):
    """
    Get the first key of the given value in the given dictionary

    :param dict: dict
    :param value: str
    :return: str
    """
    for item in dict.items():
        if item[1] == value:
            return item[0]
632ce093957a4f6de6807e721bbe2676610a97c9
69,255
def get_from_settings(settings, prefix='cors.', **kwargs):
    """
    Get settings `dict` with keys starting with `prefix`

    :param settings: settings dictionary
    :type settings: dict
    :param prefix: key prefix
    :type prefix: str
    :param kwargs: override settings
    :type kwargs: dict
    :return: extracted settings dict
    :rtype: dict
    """
    options = {
        key[len(prefix):]: settings[key]
        for key in settings
        if key.startswith(prefix)
    }
    options.update(kwargs)
    return options
8920f1b211c4159b5fee1ff7fc21d7e6ac0d5cdb
69,261
def is_latlon(name):
    """infer whether a variable is a lat/lon, based on the name"""
    return name.lower() in ['latitude', 'longitude', 'lat', 'lon']
67c66c2a003dade01613c159ec36af631202c83a
69,263