Dataset columns:
content: string (lengths 35 to 416k)
sha1: string (length 40)
id: int64 (0 to 710k)
def cm2inch(*tupl, scale=3):
    """
    Convert cm to inch and scale it up.

    Parameters:
        *tupl: *tuple(floats)
            Measures in cm to convert and scale.
        scale: float
            Scale factor. Default: 3
    """
    inch = 2.54
    if isinstance(tupl[0], tuple):
        return tuple(scale * i / inch for i in tupl[0])
    else:
        return tuple(scale * i / inch for i in tupl)
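A quick usage sketch for the record above (values are illustrative, not part of the dataset); scalars and a single tuple argument are both accepted:

print(cm2inch(10, 20))             # (11.81..., 23.62...) with the default scale of 3
print(cm2inch((10, 20), scale=1))  # (3.93..., 7.87...) -- a tuple argument is unpacked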
d84b3e60ac8a7ae2f46818510d224923dc39c730
18,673
def get_id_data_abstract_role_mappings(id_data):
    """Get the logical and physical names of the access control roles defined by a resource group.

    :param id_data: Data extracted from a custom resource's physical id by
        get_data_from_custom_physical_resource_id
    :returns: A dictionary mapping abstract role names to physical role names.
    """
    return id_data.setdefault('AbstractRoleMappings', {})
d93acd5988f068ed5a203e811326c33ab5f6b8aa
18,674
def data_size_str(bytecount: int) -> str:
    """Given a size in bytes, returns a short human readable string.

    This should be 6 or fewer chars for most all sane file sizes.
    """
    # pylint: disable=too-many-return-statements
    if bytecount <= 999:
        return f'{bytecount} B'
    kbytecount = bytecount / 1024
    if round(kbytecount, 1) < 10.0:
        return f'{kbytecount:.1f} KB'
    if round(kbytecount, 0) < 999:
        return f'{kbytecount:.0f} KB'
    mbytecount = bytecount / (1024 * 1024)
    if round(mbytecount, 1) < 10.0:
        return f'{mbytecount:.1f} MB'
    if round(mbytecount, 0) < 999:
        return f'{mbytecount:.0f} MB'
    gbytecount = bytecount / (1024 * 1024 * 1024)
    if round(gbytecount, 1) < 10.0:
        # Fixed: the original formatted mbytecount here, mislabeling small GB sizes.
        return f'{gbytecount:.1f} GB'
    return f'{gbytecount:.0f} GB'
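A few spot checks of the function above (illustrative, assuming the GB-branch fix noted in the comment):

print(data_size_str(512))          # '512 B'
print(data_size_str(2048))         # '2.0 KB'
print(data_size_str(5 * 1024**3))  # '5.0 GB'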
624e700d1014df925a7cccdea3430a311f4c7eed
18,676
from typing import List


def decrypt_message(message: str, key: str) -> str:
    """Returns the result of XOR decrypting message with the given key."""
    decrypted: List[str] = []
    key_len = len(key)
    for i, byte in enumerate(message):
        decrypted_char = chr(ord(byte) ^ ord(key[i % key_len]))
        decrypted.append(decrypted_char)
    return ''.join(decrypted)
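Because XOR is its own inverse, the same routine encrypts and decrypts; a minimal round-trip sketch (sample strings invented):

secret = decrypt_message('attack at dawn', 'key')  # "encrypting" with XOR
print(decrypt_message(secret, 'key'))              # 'attack at dawn'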
02a9ed6896c8aca48c693c076bede2ac666c99de
18,678
def get_ending_minute_after_loop(minutes, minute, prev_minute):
    """Get number of minutes after loop end to reach wanted minutes."""
    loop_length = minute - prev_minute
    minutes_left = minutes - minute
    full_loops = minutes_left // loop_length
    minutes_in_loop = loop_length * full_loops
    return prev_minute + minutes_left - minutes_in_loop
9437e89e292e386e10fb39653e1c2289acbe09a9
18,679
import re


def ends_in_file(path: str) -> bool:
    """Return True when path ends with '.%ext' or '%fn' while allowing for a lowercase marker"""
    RE_ENDEXT = re.compile(r"\.%ext}?$", re.I)
    RE_ENDFN = re.compile(r"%fn}?$", re.I)
    return bool(RE_ENDEXT.search(path) or RE_ENDFN.search(path))
742c24dea649efad6e8377aa370f325ef7fec51b
18,680
import torch


def truncate_or_pad(tensor, dim, length, pad_index=0):
    """
    truncate or pad the tensor on a given dimension to the given length.

    :param tensor:
    :param dim:
    :param length:
    :param pad_index:
    """
    orig_length = tensor.size()[dim]
    # truncate
    if orig_length > length:
        return tensor.index_select(dim, torch.arange(0, length).long())
    # pad
    else:
        # make pad
        pad_length = length - orig_length
        pad_size = list(tensor.size())
        pad_size[dim] = pad_length
        pad_size = tuple(pad_size)
        pad = (torch.ones(pad_size) * pad_index).long()
        return torch.cat((tensor, pad), dim=dim)
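A small sketch of both branches of the function above (tensor values are illustrative):

t = torch.tensor([[1, 2, 3], [4, 5, 6]])
print(truncate_or_pad(t, dim=1, length=5))  # each row padded to length 5 with zeros
print(truncate_or_pad(t, dim=1, length=2))  # each row truncated to the first 2 columns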
20074a7f38cfa99f97cfa9e19ccbf5890c38ba1d
18,682
def normalizeName(name):
    """ Make normalized user name

    Prevent impersonating another user with names containing leading,
    trailing or multiple whitespace, or using invisible unicode characters.

    Prevent creating user page as sub page, because '/' is not allowed in
    user names.

    Prevent using ':' and ',' which are reserved by acl.

    @param name: user name, unicode
    @rtype: unicode
    @return: user name that can be used in acl lines
    """
    username_allowedchars = "'@.-_"  # ' for names like O'Brian or email addresses.
    # "," and ":" must not be allowed (ACL delimiters).
    # We also allow _ in usernames for nicer URLs.

    # Strip non alpha numeric characters (except username_allowedchars), keep white space
    name = ''.join([c for c in name if c.isalnum() or c.isspace() or c in username_allowedchars])

    # Normalize white space. Each name can contain multiple
    # words separated with only one space.
    name = ' '.join(name.split())

    return name
48ce83e2aef5e4bce993edd0e4cba230c2006641
18,683
def convert_to_int(byte_arr):
    """Converts an array of bytes into an array of integers"""
    result = []
    for i in byte_arr:
        result.append(int.from_bytes(i, byteorder='big'))
    # insert the constant (free) term
    result.insert(0, 1)
    # print(result)
    return result
03e7095dcf4d49e78ffa406e49b217e30bef67f4
18,684
def insertion_sort(lst):
    """
    Sorts list using insertion sort

    :param lst: list of unsorted elements
    :return comp: number of comparisons
    """
    comp = 0
    for i in range(1, len(lst)):
        key = lst[i]
        j = i - 1
        cur_comp = 0
        while j >= 0 and key < lst[j]:
            lst[j + 1] = lst[j]
            j -= 1
            cur_comp += 1
        comp += cur_comp
        if cur_comp == 0:
            comp += 1
        lst[j + 1] = key
    return comp
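The function sorts in place and returns only a comparison count; a quick demonstration (input invented):

data = [5, 2, 4, 6, 1, 3]
comparisons = insertion_sort(data)
print(data)         # [1, 2, 3, 4, 5, 6]
print(comparisons)  # number of comparisons made for this input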
37b48a6f35828dfe88d6e329f93cec44b557136c
18,685
import requests
import traceback


def geocoding_address(lon, lat):
    """
    Reverse geocoding service

    refer:
        http://lbsyun.baidu.com/index.php?title=webapi/guide/webservice-geocoding
        http://api.map.baidu.com/geocoder/v2/?location=39.983424,116.322987&output=json&pois=1&ak=XDFk8RGryIrg3Hd7cr2101Yx
        http://api.map.baidu.com/geocoder/v2/?location=39.983424,116.322987&output=json&pois=0&ak=XDFk8RGryIrg3Hd7cr2101Yx

    :param lon:
    :param lat:
    :return:

    ak = '2lGY892LYZokThGF0Ie1FoXjIxaBFNi4'
    """
    ak = 'XDFk8RGryIrg3Hd7cr2101Yx'
    # ak = self.cfgs.get('ak')
    url = "http://api.map.baidu.com/geocoder/v2/"
    params = {
        'location': "%s,%s" % (lat, lon),
        'output': 'json',
        'pois': 0,
        'ak': ak
    }
    address = ''
    try:
        resp = requests.get(url, params)
        data = resp.json().get('result', {})
        address = data.get('formatted_address', '')
    except Exception:
        traceback.print_exc()
        # instance.getLogger().error("<%s> geocoding failed!" % (self.name,))
    return address
6b42b25023baeca88df4d046d6b26386ae478cf9
18,686
def standardize_text(df, col_name, replace=True):
    """Standardize text column in DataFrame

    The standardization is done this way: splitting the string into lower
    case words and removing trailing 's'

    replace -- set to False if it's desired to return a new DataFrame
    instead of editing it.
    """
    standard = []
    for row in df[col_name]:
        entry = set()
        for word in row.replace('/', ',').split(','):
            entry.add(word.strip().lower().replace('\'s', ''))
        standard.append(entry)
    if replace:
        df[col_name] = standard
        return df
    else:
        new_df = df.copy()
        new_df[col_name] = standard
        return new_df
08ceeb0ad3b645920bcb86eba01cea20c143f102
18,689
def error_rate(error_count, total):
    """
    Calculate the error rate, given the error count and the total number of words.

    Args:
        error_count (int): Number of errors.
        total (int): Total number of words (of the same type).

    Returns:
        tuple (int, int, float): The error count, the total number of words,
        and the calculated error rate.
    """
    if total == 0:
        return error_count, total, 0.0
    return error_count, total, (error_count / total) * 100
92f9f10952f86087edf251ccaf3f1005fdd6cb57
18,690
def _runCrossValidate(fitter):
    """
    Top level function that runs crossvalidation for a fitter.
    Used in parallel processing.

    Parameters
    ----------
    fitter: AbstractFitter

    Returns
    -------
    lmfit.Parameters, score
    """
    fitter.fit()
    score = fitter.score()
    return fitter.parameters, score
6dccc89e3ca43d98f417caf0c66d7581fcd77b31
18,691
def markdown_format(input_data) -> str:
    """
    Format input into nice markdown accordingly

    Dict -> formatted list of key/value pairs
    Everything Else -> Str

    Args:
        input_data (mixed):

    Returns:
        str:
    """
    if isinstance(input_data, dict):
        return "\n".join(["*{}*: {}".format(k, v) for k, v in input_data.items()])
    else:
        # Cast so the annotated str return type holds for non-string input,
        # matching the "Everything Else -> Str" contract in the docstring.
        return str(input_data)
d8602b3dce84504fcf6ef0bfaa6cc1b357bb0548
18,692
import pytz


def dt_to_ms(dt):
    """Converts a datetime to a POSIX timestamp in milliseconds"""
    if dt.tzinfo is None:
        dt = dt.replace(tzinfo=pytz.UTC)
    return int(dt.timestamp() * 1000)
aa3050ce15e09b9c1ddeb1edbda8c6e4275f3ce6
18,693
def strip_data(df):
    """Remove unused columns from the data."""
    names = [' dwpf', ' relh', ' drct', ' sknt', ' p01i', ' alti', ' mslp',
             ' vsby', ' gust', ' skyc1', ' skyc2', ' skyc3', ' skyc4',
             ' skyl1', ' skyl2', ' skyl3', ' skyl4', ' presentwx', ' metar']
    for colname in names:
        del df[colname]
    return df
127c472d0b56c9360d2ac05715f1e614ab735941
18,694
import re


def extract_page_body(content):
    """
    Given raw HTML content from an HTTP response, extract the main div only.

    <html>
      <head>...</head>
      <body>
        <nav>...</nav>
        <div class="container-fluid wrapper">  <!-- BEGIN -->
          ...
        </div>  <!-- END -->
        <footer class="footer">...</footer>
        ...
      </body>
    </html>
    """
    try:
        return re.findall(r"(?<=</nav>).*(?=<footer)", content,
                          flags=(re.MULTILINE | re.DOTALL))[0]
    except IndexError:
        return content
df01b52ef612030f6a9d6affd933b6a1518a3ab7
18,697
def get_url(year_of_study, session):
    """
    :param year_of_study: 1, 2, 3 or 4.
    :param session: Examples: 20199 is fall 2019. 20195 is summer 2019.
    :return:
    """
    return "https://student.utm.utoronto.ca/timetable/timetable?yos={0}&subjectarea=&session={1}&courseCode=&sname=&delivery=&courseTitle=".format(
        year_of_study, session)
371e1ad7b1d0d147ad906fd5f7fedd4d7d1f825d
18,698
import click


def _validate_project_name(ctx, argument, value):
    """ Validate that the given project name is well-formed """
    if not value or ' ' in value:
        raise click.BadParameter(u"Project name must not contain spaces")
    return value
a309de9fdc9348ca3b1ecb2e2fdacf197bacefe9
18,700
def django_user_conversion(obj):
    """
    Convert a Django user either by returning USERNAME_FIELD or
    convert it to str.
    """
    if hasattr(obj, "USERNAME_FIELD"):
        return getattr(obj, getattr(obj, "USERNAME_FIELD"), None)
    else:
        return str(obj)
9e42f35f28799f732840a671ce9c16b67583ae19
18,702
import logging


def __datatype_counter(times, series, max_value):
    """
    generic counter datatype with parameterized max_value,
    could be either 2^32, 2^64 or something completely different

    do not use directly, use counter32, counter64 instead

    first value will always be 0.0

    valid range of values:
        min: 0.0
        max: max_value

    parameters:
        series <tuple> of <float>

    returns:
        <tuple> of <float>
    """
    new_series = [0.0, ]
    # first value has to be in range
    if not 0.0 <= series[0] <= max_value:
        msg = "counter %f out of range at time %f, max_value: %f " % (series[0], times[0], max_value)
        logging.error(msg)
        raise AssertionError(msg)
    for index in range(1, len(series)):
        if not 0.0 <= series[index] <= max_value:  # requirement for counter type
            msg = "counter %f out of range at time %f, max_value: %f " % (series[index], times[index], max_value)
            logging.error(msg)
            raise AssertionError(msg)
        duration = times[index] - times[index - 1]
        if duration > 0.0:  # only if duration above zero
            derive = series[index] - series[index - 1]
            if derive < 0.0:  # overflow detected
                derive = max_value - series[index - 1] + series[index]
            if derive < 0.0:
                msg = "max_value: %f, old value: %f, new value: %f, old time: %f, new time: %f" % (
                    max_value, series[index - 1], series[index], times[index - 1], times[index])
                logging.error(msg)
                raise AssertionError(msg)
            new_series.append(derive / duration)
        else:
            new_series.append(0.0)
    return new_series
f7ddc83f645a86000ca5a0ce291f75e8b1f163ac
18,707
def add_suffix(fname, suffix):
    """Adds a suffix to a file name."""
    # rsplit keeps earlier dots in the name intact (the original split(".")
    # raised on names like 'a.b.txt').
    name, extension = fname.rsplit(".", 1)
    return name + "_" + suffix + "." + extension
53e8772bd5b974635010974d6373fbf5816ae520
18,709
def gen_subsets(items):
    """
    returns: all subsets
    """
    subsets = []
    n = len(items)
    # 1 << n is equal to pow(2, n)
    for i in range(1 << n):
        subset = []
        # i >> j is equal to i // pow(2, j)
        # A & 1 is equal to A % 2
        for j in range(n):
            if (i >> j) & 1:
                subset.append(items[j])
        subsets.append(subset)
    return subsets
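Each bit of the counter i decides whether items[j] joins the subset, so all 2**n subsets are enumerated; a quick check:

print(gen_subsets([1, 2, 3]))
# [[], [1], [2], [1, 2], [3], [1, 3], [2, 3], [1, 2, 3]]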
2ac83550f4bcaa712a7ab9b06b3787f6474429ec
18,711
def smplFromParas(smpl, offsets, pose, betas, trans):
    """set the smpl model with the given parameters."""
    smpl.v_personal[:] = offsets
    smpl.pose[:] = pose
    smpl.betas[:] = betas
    smpl.trans[:] = trans
    return smpl
9428578661c99a5d33f28064ac8ac8c6655259df
18,712
def pos_pow(z, k):
    """z**k if k is non-negative, else 0.0"""
    if k >= 0:
        return z**k
    else:
        return 0.0
ba0a46e8711005b9ac0dbd603f600dfaa67fbdd4
18,713
def getNPoly(object):
    """ Return the number of polygons in the object """
    return len(object[1])
01a2808768d34c7ad31285100ee8c5440092a19f
18,714
def validate(self=None, raise_exception=True):
    """Mock validation api call."""
    return False
01b40b5ef59b2145dd7f552aff5818e2a7b7d4d7
18,715
def normalized(spectrum):
    """Test if passed spectrum is truly normalized."""
    for i in range(spectrum.shape[0]):
        if spectrum[i] < 0.0 or spectrum[i] > 1.0:
            return False
    return True
120a6dfd22451c4ea0143869acaa5e653c6ad896
18,716
def alreadyFoundURL(results, url):
    """
    Checks whether a url String has already been found before, to prevent duplicates.

    Returns:
        Boolean -- True, if the specified URL has already been found, False, if not.
    """
    for result in results:
        if url in result[1]:
            return True
    return False
5cd46ac7bedcf764c0eb3bf21879324fe9f7a1ff
18,717
def sort_user_links(links):
    """Sorts the user's social/contact links.

    Args:
        links (list): User's links.

    Returns:
        Sorted list of links.
    """
    return sorted(map(tuple, links), key=lambda kv: kv[0].lower())
1f9eb06af28def57c019fb026d9149f3f6f367b3
18,718
def deep_round(A, ndigits=5):
    """
    Rounds numbers in a list of lists. Useful for approximate equality testing.
    """
    return [[round(val, ndigits) for val in sublst] for sublst in A]
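A one-line check of the record above, showing its use for approximate float equality:

print(deep_round([[0.1 + 0.2, 1.0]]))  # [[0.3, 1.0]] -- the 0.30000000000000004 artifact is rounded away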
cebfb6b2dbe83bcc7222e0dc1b67ca98e95576c5
18,719
def _dl_to_dict(dl):
    """Convert an HTML definition list element into a dict of term -> value text."""
    d = {}
    for dt in dl.select("dt"):
        key = dt.text.strip()
        # Assumption that the value is the next sibling
        # this is not always a dd tag (sometimes li tag for example)
        value = dt.find_next_sibling().text.strip()
        d[key] = value
    return d
9c8c79b296a4640a7d84a28f75d44bda7360299a
18,720
import re


def trim_before_punctuation(input_lines):
    """
    Remove whitespace immediately before commas and semicolons

    Args:
        input_lines(list): input file lines

    Returns:
        list: lines with the whitespace before punctuation removed
    """
    output_lines = []
    for line in input_lines:
        new_line = re.sub(r'\s+(?=[,;])', '', line)
        output_lines.append(new_line)
    return output_lines
c4e3ac03e89888a8079e8ab245901e0a6c0cf6ee
18,721
import bisect


def harm_to_sum(fwhm):
    """
    harm_to_sum(fwhm):
        For an MVMD profile returns the optimal number
        of harmonics to sum incoherently
    """
    fwhms = [0.0108, 0.0110, 0.0113, 0.0117, 0.0119, 0.0124, 0.0127, 0.0132,
             0.0134, 0.0140, 0.0145, 0.0151, 0.0154, 0.0160, 0.0167, 0.0173,
             0.0180, 0.0191, 0.0199, 0.0207, 0.0220, 0.0228, 0.0242, 0.0257,
             0.0273, 0.0295, 0.0313, 0.0338, 0.0366, 0.0396, 0.0437, 0.0482,
             0.0542, 0.0622, 0.0714, 0.0836, 0.1037, 0.1313, 0.1799, 0.2883]
    return len(fwhms) - bisect.bisect(fwhms, fwhm) + 1
30563dbe4396e1661db52431a141a4fe032bc2d9
18,722
def nodestrength(mtx, mean=False):
    """
    Compute the node strength of a graph.

    Parameters
    ----------
    mtx : numpy.ndarray
        A matrix depicting a graph.
    mean : bool, optional
        If True, return the average node strength along the last axis of mtx.

    Returns
    -------
    numpy.ndarray
        The node strength.
    """
    ns = abs(mtx).sum(axis=0)
    if mean:
        ns = ns.mean(axis=-1)
    return ns
0b8a30a6b1ab2218368c0af0f6bf8036b819d431
18,723
def output_asc_file(asc_fname, Bx, By, Bz, Ex, Ey):
    """Units?

    Important: Bx, By, Bz, Ex, and Ey must be integer valued. The internal
    data structure to store this information in emtf is an integer array.
    """
    with open(asc_fname, 'w') as fid:
        for vals in zip(Bx, By, Bz, Ex, Ey):
            line = ('{:7d}' * len(vals)).format(*vals)
            fid.write(line + '\n')
    return asc_fname
282e6522164d4a071e34f2c7aac269e5340b6123
18,724
def file_urls_mutation(dataset_id, snapshot_tag, file_urls):
    """
    Return the OpenNeuro mutation to update the file urls of a snapshot filetree
    """
    file_update = {
        'datasetId': dataset_id,
        'tag': snapshot_tag,
        'files': file_urls
    }
    return {
        'query': 'mutation ($files: FileUrls!) { updateSnapshotFileUrls(fileUrls: $files)}',
        'variables': {
            'files': file_update
        }
    }
ffa1ca42f5af7b93cfc6befb517b7909140b5b01
18,726
import os


def get_num_data_items(dataset_directory):
    """Returns the number of identified data items inside the given directory.

    A data item is defined as a file that has the 'bin' extension."""
    num_data_items = 0
    for filename in os.listdir(os.path.join(dataset_directory, "non-shellcode")):
        name, extension = os.path.splitext(filename)
        if extension == ".bin":
            num_data_items += 1
    for filename in os.listdir(os.path.join(dataset_directory, "shellcode")):
        name, extension = os.path.splitext(filename)
        if extension == ".bin":
            num_data_items += 1
    return num_data_items
ea2e3496ed9e8c25416768f3dc3ee1535bffa3b6
18,728
def get_marginal_topic_distrib(doc_topic_distrib, doc_lengths):
    """
    Return marginal topic distribution p(T) (topic proportions) given the
    document-topic distribution (theta) `doc_topic_distrib` and the document
    lengths `doc_lengths`. The latter can be calculated with `get_doc_lengths()`.
    """
    unnorm = (doc_topic_distrib.T * doc_lengths).sum(axis=1)
    return unnorm / unnorm.sum()
ebcb87a5ddb5e5e2e3c2446134c6ef2ab8a945fa
18,729
def templates():
    """
    HTML templates are text files that provide a mixture of plain HTML and
    markup with custom 'tags'. A run-time library is used to process the
    tags, inserting/generating replacement text.

    Python web frameworks like Django rely heavily on templates, and Flask
    has made jinja2 popular. There are, however, other uses of templates --
    for output of JSON or YAML (for example Ansible processes template text
    into YAML).
    """
    return templates.__doc__
eacf2fcec3fe9297ddf8a091d4bbc3254e06a355
18,730
from typing import OrderedDict


def replicate(d: OrderedDict):
    """
    convert a dict with (element, count) into a list
    with each element replicated count many times
    """
    l = []
    for element, count in d.items():
        l.extend([element] * count)
    return l
276d7ea922c645d689a3ec70247427d031e0fa34
18,731
import os
import sys


def directory():
    """
    Returns the directory of the Resources folder.
    """
    return os.path.join(os.path.dirname(sys.argv[0]), "Resources")
0ce2a9c0275f603c64f8571f283bf1794f4a1fdb
18,733
def get_count_limited_class(classes, class_name, min=1, max=1):
    """ Find a class in an iterator over classes, and constrain its count

    Args:
        classes (:obj:`iterator`): an iterator over some classes
        class_name (:obj:`str`): the desired class' name
        min (:obj:`int`): the fewest instances of a class named `class_name` allowed
        max (:obj:`int`): the most instances of a class named `class_name` allowed

    Returns:
        :obj:`type`: the class in `classes` whose name (`__name__`) is `class_name`;
        if no instances of class are allowed, and no instances are found in `classes`,
        then return `None`

    Raises:
        :obj:`ValueError`: if `min` > `max`, or
            if `classes` doesn't contain between `min` and `max`, inclusive, class(es)
            whose name is `class_name`, or
            if `classes` contains multiple, distinct classes with the name `class_name`
    """
    if min > max:
        raise ValueError("min ({}) > max ({})".format(min, max))
    matching_classes = [cls for cls in classes if cls.__name__ == class_name]
    if len(matching_classes) < min or max < len(matching_classes):
        raise ValueError("the number of members of 'classes' named '{}' must be in [{}, {}], but it is {}".format(
            class_name, min, max, len(matching_classes)))
    # confirm that all elements in matching_classes are the same
    unique_matching_classes = set(matching_classes)
    if 1 < len(unique_matching_classes):
        raise ValueError("'classes' should contain at most 1 class named '{}', but it contains {}".format(
            class_name, len(unique_matching_classes)))
    if matching_classes:
        return matching_classes[0]
    return None
4512bb22aa2ac1632813bea8f834785b9b7c5c20
18,735
def compute_total_impulse(spin_rate, roll_inertia, radial_distance):
    """Computes total impulse required to spin rocket at desired rate.

    `PEP 484`_ type annotations are supported. If attribute, parameter, and
    return types are annotated according to `PEP 484`_, they do not need to be
    included in the docstring:

    Args:
        spin_rate (int, float): Desired roll spin rate in rad/s of launch
            vehicle for stabilization.
        roll_inertia (int, float): The roll inertia of the launch vehicle
            in kg-m^2.
        radial_distance (int, float): The location of the solid rocket motors
            radially along the launch vehicle in m.

    Returns:
        total_impulse (float): The total impulse in N-s required to spin the
            launch vehicle to the desired rate.

    .. _PEP 484:
        https://www.python.org/dev/peps/pep-0484/
    """
    if spin_rate <= 0 or roll_inertia <= 0 or radial_distance <= 0:
        raise ValueError('Spin rate, roll inertia, and radial distance must be positive values.')
    total_impulse = roll_inertia * spin_rate / float(radial_distance)
    return total_impulse
ec616f29a5e781e4583074ca70e19811a89ee32a
18,736
import requests


def post_splunk(url, token, payload):
    """ Send a post request to Splunk API """
    headers = {'Authorization': 'Splunk {}'.format(token)}
    res = requests.post(url=url, headers=headers, data=payload, verify=False)
    res.raise_for_status()
    return res.json()
45310cabaf65004217cd2bcd0196314cb4fbabd7
18,737
def basis_size(request) -> int:
    """Number of basis vectors."""
    return request.param
25fe88c9fee6611186aff049d1e0a10f748d88f2
18,739
import math


def nu_e(n_n, n_e, T_e):
    """approximate calculation of electron collision frequency from Kelly 89

    Parameters
    ----------
    n_n : (float) neutral density cm-3
    n_e : (float) electron density cm-3
    T_e : (float) electron temperature K
    """
    nu_e_n = 5.4 * 10**(-10) * n_n * T_e**(1/2)
    nu_e_i = (34 + 4.18 * math.log(T_e**3 / n_e)) * n_e * T_e**(-3/2)
    return nu_e_n + nu_e_i
3c73538dd97a4f03d0a98d8fe4427697089234e8
18,740
import math


def line(p0, p1):
    """
    Create line between two points based on Bresenham algorithm
    """
    steep = False
    x0 = p0[0]
    y0 = p0[1]
    x1 = p1[0]
    y1 = p1[1]
    if math.fabs(x0 - x1) < math.fabs(y0 - y1):
        x0, y0 = y0, x0
        x1, y1 = y1, x1
        steep = True
    if x0 > x1:
        x0, x1 = x1, x0
        y0, y1 = y1, y0
    dx = x1 - x0
    dy = y1 - y0
    if dx == 0:
        derror = 0.1
    else:
        derror = math.fabs(dy / dx)
    error = 0.0
    y = y0
    x = x0
    points = []
    while x <= x1:
        points.append((y, x) if steep else (x, y))
        error += derror
        if error > 0.5:
            y += 1 if y1 > y0 else -1
            error -= 1.
        x += 1
    return points
3f590e2416280dc67467b6d08bc39b50e84e0717
18,741
def str2array(input_str):
    """
    Convert string to array

    :param input_str: str action in string format
    :return: list of floats parsed from the string
    """
    action = input_str.split("[")
    action = action[1].split("]")
    action = action[0].split(" ")
    action_final = []
    for el in action:
        if not el == '':
            action_final.append(float(el))
    return action_final
521a9617622ea26b2d88369a9df5048a9fcf2525
18,742
from typing import Any


def get_db_table(model_class: Any) -> str:
    """E.g. (RealmDomain -> 'zerver_realmdomain')"""
    return model_class._meta.db_table
8a97923b577eae12543fda585b14dd9363a23b48
18,745
def indent(data: str, spaces: int = 4) -> str:
    """
    indents a code block a set amount of spaces

    note: is ~1.5x faster than textwrap.indent(data, " " * spaces)
    (from my testing)
    """
    indent_block = " " * spaces
    return "\n".join((indent_block + line if line else line) for line in data.splitlines())
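A short demonstration of the function above; empty lines are deliberately left unindented:

print(indent("a\n\nb", spaces=2))  # '  a\n\n  b' -- the blank line is left alone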
238ce90884ff01c3b25f28ff96b1a38cd7901577
18,746
import functools


def synchronized(fn):
    """Runs the wrapped method under a lock.

    The self parameter to the wrapped function is expected to have a
    _heap_lock attribute.
    """
    @functools.wraps(fn)
    def wrapper(self, *args, **kwargs):
        with self._heap_lock:
            return fn(self, *args, **kwargs)
    return wrapper
72c751500f6ac9a87d143977674a23af7029daa8
18,750
def default_hyperparams(input_dim=0):
    """
    Default parameter dictionary for the experiment.

    Parameters
    ----------
    input_dim: int, optional

    Returns
    -------
    hyperparams: dict
    """
    hyperparams = {
        "nvis": input_dim,
        "nhid": 100,
        "dataset_name": "smri",
        "learning_rate": 0.001,
        "min_lr": 0.0001,
        "decay_factor": 1.0005,
        "batch_size": 10,
        "init_momentum": 0.0,
        "final_momentum": 0.5,
        "termination_criterion": {
            "__builder__": "pylearn2.termination_criteria.MonitorBased",
            "channel_name": "\"valid_reconstruction_cost\"",
            "prop_decrease": 0,
            "N": 20
        },
        "niter": 1,
        "data_class": "MRI_Standard",
        # The original literal defined "weight_decay" twice (mlp.L1WeightDecay,
        # then dbm.L1WeightDecay); only the last definition survives in a dict
        # literal, so the shadowed duplicate is dropped here.
        "weight_decay": {
            "__builder__": "pylearn2.costs.dbm.L1WeightDecay",
            "coeffs": [0.01]
        }
    }
    return hyperparams
eba0f34d52b6f0d8ddfa1f4ea99b92f6c90f64fa
18,751
def ltd(balance_df):
    """Checks if the current LTD (Long Term Debt) was reduced since previous year

    Explanation of LTD: https://www.investopedia.com/terms/l/longtermdebt.asp

    balance_df = Balance Sheet of the specified company
    """
    lt_debt_curr = balance_df.iloc[balance_df.index.get_loc("Long Term Debt"), 0]
    lt_debt_prev = balance_df.iloc[balance_df.index.get_loc("Long Term Debt"), 1]
    if lt_debt_curr < lt_debt_prev:
        return True
    else:
        return False
3a43098da821e96d605f3bf579e0ca8e11d1a66b
18,752
def quick_sort(arr):
    """Sort array of numbers with quicksort."""
    if len(arr) == 1:
        return arr
    if len(arr) > 1:
        pivot = arr[0]
        left = 1
        right = len(arr) - 1
        while left <= right:
            if arr[left] > pivot and arr[right] < pivot:
                arr[left], arr[right] = arr[right], arr[left]
                left += 1
                right -= 1
            elif arr[left] <= pivot and arr[right] < pivot:
                left += 1
            elif arr[left] > pivot and arr[right] >= pivot:
                right -= 1
            elif arr[left] <= pivot and arr[right] >= pivot:
                left += 1
                right -= 1
        arr[0], arr[right] = arr[right], arr[0]
        divider = right + 1
        first = quick_sort(arr[:right])
        second = quick_sort(arr[divider:])
        return first + [arr[right]] + second
    else:
        return arr
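A spot check of the partitioning quicksort above (input invented):

print(quick_sort([3, 6, 1, 8, 2, 9, 4]))  # [1, 2, 3, 4, 6, 8, 9]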
9f40258588967247379d50532dd62ec0588365b1
18,753
def _include_exclude_list(include, exclude):
    """
    create the list of queries that would be checked for include or exclude
    """
    keys = []
    if include:
        for item in include:
            keys.append((item, 'included'))
    if exclude:
        for item in exclude:
            keys.append((item, 'excluded'))
    return keys
ed968d56d6bb095de118f6526e277a54e1610cc4
18,754
def fetch_specie(specie_url, fetcher):
    """
    Get Data of a specie.
    """
    return fetcher(specie_url)
5882affad01a750d2afb5ca047a9f82872d03c56
18,755
import pickle


def load_pik(pikFile):
    """Convenience function for simple loading of pickle files
    """
    with open(pikFile, 'rb') as fid:
        d = pickle.load(fid)
    return d
f70f08bbf98fddad7bc1d99dee2049b8525c13e5
18,759
def classImplements(c, ms):
    """
    c is a class, and ms is a set of method names.
    Returns True if c implements all the methods in ms.
    Complains otherwise, and returns False
    """
    result = True
    for n in ms:
        m = getattr(c, n, False)
        if not (m and callable(m)):
            print(c, "does not have method", n)
            result = False
    return result
e459e9a50cce501356d5494fb616cfa4df32fff7
18,760
def datetime_to_grass_datetime_string(dt):
    """Convert a python datetime object into a GRASS datetime string

    .. code-block:: python

        >>> import grass.temporal as tgis
        >>> import dateutil.parser as parser
        >>> dt = parser.parse("2011-01-01 10:00:00 +01:30")
        >>> tgis.datetime_to_grass_datetime_string(dt)
        '01 jan 2011 10:00:00 +0090'
        >>> dt = parser.parse("2011-01-01 10:00:00 +02:30")
        >>> tgis.datetime_to_grass_datetime_string(dt)
        '01 jan 2011 10:00:00 +0150'
        >>> dt = parser.parse("2011-01-01 10:00:00 +12:00")
        >>> tgis.datetime_to_grass_datetime_string(dt)
        '01 jan 2011 10:00:00 +0720'
        >>> dt = parser.parse("2011-01-01 10:00:00 -01:30")
        >>> tgis.datetime_to_grass_datetime_string(dt)
        '01 jan 2011 10:00:00 -0090'
    """
    # GRASS datetime month names
    month_names = ["", "jan", "feb", "mar", "apr", "may", "jun",
                   "jul", "aug", "sep", "oct", "nov", "dec"]

    if dt is None:
        raise Exception("Empty datetime object in datetime_to_grass_datetime_string")

    # Check for time zone info in the datetime object
    if dt.tzinfo is not None:
        tz = dt.tzinfo.utcoffset(0)
        if tz.seconds > 86400 / 2:
            tz = (tz.seconds - 86400) / 60
        else:
            tz = tz.seconds / 60

        # Year width normalized to %.4i to match the no-timezone branch.
        string = "%.2i %s %.4i %.2i:%.2i:%.2i %+.4i" % (dt.day,
                                                        month_names[dt.month],
                                                        dt.year, dt.hour,
                                                        dt.minute, dt.second,
                                                        tz)
    else:
        string = "%.2i %s %.4i %.2i:%.2i:%.2i" % (dt.day, month_names[
            dt.month], dt.year, dt.hour, dt.minute, dt.second)

    return string
47495bb7a26fda2dfc40b1cbdbff045030fecc31
18,761
import os
import argparse
import json


def argparse_file_json(file_path):
    """
    Validates whether a file contains JSON parsable by Python.
    Raises argparse.ArgumentTypeError if not.
    """
    if not os.path.isfile(file_path):
        raise argparse.ArgumentTypeError('Provided configuration file does not exist')
    try:
        with open(file_path, 'r') as file:
            data = json.load(file)
    except json.decoder.JSONDecodeError:
        raise argparse.ArgumentTypeError('Provided configuration file content is not json')
    except Exception as e:
        raise argparse.ArgumentTypeError('Cannot parse provided configuration file:\n' + str(e))
    return data
82b651439442fd6a780ec0b695f8452b80f4d4f8
18,762
def flip_index(i, n):
    """Reorder qubit indices from largest to smallest.

    >>> from sympy.physics.quantum.qasm import flip_index
    >>> flip_index(0, 2)
    1
    >>> flip_index(1, 2)
    0
    """
    return n - i - 1
c6b8f7143bda5cdf80c7994041536b25d142c3cc
18,763
import io
import csv


def Csv(header, rows):
    """CSV query output."""
    stringio = io.StringIO()
    writer = csv.writer(stringio)
    writer.writerow(header)
    for row in rows:
        writer.writerow(row)
    return stringio.getvalue()
329c8436849af21ad5f04cdf60314903e737f51b
18,765
import os


def get_job_id(short=True):
    """ Return PBS job id """
    jobid = os.environ.get('PBS_JOBID', '')
    if short:
        # Strip off '.rman2'
        jobid = jobid.split('.')[0]
    return jobid
5b1623e033cd99e8fc315d129485542a2cd77d6d
18,766
def first_half(dayinput):
    """ first half solver: """
    half = len(dayinput) // 2
    end = len(dayinput)
    dayinput = dayinput * 2
    i = 0
    total = 0
    while i < end:
        next_i = i + half
        if dayinput[i] == dayinput[next_i]:
            total += int(dayinput[i])
        i += 1
    return total
8c84ffefe6d360b26658da8ff60a9d889824af98
18,767
def _splitquery(url):
    """splitquery('/path?query') --> '/path', 'query'."""
    path, delim, query = url.rpartition('?')
    if delim:
        return path, query
    return url, None
ea1d04538d90139bc257a439a0d6bf02e6b15b13
18,768
def block_compute(x_start, x_stop,
                  y_start, y_stop,
                  z_start, z_stop,
                  origin=(0, 0, 0),
                  block_size=(512, 512, 16)):
    """
    Get bounding box coordinates (in 3D) of small cutouts to request
    in order to reconstitute a larger cutout.

    Arguments:
        x_start (int): The lower bound of dimension x
        x_stop (int): The upper bound of dimension x
        y_start (int): The lower bound of dimension y
        y_stop (int): The upper bound of dimension y
        z_start (int): The lower bound of dimension z
        z_stop (int): The upper bound of dimension z

    Returns:
        [((x_start, x_stop), (y_start, y_stop), (z_start, z_stop)), ... ]
    """
    # x (the original tested `len(...) is 0`; identity comparison with int
    # literals is unreliable, so `== 0` is used throughout)
    x_bounds = range(origin[0], x_stop + block_size[0], block_size[0])
    x_bounds = [x for x in x_bounds if (x > x_start and x < x_stop)]
    if len(x_bounds) == 0:
        x_slices = [(x_start, x_stop)]
    else:
        x_slices = []
        for start_x in x_bounds[:-1]:
            x_slices.append((start_x, start_x + block_size[0]))
        x_slices.append((x_start, x_bounds[0]))
        x_slices.append((x_bounds[-1], x_stop))

    # y
    y_bounds = range(origin[1], y_stop + block_size[1], block_size[1])
    y_bounds = [y for y in y_bounds if (y > y_start and y < y_stop)]
    if len(y_bounds) == 0:
        y_slices = [(y_start, y_stop)]
    else:
        y_slices = []
        for start_y in y_bounds[:-1]:
            y_slices.append((start_y, start_y + block_size[1]))
        y_slices.append((y_start, y_bounds[0]))
        y_slices.append((y_bounds[-1], y_stop))

    # z
    z_bounds = range(origin[2], z_stop + block_size[2], block_size[2])
    z_bounds = [z for z in z_bounds if (z > z_start and z < z_stop)]
    if len(z_bounds) == 0:
        z_slices = [(z_start, z_stop)]
    else:
        z_slices = []
        for start_z in z_bounds[:-1]:
            z_slices.append((start_z, start_z + block_size[2]))
        z_slices.append((z_start, z_bounds[0]))
        z_slices.append((z_bounds[-1], z_stop))

    # alright, yuck. but now we have {x, y, z}_slices, each of which hold the
    # start- and end-points of each cube-aligned boundary. For instance, if you
    # requested z-slices 4 through 20, it holds [(4, 16), (16, 20)].
    # For my next trick, I'll convert these to a list of:
    # ((x_start, x_stop), (y_start, y_stop), (z_start, z_stop))
    chunks = []
    for x in x_slices:
        for y in y_slices:
            for z in z_slices:
                chunks.append((x, y, z))
    return chunks
2b711e3560f513d135f4da0ca03dd5aab83972aa
18,770
def expected_dt_m_nrp_values():
    """Values for m_nrp."""
    return {"M_nrp_post_top3": 86.777, "M_nrp_post_top1": 76.446}
1432fd6ab1f3fa30ab65238cea26deeb3eabf885
18,772
def sub_string(string, sub):
    """Check whether the letters of sub appear in string in the required order"""
    index = 0
    for c in sub:
        try:
            index += string[index:].index(c) + 1
        except ValueError:
            return False
    else:
        return True
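Two quick checks of the ordered-subsequence test above (sample words invented):

print(sub_string("chocolate", "colt"))  # True  -- 'c', 'o', 'l', 't' appear in this order
print(sub_string("chocolate", "tc"))    # False -- no 'c' occurs after the final 't'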
ef48830a99a79c4138d0fb8671aeef9e719bc683
18,773
from typing import List


def position_util(cmd_line: List[str], word_position: int, word_before_cursor: str) -> bool:
    """
    Util method for autocompletion conditions. Makes autocomplete work well.

    :param cmd_line: the list of command line words
    :param word_position: the position of the word we are attempting to autocomplete
    :param word_before_cursor: word_before_cursor parsed from the document
    :return: True if we should try to autocomplete this word.
    """
    # Special case for no characters typed yet (we send in [''] as cmd_line which fucks with the logic)
    if word_position == 1 and len(cmd_line) == 1 and cmd_line[0] == '':
        return True
    # Don't keep completing after the word position
    # Don't complete if we just hit space after the word position
    # Don't complete on the previous word position until there is a space
    return len(cmd_line) < word_position + 1 \
        and not (len(cmd_line) == word_position and word_before_cursor == '') \
        and not (len(cmd_line) == word_position - 1 and word_before_cursor != '')
463bb420d7a7cc1aa336b8d664194cc99b5c4dcb
18,774
import torch


def calculate_reward(target, prediction):
    """
    Calculating a reward for a prediction.

    :param target: True graph label.
    :param prediction: Predicted graph label.
    """
    reward = (target == torch.argmax(prediction))
    reward = 2 * (reward.float() - 0.5)
    return reward
8485dbd706433b3a83110b22e30bc9b99bdb6156
18,776
def get_fmriprep_outlier_volumes_from_confounds(confounds_df):
    """extract which volume numbers are outliers from the fmriprep confounds df.

    Returns:
        bad_volumes: list
            e.g. [34, 35, 100, 150]
    """
    # get the motion columns
    motion = confounds_df.filter(regex='motion')
    # find any rows with values above 0 (axis=1 spelled out; positional `any(1)`
    # is deprecated in recent pandas)
    return_df = motion[(motion > 0).any(axis=1)]
    # return the index (row names) of this df
    return list(return_df.index)
b62d833ec2b7f000584354ca6470863acb33682c
18,777
def shortest(node):
    """ Find the path with the maximum score """
    childs = node.get("childs")
    score = node.get('score', 0)
    key = node.get('key')
    if not childs:
        return score, [key]
    current_score, current_seq = -1, []
    for child in childs:
        _score, _seq = shortest(child)
        if _score > current_score:  # a larger score
            current_score, current_seq = _score, _seq
    return current_score + score, [key] + current_seq
0a31fad969b19ac99039a6d8d71d2750f149f280
18,778
def _priority_connection(priority_route: dict, cost_matrix, mean_routes, std_routes,
                         mean_weight, std_weigth, distance_weight) -> dict:
    """
    Give us the priority connections dictionary for each route

    Parameters
    -----------
    priority_route: dict
        The dictionary with the priority routes
    cost_matrix: dict of dicts
        A dict of dicts with the points to visit as keys; each value is another
        dictionary mapping points to the cost of going from the first point to
        the second. It's a dictionary representation of a matrix.
    mean_routes: dict
        A dict with the mean distance/cost from each point to all others
    std_routes: dict
        A dict with the standard deviation of distance/cost from each point to all others
    mean_weight: float
        The weighting of the mean for all points; a higher number makes points
        with a higher mean (more distant from others) take larger values and
        prioritizes their connections
    std_weigth: float
        The weighting of the standard deviation for all points; a higher number
        makes points with higher deviation (more varied distances to other
        points) take larger values and prioritizes their connections
    distance_weight: float
        The weighting of the distance between all points; a higher
        distance_weight makes points with higher distances between them take
        larger values

    Return
    -------
    A dict of dicts with every point's connections ordered by priority
    """
    dict_prior_connections_ordered = {}
    base_order = priority_route.keys()
    for id_route in base_order:
        prior_connections = {}
        for given_route in base_order:
            if id_route != given_route:
                prior_connections[given_route] = ((mean_routes[given_route]**mean_weight) *
                                                  (std_routes[given_route]**std_weigth)) \
                    / cost_matrix[given_route][id_route] ** distance_weight
        dict_prior_connections_ordered[id_route] = dict(sorted(prior_connections.items(),
                                                               reverse=True, key=lambda x: x[1]))
    return dict_prior_connections_ordered
b4be98cd4fe07b3592e4fa44a63d38195a7dfd05
18,779
def merge_dicts(*dicts):
    """Ordered merge of dicts, with right overriding left."""
    d = {}
    for newdict in dicts:
        d.update(newdict)
    return d
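A one-line check of the record above, showing the right-to-left precedence:

print(merge_dicts({'a': 1, 'b': 2}, {'b': 3}))  # {'a': 1, 'b': 3} -- the rightmost dict wins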
ad3f31b88a7423cfb844bea462e818692d5ab716
18,783
def parse_user_input(user_input, number):
    """Takes user input related to desired TopSpin directories to install to
    and parses the input."""
    if user_input == "":
        # User pressed <Return>. Return list of all valid indices.
        return list(range(number))
    if user_input == '0':
        # User pressed 0. Return empty list (no installation will take place)
        print("No installation of the nmrespy app will occur...")
        return []
    # Split input at whitespace (filter out any empty elements)
    values = list(filter(lambda x: x != "", user_input.split(" ")))
    for value in values:
        # Check each element is numeric and of valid value
        if not (value.isnumeric() and int(value) <= number):
            return False
    # Return indices corresponding to TopSpin paths of interest
    return [int(value) - 1 for value in values]
b54a6fb3643cbbaccf465abd36aaa8de19bf952d
18,784
import inspect


def _get_custom_pkg_info(name, fn):
    """Retrieve information about the installed package from the install function.
    """
    vals = dict((k, v) for k, v in inspect.getmembers(fn))
    code = inspect.getsourcelines(fn)
    if vals["__name__"] == "decorator":
        fn = [x for x in fn.func_closure
              if not isinstance(x.cell_contents, str)][0].cell_contents
        vals = dict((k, v) for k, v in inspect.getmembers(fn))
        code = inspect.getsourcelines(fn)
    version = ""
    for line in (l.strip() for l in code[0]):
        if line.find("version") >= 0 and line.find(" =") > 0:
            version = line.split()[-1].replace('"', '').replace("'", "")
        if version:
            break
    doc = vals.get("func_doc", "")
    descr, homepage = "", ""
    if doc is not None:
        descr = doc.split("\n")[0]
        for line in doc.split("\n"):
            if line.strip().startswith("http"):
                homepage = line.strip()
    return {"name": name.replace("install_", ""), "description": descr,
            "homepage_uri": homepage, "version": version}
bf8edb8c731db637b7b3e46c085c152d14f2b8c9
18,785
def AlgoBackward(k, V, s, M):
    """
    k (int): the number of jar types in the capacity system
    V (int[k]): the list of jar volumes in the capacity system
    s (int): the quantity of jam
    M (int[k+1][s+1]): a list whose cell i holds the list of jars used for
        quantity i of jam

    Builds the list A of jars used by the solution and returns the pair
    (sum(A), A), where sum(A) is the number of jars used by the solution
    """
    A = [0] * k
    q = s
    i = k
    while q != 0:
        if (i > 0) and (M[q][i] == M[q][i-1]):
            i = i - 1
        else:
            A[i-1] = A[i-1] + 1
            q = q - V[i-1]
    return sum(A), A
716f539ca6123ca0dcc5700b3e9de67f0828d7db
18,786
import os


def _locate(filename):
    """
    Find the file relative to where the test is located.
    """
    return os.path.join(os.path.dirname(__file__), filename)
bd4674939585fd53cac62b226679e17a9ddf7849
18,787
import jinja2


def fillHTMLTemplate(templateString, params):
    """Invokes the jinja2 methods to fill in the slots in the template."""
    templateObject = jinja2.Template(templateString)
    htmlContent = templateObject.render(params)
    return htmlContent
bba20da7a5411bf8b252fffcdb108516a2d5afa9
18,790
def check_above_or_below(considering_pt, remaining_pt_1, remaining_pt_2):
    """
    This function is used to check if the considering point is above or below
    the line connecting two remaining points.

    1: above
    -1: below
    """
    orthogonal_vector = remaining_pt_2 - remaining_pt_1
    line_connecting_pt1_and_pt2 = -orthogonal_vector[1] * (considering_pt[0] - remaining_pt_1[0]) \
        + orthogonal_vector[0] * (considering_pt[1] - remaining_pt_1[1])
    if line_connecting_pt1_and_pt2 > 0:
        return 1
    return -1
0da67abc45356260580beec22f37a56bd5f43398
18,792
import torch


def y_to_pi(y, K=-1):
    """
    Turns a vector of class labels y into a one-hot encoding class membership matrix Pi.

    Note: Assuming classes are indices {0, 1, ..., K - 1}.
    If K is not provided, picks K = max(y) + 1.

    Args:
        y: The vector of class labels.
        K: The number of classes, if provided. Optional since we can estimate
           it from given labels.

    Returns:
        A class membership matrix Pi (N, K)
    """
    return torch.nn.functional.one_hot(y, K)
2c0a7374defe20a5573784c3f5f44885003bfc63
18,793
def remove_carriage_ret(lst):
    r"""
    Remove carriage return - \r from a list
    """
    return list(map(lambda el: el.replace('\r', ''), lst))
7a576d74c216deef41ce7023e6cec8af2d057031
18,794
def contains(r_zero, r_one):
    """
    Takes two squares and determines if square one is contained within square zero

    Returns a boolean
    """
    w_zero = r_zero.width + r_zero.x
    h_zero = r_zero.height + r_zero.y
    w_one = r_one.width + r_one.x
    h_one = r_one.height + r_one.y

    return (
        r_zero.x <= r_one.x and
        w_zero >= w_one and
        r_zero.y <= r_one.y and
        h_zero >= h_one
    )
f475c376679170002dabc19bf0ae4df75e491bba
18,795
import uuid


def make_uuid() -> str:
    """make uuid4

    Returns:
        str: uuid4
    """
    return "tmp_" + str(uuid.uuid4())
f44443985ba728916c69ca03c2b2db27417b001b
18,796
def config_dict():
    """Sample config dict."""
    d = {
        "title": "TOML Example",
        "owner": {
            "name": "Tom Preston-Werner",
            "dob": "1979-05-27",
        },
        "database": {
            "server": "192.168.1.1",
            "ports": [8001, 8001, 8002],
            "connection_max": 5000,
            "enabled": True,
        },
        "servers": {
            "alpha": {"ip": "10.0.0.1", "dc": "eqdc10"},
            "beta": {"ip": "10.0.0.2", "dc": "eqdc10"},
        },
        "clients": {
            "data": [["gamma", "delta"], [1, 2]],
        },
    }
    return d
f1a6b1f07f22b2a57c10c7f98ee8932c0201f57f
18,797
from typing import Dict
from typing import Any


def _count_populations_in_params(params: Dict[str, Any], prefix: str) -> int:
    """
    Counts the number of electron or ion populations in a ``params`` `dict`.

    The number of populations is determined by counting the number of items
    in the ``params`` `dict` with a key that starts with the string defined
    by ``prefix``.
    """
    return len([key for key in params if key.startswith(prefix)])
0678e198f6e5562eb26e8d2c68aad14422d0f820
18,801
import pandas


def add_sum_of_rows(df, breakdown_col, value_col, new_row_breakdown_val,
                    breakdown_vals_to_sum=None):
    """Returns a new DataFrame by appending rows by summing the values of other rows.

    Automatically groups by all other columns, so this won't work if there are
    extraneous columns.

    For example, calling `add_sum_of_rows(df, 'race', 'population', 'Total')`
    will group by all columns except for 'race' and 'population', and for each
    group add a row with race='Total' and population=the sum of population for
    all races in that group.

    df: The DataFrame to calculate new rows from.
    breakdown_col: The name of the breakdown column that a new value is being
        summed over.
    value_col: The name of the column whose values should be summed.
    new_row_breakdown_val: The value to use for the breakdown column.
    breakdown_vals_to_sum: The list of breakdown values to sum across. If not
        provided, defaults to summing across all values.
    """
    filtered_df = df
    if breakdown_vals_to_sum is not None:
        filtered_df = df.loc[df[breakdown_col].isin(breakdown_vals_to_sum)]

    group_by_cols = list(df.columns)
    group_by_cols.remove(breakdown_col)
    group_by_cols.remove(value_col)
    sums = filtered_df.groupby(group_by_cols).sum().reset_index()
    sums[breakdown_col] = new_row_breakdown_val

    result = pandas.concat([df, sums])
    result = result.reset_index(drop=True)
    return result
4ec64deb1b6999942ad01895c5f8dd2a7ab0b053
18,802
def sort_topological(sink):
    """Returns a list of the sink node and all its ancestors in topologically sorted order."""
    L = []     # Empty list that will contain the sorted nodes
    T = set()  # Set of temporarily marked nodes
    P = set()  # Set of permanently marked nodes

    def visit(node):
        if node in P:
            return
        if node in T:
            raise ValueError('Your graph is not a DAG!')
        T.add(node)  # mark node temporarily
        for predecessor in node.get_predecessors():
            visit(predecessor)
        P.add(node)  # mark node permanently
        L.append(node)

    visit(sink)
    return L
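A minimal sketch exercising the function above; the Node class here is hypothetical, standing in for any type that provides get_predecessors():

class Node:
    def __init__(self, name, preds=()):
        self.name = name
        self.preds = list(preds)

    def get_predecessors(self):
        return self.preds

a = Node('a')
b = Node('b', [a])
c = Node('c', [a, b])
print([n.name for n in sort_topological(c)])  # ['a', 'b', 'c']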
a5747bc16ef5f2f12888f023548c3ed6ebb45e74
18,803
def gps_to_utc(gpssec):
    """
    Convert GPS seconds to UTC seconds.

    Parameters
    ----------
    gpssec: int
        Time in GPS seconds.

    Returns
    -------
    Time in UTC seconds.

    Notes
    -----
    The code is ported from Offline.

    Examples
    --------
    >>> gps_to_utc(0)  # Jan 6th, 1980
    315964800
    """
    kSecPerDay = 24 * 3600
    kUTCGPSOffset0 = (10 * 365 + 7) * kSecPerDay
    kLeapSecondList = (
        ((361 + 0 * 365 + 0 + 181) * kSecPerDay + 0, 1),     # 1 JUL 1981
        ((361 + 1 * 365 + 0 + 181) * kSecPerDay + 1, 2),     # 1 JUL 1982
        ((361 + 2 * 365 + 0 + 181) * kSecPerDay + 2, 3),     # 1 JUL 1983
        ((361 + 4 * 365 + 1 + 181) * kSecPerDay + 3, 4),     # 1 JUL 1985
        ((361 + 7 * 365 + 1) * kSecPerDay + 4, 5),           # 1 JAN 1988
        ((361 + 9 * 365 + 2) * kSecPerDay + 5, 6),           # 1 JAN 1990
        ((361 + 10 * 365 + 2) * kSecPerDay + 6, 7),          # 1 JAN 1991
        ((361 + 11 * 365 + 3 + 181) * kSecPerDay + 7, 8),    # 1 JUL 1992
        ((361 + 12 * 365 + 3 + 181) * kSecPerDay + 8, 9),    # 1 JUL 1993
        ((361 + 13 * 365 + 3 + 181) * kSecPerDay + 9, 10),   # 1 JUL 1994
        ((361 + 15 * 365 + 3) * kSecPerDay + 10, 11),        # 1 JAN 1996
        ((361 + 16 * 365 + 4 + 181) * kSecPerDay + 11, 12),  # 1 JUL 1997
        ((361 + 18 * 365 + 4) * kSecPerDay + 12, 13),        # 1 JAN 1999
        # DV: 2000 IS a leap year since it is divisible by 400,
        # ie leap years here are 2000 and 2004 -> leap days = 6
        ((361 + 25 * 365 + 6) * kSecPerDay + 13, 14),        # 1 JAN 2006
        ((361 + 28 * 365 + 7) * kSecPerDay + 14, 15),        # 1 JAN 2009
        ((361 + 31 * 365 + 8 + 181) * kSecPerDay + 15, 16),  # 1 JUL 2012
        ((361 + 34 * 365 + 8 + 181) * kSecPerDay + 16, 17),  # 1 JUL 2015
        ((361 + 36 * 365 + 9) * kSecPerDay + 17, 18)         # 1 JAN 2017
    )
    leapSeconds = 0
    for x in reversed(kLeapSecondList):
        if gpssec >= x[0]:
            leapSeconds = x[1]
            break
    return gpssec + kUTCGPSOffset0 - leapSeconds
342c479df8d4c864592494e8d50452e9f02c04d5
18,804
def get_assoc_scheme_parameter(assoc_scheme):
    """
    assoc_scheme - 3B, 3C, etc.

    Output:
        param - Thermopack parameter
    """
    if assoc_scheme.upper() == "NONE":
        param = "no_assoc"
    else:
        param = "assoc_scheme_{}".format(assoc_scheme)
    return param
b0f7a09d157dcaa466cec2b2a47684a1ee2df80b
18,806
def match_1word(word, pwords):
    """Match probe words against word.

    Return:
    - hits1: set of letters that match
    - hits2: set of matched (letter, pos) tuples.
    - wlets: unique letters in the word
    - badlets: set of bad letters
    - badlets_p: list of bad letter sets by position
    """
    hits1 = set()  # letters
    hits2 = set()  # (letter, pos) tuples
    # hits2u = set()  # letters in correct position
    wlets = set(word)
    plets = set()
    badlets_p = [set() for _ in range(len(word))]
    badlets = set()
    for pw in pwords:
        hits1.update(wlets.intersection(set(pw)))
        plets.update(set(pw))
        badlets.update(set(pw) - wlets)
        for i, let in enumerate(word):
            if pw[i] == let:
                hits2.update([(let, i)])
            else:
                badlets_p[i].add(pw[i])
    return hits1, hits2, wlets, badlets, badlets_p
b3f41ce8381f9d230037a7c8f82138381ea2db81
18,807
import requests
from bs4 import BeautifulSoup


def get_page_soup(url):
    """
    Returns html of a given url

    Parameters:
        url (string): complete url

    Returns:
        html: pageSoup
    """
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                             'AppleWebKit/537.36 (KHTML, like Gecko) '
                             'Chrome/96.0.4664.110 Safari/537.36'}
    pageTree = requests.get(url, headers=headers)
    pageSoup = BeautifulSoup(pageTree.content, 'html.parser')
    return pageSoup
efbdbb92e0b93bf6390541a9608855da266d7682
18,808
import operator


def manacher(s):
    """
    complexity analysis: notice MX only from 0 to N, rightmost always move
    to right ascending. so O(N)
    """
    # interleave with "#" for handle odd substring and even string two cases
    s = [i for j in zip("#" * len(s), s) for i in j]
    s = "".join(s)
    s += "#"
    ln = len(s)
    # p: centered with s[i]
    # cen: center index of rightmost substring from 0 to now(i)
    # mx: rightmost position + 1 of longest substring from 0 to now(i)
    p = [0] * ln
    mx, cen = 0, 0
    for i in range(1, ln):
        # mx > i: longest substring cover new i position
        # example:
        #      # c # a # b # a # b # a #
        #    p 0 2 1 2 1 4 1 - - - - - -
        #          ^     ^ ^         ^
        #          |   CEN i        MX
        #      2*CEN-i
        # - i and 2*CEN-i is palindrome with CEN
        # - from [2*CEN-MX, MX] is palindrome
        # => left substring is palindrome, right substring also palindrome
        #    so we could save a lot of comparison now.
        p[i] = min(p[2 * cen - i], mx - i) if mx > i else 1
        # continue to extend substring with palindrome checking
        while i + p[i] < ln and i - p[i] >= 0 and s[i + p[i]] == s[i - p[i]]:
            p[i] += 1
        # if new substring is more right, continue to use it.
        if i + p[i] > mx:
            mx = i + p[i]
            cen = i
    # get longest one, and its index
    idx, radius = max(enumerate(p), key=operator.itemgetter(1))
    radius -= 1  # original radius conclude center point, so remove it
    ret = s[idx - radius: idx + radius + 1]
    ret = [i for i in ret if i != '#']  # filter out #
    ret = "".join(ret)
    return ret
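A quick check of the record above on even- and odd-length palindromes (the '#' interleaving handles both cases):

print(manacher("abba"))      # 'abba'
print(manacher("abacabad"))  # 'abacaba'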
45b711765e447120747b81e166511a3139cecb41
18,809
def scrub(string, exclude=None):
    """
    Avoid SQL injection attacks.

    Only return alphanumeric characters or the contents of exclude that are
    in the original string.
    """
    # The original used a mutable default (exclude=[]) and then `exclude += ...`,
    # which grows the shared default list on every call; build a fresh list instead.
    exclude = list(exclude) if exclude else []
    # Common punctuation.
    exclude += [' ', '(', ')', '.', '?', '!', ':', '-', '@']
    return ''.join([char for char in string if char.isalnum() or char in exclude])
68140d7072cdd852f15cba0e82d9dc14aadf73ec
18,810
import torch


def clip_boxes_to_image(boxes, size):
    """copy from torchvision.ops.boxes

    Clip boxes so that they lie inside an image of size `size`.

    Arguments:
        boxes (Tensor[N, 4]): (left, top, right, bottom)
        size (Tuple(H, W)): size of the image

    Returns:
        clipped_boxes (Tensor[N, 4])
    """
    dim = boxes.dim()
    boxes_x = boxes[..., 0::2]
    boxes_y = boxes[..., 1::2]
    height, width = size
    boxes_x = boxes_x.clamp(min=0, max=width - 1)
    boxes_y = boxes_y.clamp(min=0, max=height - 1)
    clipped_boxes = torch.stack((boxes_x, boxes_y), dim=dim)
    return clipped_boxes.reshape(boxes.shape)
139e865f8137f1529141546a9d71877599adacd1
18,811
def extend_schema_field(field):
    """
    Decorator for the "field" kind. Can be used with ``SerializerMethodField``
    (annotate the actual method) or with custom ``serializers.Field``
    implementations.

    If your custom serializer field base class is already the desired type,
    decoration is not necessary. To override the discovered base class type,
    you can decorate your custom field class.

    Always takes precedence over other mechanisms (e.g. type hints,
    auto-discovery).

    :param field: accepts a ``Serializer`` or :class:`~.types.OpenApiTypes`
    """
    def decorator(f):
        if not hasattr(f, '_spectacular_annotation'):
            f._spectacular_annotation = {}
        f._spectacular_annotation['field'] = field
        return f

    return decorator
91f665f927357813f43963ce9c348f8da7a2c5c8
18,812
import time


def tic():
    """Records the time in highest resolution possible for timing code."""
    return time.perf_counter()
f3b293f19258d1b6c42677f2d6847033816bf761
18,813
import numpy


def overturning_streamfunction(v, dz, dx):
    """
    calculate meridional overturning streamfunction

    v:  latitudinal velocity, 3 dim array (lon, lat, z)
    dz: z layer height (possibly array)
    dx: longitudinal cell size (probably an array for latitude dependence)
    """
    if len(v.shape) != 3:
        raise Exception("v dim != 3")
    # integrate over longitude
    vint = (v.transpose((0, 2, 1)) * dx).transpose((0, 2, 1))
    vint = vint.sum(axis=0)
    # depth integration
    vint = (vint * dz).cumsum(axis=-1)
    psim = numpy.zeros((v.shape[1], v.shape[2] + 1)) * vint[0, 0]
    psim[:, 1:] = -vint
    return psim
06033d1e2916508e233c83479ba248450317654e
18,816