content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def _mock_kernel(x1, x2, history):
    """Record each call in *history* and return a fixed kernel value.

    Returns 1 for equal datapoints and 0.2 for unequal ones, so tests can
    both inspect the call sequence and get deterministic similarities.
    """
    history.append((x1, x2))
    return 1 if x1 == x2 else 0.2
d6319a63b987a162d99d810aebabb737328c2d9e
50,884
import math


def rotate_point(centerPoint, point, angle):
    """Rotate *point* counter-clockwise around *centerPoint*.

    *angle* is in degrees.  Returns the rotated point with each
    coordinate truncated to int, matching the original behaviour.
    """
    theta = math.radians(angle)
    dx = point[0] - centerPoint[0]
    dy = point[1] - centerPoint[1]
    rotated_x = dx * math.cos(theta) - dy * math.sin(theta)
    rotated_y = dx * math.sin(theta) + dy * math.cos(theta)
    return int(rotated_x + centerPoint[0]), int(rotated_y + centerPoint[1])
0c6635653d1b9dad1269bb69321cea3f859713a5
50,885
from typing import TextIO
from typing import Tuple


def strip_head(source: TextIO, line: str) -> Tuple[TextIO, str]:
    """Skip heading lines until a blank line, then return the stream and
    the first body line.

    Iterative rewrite of the original purely-recursive version: one
    recursive call per heading line could exhaust the interpreter's
    recursion limit on long headings.

    >>> import io
    >>> data = io.StringIO( "heading\\n\\nbody\\nmore\\n" )
    >>> tail, first = strip_head(data, data.readline())
    >>> first
    'body\\n'
    >>> list(tail)
    ['more\\n']
    """
    # A blank line (or EOF, where readline() returns "") ends the heading.
    while len(line.strip()) != 0:
        line = source.readline()
    return source, source.readline()
efa7e8c430e909791121764cc7a840c6fe56ccf6
50,886
def drop_missing_data(df, threshold=0.5, drop_cols=None):
    """Drop columns whose fraction of missing values exceeds *threshold*.

    :param df: pandas DataFrame to process
    :param threshold: maximum tolerated fraction of NaNs per column
    :param drop_cols: optional explicit list of columns to drop; when
        given (non-empty), the threshold logic is skipped
    :return: (trimmed DataFrame, list of dropped/selected column names)
    """
    # BUG FIX: the original used ``drop_cols=[]`` — a shared mutable
    # default that accumulated column names across calls.
    if drop_cols is None:
        drop_cols = []
    if not drop_cols:
        rows = len(df)
        # Minimum number of non-NA values a column must have to survive;
        # pandas expects an int for ``thresh``.
        num_nonna = int(round((1 - threshold) * rows))
        for col, na_frac in (df.isnull().sum() / rows).items():
            if na_frac > threshold:
                drop_cols.append(col)
        d = df.dropna(axis=1, thresh=num_nonna)
    else:
        d = df.drop(drop_cols, axis=1)
    return d, drop_cols
659e18f6eb6c92431b14bf6c63df500ba325adaf
50,887
def newStr(name, theType, redirect, category, verse, url):
    """Format one DDG output record as a tab-separated line.

    Unused columns are emitted as empty fields; the source URL carries a
    trailing newline so records can be concatenated directly.
    """
    columns = [
        name,        # name
        theType,     # type
        redirect,    # redirect
        "",          # otheruses
        category,    # categories
        "",          # references
        "",          # see_also
        "",          # further_reading
        "",          # external_links
        "",          # disambiguation
        "",          # images
        verse,       # abstract
        url + "\n",  # source_url
    ]
    return "\t".join(columns)
84d0eb90dba129cd2a670ab8c12d45305d04c39e
50,888
def parallel_verses(ENG_lines, INA_lines):
    """Find the parallel verses of two texts.

    Used on passages that use verse numbers (e.g. the Bible): a line is
    matched when it contains a marker like ``3:16`` that also appears in
    an English line.

    Inputs:
        ENG_lines - list of English sentences
        INA_lines - list of Interlingua sentences
    Output:
        list of [ENG_line, INA_line] pairs
    """
    out = []
    for INA_line in INA_lines:
        first_colon = INA_line.find(":")
        # BUG FIX: the original tested ``.isdigit`` without calling it,
        # which is always truthy — any line with a colon was accepted.
        # The bounds check also prevents an IndexError when the colon is
        # the first or last character.
        if (0 < first_colon < len(INA_line) - 1
                and INA_line[first_colon - 1].isdigit()
                and INA_line[first_colon + 1].isdigit()):
            # Expand left and right over the digits around the colon to
            # capture the full "chapter:verse" marker.
            start = first_colon - 1
            while start >= 0 and INA_line[start].isdigit():
                start -= 1
            start += 1
            end = first_colon + 1
            while end < len(INA_line) and INA_line[end].isdigit():
                end += 1
            verse_number = INA_line[start:end]
            for ENG_line in ENG_lines:
                if verse_number in ENG_line:
                    out.append([ENG_line, INA_line])
                    break
    return out
ba269445535f5b109b104f1a74e75392840ac7f2
50,889
from re import M


def s_init(n, sp):
    """Seed step from 20120516B.

    Returns the fixed seed 123456789 when ``n`` is 0, otherwise one LCG
    step of ``sp`` taken modulo ``M`` (imported from :mod:`re` — a
    preserved quirk of the original code).
    """
    if n != 0:
        return (22695477 * sp + 12345) % M
    return 123456789
a076e6276b44223b2258bc0f357b82fd7b68b872
50,890
def read_var_int(handle):
    """Read a biased variable-length integer from a binary file handle.

    Each byte contributes its low 7 bits (least significant group first);
    a byte with the high bit (0x80) set terminates the sequence.  After
    every continuation byte the new shift value is added as a bias.
    """
    value = 0
    weight = 1
    while True:
        current = handle.read(1)[0]
        value += (current & 0x7F) * weight
        if current & 0x80:
            return value
        weight <<= 7
        # Bias: each continuation adds the new weight before the next
        # 7-bit group is folded in.
        value += weight
380a708925bf1d07fb0a81de073b559aed16eb0c
50,892
def cleanHTML(html, lowercase=False, removeNonAscii=False, cutoffFooter=False, descriptionOnly=False, replaceDict=None):
    """Normalize an HTML string.

    Args:
        html: html string
        lowercase: lowercase the whole string first
        removeNonAscii: replace every non-ASCII character with a space
        cutoffFooter: keep only the text before a footer marker (see
            NOTE below — the marker looks corrupted)
        descriptionOnly: currently unused (see commented-out stub)
        replaceDict: maps words (matched lowercased) to replacements

    Returns:
        The adjusted html string.
    """
    if lowercase:
        html = html.lower()
    if removeNonAscii:
        html = ''.join([i if ord(i) < 128 else ' ' for i in html])
    if replaceDict:
        # Keys are lowercased before matching, so replaceDict is expected
        # to be applied after ``lowercase`` — TODO confirm with callers.
        for word, replaceWord in replaceDict.items():
            html = html.replace(word.lower(), replaceWord)
    if cutoffFooter:
        # NOTE(review): the adjacent string literals concatenate to
        # 'html = html.split()' — i.e. this splits on a fragment of its
        # own source code.  Almost certainly corrupted; the intended
        # footer marker should be recovered from version history before
        # this flag is relied upon.
        html = html.split("html = html.split(" ")")
        html = html[0]
    # if descriptionOnly:
    #     "jobsearch-JobComponent-description"
    return html
6b6efa1855cd98ab7ae4f6a70c8abb1ca35705af
50,893
import re


def hump2underline(hump_str):
    """Convert a camelCase ("hump") string to snake_case.

    :param str hump_str: camelCase input string
    :return: all-lowercase string with underscores at case boundaries
    :rtype: str
    """
    boundary = re.compile(r'([a-z]|\d)([A-Z])')
    return boundary.sub(r'\1_\2', hump_str).lower()
624ccb5538743d0706619b3768f301bf276b8cc2
50,894
def get_spawning_profile_list(intersection):
    """Return the spawning profile list stored on *intersection*.

    :param intersection: Intersection instance to query
    :type intersection: Intersection
    :return: the intersection's spawning profile list
    """
    return intersection.get_spawning_profile_list()
1239299901bb8109d264a948d1d47d8d02ff19b9
50,896
def process_fields(tag):
    """Expand the ``fields`` element of a tag dictionary.

    ``fields`` is a tab-separated string of ``key:value`` pairs (only the
    first colon in each pair is significant, so values may contain
    colons).  Returns those pairs as a dict, plus a sorted list of their
    keys under ``'field_keys'``; returns ``{}`` when ``fields`` is absent
    or empty.

    :param tag: dict containing a tag
    :returns: dict of key-value pairs plus the ``'field_keys'`` list
    """
    fields = tag.get('fields')
    if not fields:
        return {}
    result = {}
    for entry in fields.split('\t'):
        key, value = entry.split(':', 1)
        result[key] = value
    result['field_keys'] = sorted(result)
    return result
41e7e958fe350135559673ae7877ffb1f254caee
50,897
def join_with_function(func, values1, values2):
    """Combine *values1* and *values2* element-wise with *func*.

    Stops at the shorter of the two sequences and returns a list.
    """
    return [func(a, b) for a, b in zip(values1, values2)]
6548c0777c14d2e02a2fe3579e0cc368a9729a32
50,898
def get_AllRegions(self, classes=None, order='document', depth=0):
    """Get all the *Region elements, or only those of the given ``classes``.

    Returned in document order unless ``order`` is reading-order based.

    Arguments:
        classes (list) Region classes to return, e.g. ``['Text', 'Image']``
        order ("document"|"reading-order"|"reading-order-only")
            ``document`` (default): document order;
            ``reading-order``: reading order with regions not in the
            reading order appended at the end;
            ``reading-order-only``: regions not in the reading order
            are omitted.
        depth (int) Recursive depth to look for regions at; ``0`` means
            all regions at any depth. Default: 0

    For example, to get all text anywhere on the page in reading order, use:
    ::
        '\\n'.join(line.get_TextEquiv()[0].Unicode
                  for region in page.get_AllRegions(classes=['Text'], depth=0, order='reading-order')
                  for line in region.get_TextLine())
    """
    if order not in ['document', 'reading-order', 'reading-order-only']:
        raise Exception("Argument 'order' must be either 'document', 'reading-order' or 'reading-order-only', not '{}'".format(order))
    if depth < 0:
        raise Exception("Argument 'depth' must be an integer greater-or-equal 0, not '{}'".format(depth))
    # depth semantics of the recursive helper: 0 means unlimited, so a
    # positive user depth is shifted by one for the implicit root level.
    ret = self._get_recursive_regions([self], depth + 1 if depth else 0, classes)
    if order.startswith('reading-order'):
        # Peel the ReadingOrder element down to a flat list of region ids;
        # each step may yield a falsy value, hence the chained guards.
        reading_order = self.get_ReadingOrder()
        if reading_order:
            reading_order = reading_order.get_OrderedGroup() or reading_order.get_UnorderedGroup()
        if reading_order:
            reading_order = self._get_recursive_reading_order(reading_order)
        if reading_order:
            id2region = {region.id: region for region in ret}
            # Ids present in the reading order but not on the page are
            # silently skipped.
            in_reading_order = [id2region[region_id] for region_id in reading_order if region_id in id2region]
            if order == 'reading-order-only':
                ret = in_reading_order
            else:
                # NOTE: the membership test makes this O(n^2) in the
                # number of regions — acceptable for page-sized inputs.
                ret = in_reading_order + [r for r in ret if r not in in_reading_order]
    return ret
c463b16212266cad95afc8b76de8b75ed8ed1820
50,901
def blue_channel(image):
    """Keep only the blue channel of *image*.

    Sets the red and green components of every pixel to 0 in place and
    returns the (mutated) image.
    """
    for px in image:
        px.red = 0
        px.green = 0
    return image
5f24af13f079ef7d73f9e5bb3adf4928db96c6e5
50,903
def A_int(freqs, delt):
    """Intermediate amplitude: quartic polynomial in frequency.

    Parameters
    ----------
    freqs : array
        Frequencies in natural units (Mf, G=c=1) of the waveform.
    delt : array
        Matching coefficients (delta_0 .. delta_4) joining the inspiral
        to the merger-ringdown portion of the waveform.
    """
    d0, d1, d2, d3, d4 = delt[0], delt[1], delt[2], delt[3], delt[4]
    return d0 + d1 * freqs + d2 * freqs ** 2 + d3 * freqs ** 3 + d4 * freqs ** 4
8fcdb60271e5938faad7f89d3d08bc78281a88f6
50,904
def getEventNames():
    """Return the six event names, in their fixed order."""
    return [
        'HandStart',
        'FirstDigitTouch',
        'BothStartLoadPhase',
        'LiftOff',
        'Replace',
        'BothReleased',
    ]
675afec83e01581c91e16cd97c6c9c6dff85988f
50,905
def mpe(actual, forecast):
    """Mean percentage error (MPE) between two equally-shaped arrays.

    :param actual: observed values (must contain no zeros)
    :param forecast: predicted values
    :return: the MPE, or None when the shapes differ
    """
    if actual.shape != forecast.shape:
        return None
    return ((actual - forecast) / actual).sum() / actual.shape[0]
b1963154f2dbb0a1abebbb361593e759352e3c3a
50,906
import math


def tangent_points_to_circle_xy(circle, point):
    """Calculate the two tangent points on a circle in the XY plane.

    Parameters
    ----------
    circle : tuple
        (center, radius) of the circle in the xy plane.
    point : tuple
        XY(Z) coordinates of a point in the xy plane, outside the circle.

    Returns
    -------
    tuple
        The two tangent points, each as an (x, y, 0) tuple.

    Examples
    --------
    >>> circle = (0, 0, 0), 1.
    >>> point = (2, 4, 0)
    >>> t1, t2 = tangent_points_to_circle_xy(circle, point)
    >>> Point(*t1), Point(*t2)
    (Point(-0.772, 0.636, 0.000), Point(0.972, -0.236, 0.000))
    """
    center, radius = circle[0], circle[1]
    px = point[0] - center[0]
    py = point[1] - center[1]
    # Squared distance from center to point, reused in both solutions.
    norm2 = px ** 2 + py ** 2
    root = math.sqrt(norm2 - radius ** 2)
    a1 = radius * (px * radius - py * root) / norm2
    a2 = radius * (px * radius + py * root) / norm2
    # NOTE: divides by py, so a point level with the center (py == 0)
    # raises ZeroDivisionError — unchanged from the original.
    b1 = (radius ** 2 - px * a1) / py
    b2 = (radius ** 2 - px * a2) / py
    return (a1 + center[0], b1 + center[1], 0), (a2 + center[0], b2 + center[1], 0)
a5d09b4622a043ea12330c8516e6d7f3fc591aa2
50,907
import re


def ends_in_file(path):
    """Return True when *path* ends with '.%ext' or '%fn'.

    Either marker may be followed by literal '{' / '}' characters; the
    match is case-insensitive.
    """
    end_ext = re.compile(r"\.%ext[{}]*$", re.I)
    end_fn = re.compile(r"%fn[{}]*$", re.I)
    return bool(end_ext.search(path) or end_fn.search(path))
b7087c407a474e9705aebe487a73a2daad124599
50,908
import re


def str2float(text):
    """Remove an uncertainty bracket (e.g. ``"1.23(4)"``) and return the float.

    Also accepts a single-element list containing such a string; other
    non-string inputs yield ``None`` (preserved behaviour).

    :param text: string like ``"1.23(4)"`` or a one-element list of one
    :return: the parsed float, or None for unhandled input
    """
    # FIX: raw strings for the regex — the original "\(.+\)" relies on
    # invalid escape sequences, which raise warnings (and eventually
    # errors) on modern Python.
    try:
        return float(re.sub(r"\(.+\)", "", text))
    except TypeError:
        # re.sub raises TypeError for non-string input such as a list.
        if isinstance(text, list) and len(text) == 1:
            return float(re.sub(r"\(.+\)", "", text[0]))
d97443664e1beb3535d9b1a162a19db2f3e6ed17
50,909
from pathlib import Path def prov_data_paths(data_dir: Path) -> dict: """Build a dictionary to map a province to it's GeoPackage file.""" paths = {} for item in data_dir.rglob('*NRN_*.gpkg'): parts = item.name.split('_') prcode = parts[1] major = parts[2] minor = parts[3] if '.' in minor: minor = minor.split('.')[0] paths[prcode] = {'path': item, 'major': major, 'minor': minor} return paths
db1e1e7024f8f2138b93c79811da230858246ea3
50,912
import math


def resname(residue, residue_mass_dict):
    """Look up a residue's name by matching its mass within 0.05.

    Currently this does not disambiguate residues whose masses lie
    within 0.05 of each other — the first match in dict order wins.

    :param residue: object exposing ``mass`` (and ``atoms.names`` for the
        failure report)
    :param residue_mass_dict: mapping of residue name -> reference mass
    :return: the matched residue name
    :raises ValueError: when no reference mass is close enough
    """
    for candidate_name, candidate_mass in residue_mass_dict.items():
        if math.isclose(residue.mass, candidate_mass, abs_tol=0.05):
            return candidate_name
    print("the relevant residues are: \n", residue.atoms.names, "\n and ")
    raise ValueError(f'mass of residue, {residue.mass}, is not in name_mass_dict,'
                     f' {residue_mass_dict}')
af474cef0b339e069c2fd1845af7e949f57e2890
50,913
def display_seconds(secondNum):
    """Render a duration in seconds as a human-readable interval string.

    Args:
        secondNum: duration in seconds (positive number).

    Returns:
        A string such as ``'1d 2h 3m 4s '`` — each unit is emitted only
        when the duration reaches it, and a trailing space is kept.
    """
    # Seconds part is truncated to at most 5 characters (covers floats).
    readable = str(secondNum % 60)[:5] + 's '
    if secondNum > 59:
        minutes = secondNum // 60
        readable = str(minutes % 60) + 'm ' + readable
        if minutes > 59:
            hours = minutes // 60
            readable = str(hours % 24) + 'h ' + readable
            if hours > 23:
                readable = str(hours // 24) + 'd ' + readable
    return readable
b0a281f7ad2d663a28bb837250980bdf89d63901
50,914
def set_publish_model_args(args):
    """Build the payload used to publish a model from CLI args.

    ``white_box`` takes precedence over ``black_box``; price and
    credits-per-prediction are added when present.
    """
    payload = {}
    if args.black_box:
        payload = {"private": False}
    if args.white_box:
        payload = {"private": False, "white_box": True}
    if args.model_price:
        payload.update(price=args.model_price)
    if args.cpp:
        payload.update(credits_per_prediction=args.cpp)
    return payload
f1e07471f893e08893e3c118b29e84eb8e37d453
50,915
def regex_result_escape_recursive(result):
    """Normalize quoting around JSON-like objects in a regex match.

    Swaps double quotes for single quotes throughout, then restores
    double quotes immediately around ``{...}`` spans so they read as
    quoted JSON objects.

    Args:
        result (MatchObject): match from a previous regex search.
    """
    text = result.group(0)
    text = text.replace('"', "'")
    text = text.replace("'{", '"{')
    return text.replace("}'", '}"')
58d9e9209e1b68ac2cc345bd81c65af2825cec2b
50,918
def luminance_to_retinal_illuminance(XYZ, absolute_adapting_field_luminance):
    """Convert luminance in :math:`cd/m^2` to retinal illuminance (trolands).

    Parameters
    ----------
    XYZ : array_like, (3,)
        *CIE XYZ* colourspace matrix.
    absolute_adapting_field_luminance : numeric
        Absolute adapting field luminance in :math:`cd/m^2`.

    Returns
    -------
    ndarray
        Converted *CIE XYZ* colourspace matrix in trolands.

    Examples
    --------
    >>> XYZ = np.array([19.01, 20., 21.78])
    >>> Y_0 = 318.31
    >>> luminance_to_retinal_illuminance(XYZ, Y_0)  # doctest: +ELLIPSIS
    array([ 479.4445924...,  499.3174313...,  534.5631673...])
    """
    scaled = absolute_adapting_field_luminance * XYZ / 100.
    return 18. * scaled ** 0.8
1c3d75453e4999cf8d9d58263bfed417f09c70ec
50,919
def formatInt(aInterval):
    """Coerce columns 1 and 2 of a 3-column interval to int; column 0 is
    left untouched."""
    first, second, third = aInterval[0], aInterval[1], aInterval[2]
    return [first, int(second), int(third)]
2fd14d2397f282f52576fad6730541fa1346f5e9
50,920
import difflib


def get_best_match(texts, match_against, ignore=' ', treshold=0.9):
    """Pick the entry of *texts* most similar to *match_against*.

    Returns None unless the best similarity ratio strictly exceeds
    *treshold*.  Characters in *ignore* are treated as junk by the
    underlying SequenceMatcher (set e.g. to "_ ").
    """
    matcher = difflib.SequenceMatcher(lambda ch: ch in ignore)
    matcher.set_seq1(match_against)
    best_ratio = 0
    best_text = ''
    for candidate in texts:
        matcher.set_seq2(candidate)
        score = matcher.ratio()
        if score > best_ratio:
            best_ratio = score
            best_text = candidate
    if best_ratio > treshold:
        return best_text
74fffbd7ed3f0a90594bf61604fa25962d72cc49
50,921
from datetime import datetime
import pytz


def get_date(prompt: str, timezone: str) -> datetime:
    """Prompt the user for a date and return it localized to *timezone*.

    :param prompt: label shown in the input prompt
    :param timezone: pytz timezone name, e.g. ``"Europe/London"``
    :return: timezone-aware datetime parsed as ``yy-mm-dd hh:mm``
    """
    date_str = input(f'Enter date of {prompt} (yy-mm-dd hh:mm): ')
    date = datetime.strptime(date_str, "%y-%m-%d %H:%M")
    print(f'The date you entered is: {date}')
    # BUG FIX: pytz zones must be attached with localize();
    # ``replace(tzinfo=pytz.timezone(...))`` picks the zone's LMT offset
    # (e.g. -00:01 oddities), which is almost never the intended wall time.
    return pytz.timezone(timezone).localize(date)
26dca58b6cb4edc3fd61032ed931aa3963efc63b
50,923
import time


def int_timestamp():
    """Return the current Unix timestamp truncated to an int.

    :return: the current time as whole seconds since the epoch
    """
    return int(time.time())
81f0d177df6d7fa82de2a40d9504bb38b6739399
50,925
import os


def GetGlogDir():
    """Return the directory glog writes logs to.

    Mirrors GetTempDirectories in third_party/glog/src/logging.cc: the
    first existing directory among $TEST_TMPDIR, $TMPDIR and $TMP wins,
    with '/tmp' as the fallback.

    Returns:
        a directory name.
    """
    for env_var in ('TEST_TMPDIR', 'TMPDIR', 'TMP'):
        candidate = os.environ.get(env_var, '')
        if os.path.isdir(candidate):
            return candidate
    return '/tmp'
2cbb4dcff0758e39bfeb9f24f9c9649713e719db
50,926
def ex_cobegin5():
    """
    cobegin {
    writeln("#1 line.");
    writeln("#2 line.");
    writeln("#3 line.");
    writeln("#4 line.");
    writeln("#5 line.");
    }
    """
    # Placeholder whose docstring carries an example snippet (appears to
    # be a Chapel 'cobegin' block — confirm against the surrounding
    # project); the function itself intentionally does nothing.
    return None
1ddb33aeff9e01150773375cc46c1c02a32178be
50,927
def reverse(un_list):
    """Return a new list with the elements of *un_list* in reverse order.

    The input is not modified; the result is always a list, matching the
    original behaviour for list inputs.
    """
    # Idiomatic replacement for the original manual index loop; also
    # handles empty non-list sequences, where the original indexed
    # un_list[0] and crashed.
    return list(reversed(un_list))
45277f746ce4b4e0f5da696b5f9fa94df22e699c
50,929
def tmp_config(transformations=None, remove_transactions=None, custom_category_map=None):
    """Build a minimal config dict for tests, wrapping the three optional
    settings under a ``"settings"`` key."""
    settings = {
        "transformations": transformations,
        "remove_transactions": remove_transactions,
        "custom_category_map": custom_category_map,
    }
    return {"settings": settings}
e62d0187c0dd5b8b62441e8c8adf4a7d948aacc5
50,930
def serialize_dtype(o):
    """Serialize a :obj:`numpy.dtype` to a JSON-compatible dict.

    Args:
        o (:obj:`numpy.dtype`): dtype to be serialized.

    Returns:
        A dict with ``_type='np.dtype'`` and a ``descr`` field:
        ``str(dtype)`` for plain dtypes, the structured ``descr`` list
        otherwise.
    """
    # len() of a non-structured dtype is 0; structured dtypes report
    # their field count.
    if len(o) == 0:
        descr = str(o)
    else:
        descr = o.descr
    return dict(_type='np.dtype', descr=descr)
3990072bde75c1133cea37d79cf54b1945c536af
50,932
def print_heart(s='♥'):
    """Print a heart shape drawn with the characters of *s*.

    :param s: characters cycled through to fill the heart's interior
    :return: the list of rendered lines (also printed in blinking red)
    """
    rows = []
    s += ' '
    for y in range(15, -15, -1):
        row = []
        for x in range(-30, 30):
            xs = x * 0.04
            ys = y * 0.1
            # Implicit heart curve: (x^2 + y^2 - 1)^3 - x^2 * y^3 <= 0
            # marks points inside the heart.
            value = (xs ** 2 + ys ** 2 - 1) ** 3 - xs ** 2 * ys ** 3
            row.append(s[(x - y) % len(s)] if value <= 0 else ' ')
        rows.append(''.join(row) + '\n')
    print("\033[5;31m" + ''.join(rows) + '\033[0m')
    return rows
2d4c8e97764431940873e7a118b563b7d88163c1
50,933
import math


def pseudo_root_search_mathias(eos, r, temp, a_mix, b_mix, desired_phase, kappa, search_iterations=10):
    """Solve the Mathias constraint ``d_press_d_rho - 0.1*r*temp == 0``
    for the SRK and PR equations of state via Newton iteration.

    The method is technically independent of the EOS provided the
    relevant derivatives exist; only SRK (eos=0) and PR (eos=1) are
    implemented.  Only rho changes between iterations, so no new mixing
    terms need to be calculated and the search should conclude in 4-5
    iterations.

    :param eos: 0 for SRK, 1 for PR; anything else fails fast
    :param r: gas constant
    :param temp: temperature
    :param a_mix: EOS mixture parameter a
    :param b_mix: EOS mixture parameter b
    :param desired_phase: 'vap' for a vapor-like root interval,
        'liq' for a liquid-like one
    :param kappa: scales the vapor-side upper search bound (kappa*rho_mc)
    :param search_iterations: Newton iteration cap (default 10)
    :return: (converged, rho_mc, rho_lo, rho_hi, rho_test, temp_mc);
        on invalid eos/phase: (False, -1, -1, -1, -1, -1)
    """
    SQRT_2 = math.sqrt(2)
    converged = False
    # Density bounds and mechanical-critical constants per EOS.
    if eos == 0:
        u, w = 1, 0
        rho_lo, rho_hi = -1 / b_mix, 1 / b_mix
        # From original Mathias paper
        rho_mc = 0.25599 / b_mix
        temp_mc = 0.20268 * a_mix / (r * b_mix)
    elif eos == 1:
        u, w = 2, -1
        rho_lo, rho_hi = (1 - SQRT_2) / b_mix, 1 / b_mix
        # From Watson
        rho_mc = 0.25308 / b_mix
        temp_mc = 0.17014 * a_mix / (r * b_mix)
    else:
        return False, -1, -1, -1, -1, -1
    # The desired phase selects which side of rho_mc to search.
    if desired_phase == 'vap':
        rho_interval_lo, rho_interval_hi = rho_lo, kappa * rho_mc
    elif desired_phase == 'liq':
        rho_interval_lo, rho_interval_hi = rho_mc, rho_hi
    else:
        return False, -1, -1, -1, -1, -1
    # Non-dimensionalizes f and its derivative (see convergence test).
    scaling = 1 / (r * temp)
    # scaling = 1
    if desired_phase == 'liq':
        # initial_estimate - given by Mathias
        rho_test = rho_hi - 0.4 * (rho_hi - rho_mc)
    else:
        rho_test = (rho_interval_lo + rho_interval_hi) * 0.5
        # rho_test = rho_hi - 0.4*(rho_hi - rho_mc)
    for j in range(search_iterations):
        # EOS in terms of rho (which is 1/vol):
        # press = r*temp/(-b_mix + 1/rho) - a_mix/(w*b_mix**2 + u*b_mix/rho + rho**(-2))
        # First derivative of the EOS with respect to rho at rho_test.
        d_press_d_rho = r * temp / (rho_test ** 2 * (-b_mix + 1 / rho_test) ** 2) - (
            u * b_mix / rho_test ** 2 + 2 / rho_test ** 3) * a_mix / (
            w * b_mix ** 2 + u * b_mix / rho_test + rho_test ** (-2)) ** 2
        f = (d_press_d_rho - 0.1 * r * temp)
        f *= scaling
        # NOTE(review): convergence accepts any f < 1e-6, including large
        # negative residuals — presumably intentional for this one-sided
        # constraint, but worth confirming.
        if f < 1e-6:
            converged = True
            break
        # Second derivative of the EOS with respect to rho (Newton slope).
        d2_press_d_rho_2 = 2 * (
            -r * temp / (b_mix - 1 / rho_test) ** 2 - r * temp / (rho_test * (b_mix - 1 / rho_test) ** 3) + (
                u * b_mix + 3 / rho_test) * a_mix / (
                w * b_mix ** 2 + u * b_mix / rho_test + rho_test ** (-2)) ** 2 - (
                u * b_mix + 2 / rho_test) ** 2 * a_mix / (
                rho_test * (w * b_mix ** 2 + u * b_mix / rho_test + rho_test ** (-2)) ** 3)) / rho_test ** 3
        d2_press_d_rho_2 *= scaling
        df_drho = d2_press_d_rho_2
        rho_test_new = rho_test - f / df_drho
        # Bisect back toward the interval when Newton oversteps it.
        if rho_test_new < rho_interval_lo:
            rho_test_new = (rho_test + rho_interval_lo) / 2
        elif rho_test_new > rho_interval_hi:
            rho_test_new = (rho_test + rho_interval_hi) / 2
        rho_test = rho_test_new
    # if not converged:
    #     print('press_rho did not converge')
    return converged, rho_mc, rho_lo, rho_hi, rho_test, temp_mc
d2a536dbe3b570492fb523f042098576187942f0
50,934
def _batch_to_observation(batch, obs_shape):
    """Collapse a size-1 batch into a single observation.

    Arrays already shaped like *obs_shape* pass through unchanged; any
    other shape must be exactly ``(1, *obs_shape)``.
    """
    if batch.shape == obs_shape:
        return batch
    assert batch.shape == (1, *obs_shape), f'batch.shape={repr(batch.shape)} does not correspond to obs_shape={repr(obs_shape)} with batch dimension added'
    return batch.reshape(obs_shape)
4ee9c18f38f44de99eb06b7e37913f15627d8ff3
50,939
import numpy


def _inverse_logit(input_values):
    """Implement the inverse logit (logistic sigmoid) function.

    f(x) = exp(x) / [1 + exp(x)] = 1 / [1 + exp(-x)]

    :param input_values: numpy array of logits.
    :return: output_values: equivalent-size numpy array of non-logits.
    """
    # The 1/(1+exp(-x)) form is algebraically identical but avoids the
    # inf/inf -> NaN overflow of exp(x)/(1+exp(x)) for large positive x.
    return 1. / (1. + numpy.exp(-input_values))
f7cbfe41f2fa3457cfd03e4e4c55e08df50fd166
50,940
def wannetworkid_to_name_dict(sdk_vars, sdk_session):
    """Create WAN-network ID <-> Name translation constructs.

    :param sdk_vars: sdk_vars global info struct (reads 'tenant_name')
    :param sdk_session: SDK session; ``get.wannetworks()`` is queried
    :return: 5-tuple of (id->name dict, name->id dict, id list,
        name list, id->type dict); all empty on failure.
    """
    id_xlate_dict = {}
    name_xlate_dict = {}
    wan_network_id_list = []
    wan_network_name_list = []
    wan_network_id_type = {}
    resp = sdk_session.get.wannetworks()
    status = resp.cgx_status
    raw_wan_networks = resp.cgx_content
    wan_networks_list = raw_wan_networks.get('items', None)
    if not status or not wan_networks_list:
        print("ERROR: unable to get wan networks for account '{0}'.".format(sdk_vars['tenant_name']))
        # BUG FIX: the error path used to return only 4 values while the
        # success path returns 5, crashing any caller unpacking 5.
        return {}, {}, [], [], {}
    # build translation dict
    for wan_network in wan_networks_list:
        name = wan_network.get('name')
        wan_network_id = wan_network.get('id')
        wn_type = wan_network.get('type')
        if name and wan_network_id:
            id_xlate_dict[wan_network_id] = name
            name_xlate_dict[name] = wan_network_id
            wan_network_id_list.append(wan_network_id)
            wan_network_name_list.append(name)
        # Type map is filled independently of the name check.
        if wan_network_id and wn_type:
            wan_network_id_type[wan_network_id] = wn_type
    return id_xlate_dict, name_xlate_dict, wan_network_id_list, wan_network_name_list, wan_network_id_type
25c47856d0c2606d91b367a7a33672553236d553
50,942
def terminateStrOnZero(str):
    """Return *str* truncated at the first NUL ('\\x00') character.

    (The parameter name shadows the builtin ``str``; it is kept for
    backward compatibility with keyword callers.)
    """
    # str.partition finds the first NUL in one C-level pass, replacing
    # the original manual index loop.
    return str.partition('\00')[0]
b09aee23bacf716f12b335be09f341ea21bbe3ee
50,943
def simple_undo(pig_latin):
    """Undo the simplest pig-latin translation.

    Splits on the 'ay ' suffix of each word and moves each word's final
    consonant back to the front.
    """
    chunks = (pig_latin + " ").split("ay ")
    restored = [chunk[-1] + chunk[:-1] for chunk in chunks[:-1]]
    return " ".join(restored)
d325f041d3ba8a7d8c0acbabd768786e54f78a09
50,945
def cons_tuple(head, tail):
    """Return a new tuple with *head* prepended to the tuple *tail*."""
    new_head = (head,)
    return new_head + tail
31fbd4b46c755f4e717592a9a3eedf1b8830d2c2
50,947
def final_constraints(board, constraints):
    """Final duplicate check once a Sudoku board is finished.

    Pops every constraint pair and verifies both cells are solved and
    hold different values.  NOTE: *constraints* is consumed (emptied).

    @param board The Sudoku board: a list of lists of variable instances
    @param constraints The unallowed value pairs, as ([row, col], [row, col])
    """
    while constraints:
        ([irow, icol], [jrow, jcol]) = constraints.pop()
        cell_i = board[irow][icol]
        cell_j = board[jrow][jcol]
        # Both cells must be solved (have exactly one value) ...
        if not cell_i.get_only_value() or not cell_j.get_only_value():
            return False
        # ... and the two values must differ.
        if cell_i.get_only_value() == cell_j.get_only_value():
            return False
    return True
75a95fc76bffb6ad6454a34f4c64e5d06c0c6cc1
50,948
def _pairs(sequence):
    """Pair up consecutive elements: (s[0], s[1]), (s[2], s[3]), ...

    A trailing unpaired element is dropped.
    Source: https://github.com/Akuli/teek/blob/master/teek/_tcl_calls.py
    """
    evens = sequence[0::2]
    odds = sequence[1::2]
    return zip(evens, odds)
b64d216c232a000a1f360faed1f95bfb64cc3c5a
50,949
def _mocked_handle_column(mocked_handle_column):
    """Pass-through alias for the fixture, suppressing the
    unused-argument warning at its call sites."""
    return mocked_handle_column
292017b0d6ef76b1ac657390c20da21021f34746
50,950
def role_permissions(permission):
    """Wrap a single permission in the list form that roles expect."""
    return [permission]
4bcf1b7bb667172415ebfa63f71ad6c43d8cc399
50,951
def dim(s, i):
    """Length (stop - start) of the *i*-th slice in the slice list *s*."""
    sl = s[i]
    return sl.stop - sl.start
9ed12f89f5a75a84bd92bb903c6d4ff018e89aaa
50,952
def get_value_using_path(obj, path):
    """Walk a nested object/list hierarchy along an XPath-like *path*.

    *path* is a slash-separated sequence of dict keys and (for lists)
    numeric indexes, e.g. ``"key1/0/key_x"``.

    Usage:
        get_value_using_path({"x": {"y": "z"}}, "x")   -> {"y": "z"}
        get_value_using_path({"x": {"y": "z"}}, "x/y") -> "z"
        get_value_using_path(["x", "y", "z"], "1")     -> "y"
        get_value_using_path({"k": ["a", "b"]}, "k/1") -> "b"
    """
    current = obj
    for part in path.split("/"):
        current = current[int(part)] if part.isdigit() else current[part]
    return current
92a1d0069a0651151332fd890901b603b242064d
50,954
def build_http_request(method: str, path: str, host: str, extra_headers=None, body: str = "") -> str:
    """Return a valid HTTP/1.1 request built from the given parameters.

    Parameters:
    - `method` - HTTP method (e.g. "POST" or "GET")
    - `path` - the path part of a URL (e.g. "/" or "/index.html")
    - `host` - the endpoint host (e.g. "google.com")
    - `extra_headers` - optional list of extra header strings
    - `body` - the optional body of the request (if any)

    Returns:
        A string representation of a valid HTTP request.
    """
    # FIX: None sentinel instead of a mutable [] default (shared-list
    # pitfall); observable behaviour is unchanged.
    if extra_headers is None:
        extra_headers = []
    status_line = f"{method} {path} HTTP/1.1"
    headers = [
        f"Host: {host}",
        "Connection: close",
        "User-Agent: sumitro-client/1.0"
    ]
    if len(extra_headers) > 0:
        headers.extend(extra_headers)
    # The leading CRLF separates headers from the body.
    payload = "\r\n"
    if len(body) > 0 or method == "POST":
        payload += body
        headers.append(f"Content-Length: {len(body)}")
    request_body = "\r\n".join([status_line, "\r\n".join(headers), payload])
    return request_body
4cc0bf8f5ed73788eb3e7a240bbc73f0a5a73d24
50,956
def filter_keys(func, a_dict):
    """Return a copy of *a_dict* with only the entries whose key
    satisfies ``func(key)``."""
    return {k: v for (k, v) in a_dict.items() if func(k)}
b06ec1803c6176e313fd1568df7fd9199f846554
50,958
def get_unused_var_name(prefix, var_table):
    """Return ``prefix + str(n)`` for the smallest n >= 0 such that the
    name is not already present in *var_table*."""
    counter = 0
    while (prefix + str(counter)) in var_table:
        counter += 1
    return prefix + str(counter)
bdec5b16d3fc018e8c8efa13794fe55f71ac3702
50,959
import time


def convert_readable_time(longTime):
    """Format a 'YYYY-MM-DD' date as e.g. 'Jan 03, 2018'.

    Inputs that do not parse are returned unchanged.
    """
    try:
        parsed = time.strptime(longTime, "%Y-%m-%d")
    except ValueError:
        return longTime
    return time.strftime("%b %d, %Y", parsed)
4cfa34da1eb131823a8019dbd0023d64f44d9f05
50,961
import random


def get_random_int(length=8):
    """Get a random integer of at most *length* decimal digits.

    :param length: maximum number of digits. By default 8.
    :return: a random integer in [0, 10**length - 1].
    """
    upper = int('9' * length)
    return random.randint(0, upper)
0248b9f5d40ae69e7324857d9e2300cdda57c972
50,962
import re


def extract_pull_request_number(commit_logs):
    """Extract the first pull request number from commit logs.

    :param str commit_logs: oneline commit logs
    :return: pull request number
    :rtype: str
    :raises ValueError: when no 'pull request #N' marker is present
    """
    m = re.search(r'pull request #(\d+)', commit_logs)
    if not m:
        # ValueError is more precise than the original bare Exception;
        # being a subclass, existing ``except Exception`` handlers still
        # match.
        raise ValueError(
            'cannot detect pull request number from\n{}'.format(commit_logs))
    return m.group(1)
3d2be6f8610971c0d1f7fdc189f272f32b9716f7
50,963
def mask_to_cidr(mask):
    """CIDR prefix length for a dotted-decimal IPv4 netmask.

    Counts the leading one-bits of the 32-bit mask.
    """
    bits = "".join(format(int(octet), "08b") for octet in mask.split("."))
    return len(bits) - len(bits.lstrip("1"))
81cf610029fadc6bd17bd735d3a4a8d0e2e73734
50,964
def _GetEdgeData(faces):
    """Find directed edges from faces, plus a vertex lookup table.

    Args:
        faces: list of list of int - each a closed CCW polygon of vertex
            indices

    Returns:
        (list of ((int, int), int), dict{int -> list of int}) -
        list elements are ((startv, endv), face index);
        the dict maps each start vertex to its edge indices.
    """
    edges = []
    vtoe = {}
    for findex, face in enumerate(faces):
        size = len(face)
        for offset, vert in enumerate(face):
            # Wrap around so the last vertex closes back to the first.
            edges.append(((vert, face[(offset + 1) % size]), findex))
            vtoe.setdefault(vert, []).append(len(edges) - 1)
    return (edges, vtoe)
f5c3c283868d5f2af02cbfaceeec3d6f8db01fab
50,965
import numpy as np


def convert_blender_mat_to_numpy_arr(blender_mat):
    """Convert a Blender matrix to a numpy array, row by row.

    Deprecated: a simple cast should work.  (Printing a numpy array from
    Blender's internal command line used to crash Blender.)
    """
    rows = [list(row) for row in blender_mat]
    return np.array(rows)
a89265c0248909c9fcc67efb97ad1cfb4a646b1b
50,966
def __find_service_account_in_message(message):
    """Extract the service account id from a gcloud sink-creation message.

    "gcloud logging sinks create" reports a token of the form
    '`serviceAccount:<id>`'; this strips the backticks and prefix so the
    caller can grant that account BigQuery permissions.

    :param message: message emitted by "gcloud logging sinks create"
    :return: the service account id string, or an empty list when no
        such token is present (quirk preserved from the original)
    """
    tokens = [tok for tok in message.split() if tok.startswith('`serviceAccount:')]
    if not tokens:
        return tokens
    account = tokens[0].replace('`', '')
    return account.replace('serviceAccount:', '')
e791c81d62dae49b6298874dc194eda712e99857
50,967
def pluralize(n: int, singular: str, plural: str) -> str:
    """Pick the singular or plural word form for the count *n*."""
    return singular if n == 1 else plural
a239d3d7304ba4fd5505c6bb1bc093392e9065e3
50,969
def some_sample_config(some_sample_path):
    """Return the CLI argument pair selecting the given sample config
    path."""
    return ["--config", some_sample_path]
257be0b4fcee1dbd340097a3f219911773ec9a31
50,970
import unicodedata


def report_ucs_msg(ucs, wcwidth_libc, wcwidth_local):
    """
    Return string report of combining character differences.

    :param ucs: unicode point.
    :type ucs: unicode
    :param wcwidth_libc: libc-wcwidth's reported character length.
    :type wcwidth_libc: int
    :param wcwidth_local: wcwidth's reported character length.
    :type wcwidth_local: int
    :rtype: unicode
    """
    # unicode_escape gives b'\\uXXXX'; drop the leading b'\\u' and
    # normalize to the codepoint's canonical hex form.
    escaped = ucs.encode('unicode_escape')[2:]
    ucp = escaped.decode('ascii').upper().lstrip('0')
    url = "http://codepoints.net/U+{}".format(ucp)
    name = unicodedata.name(ucs)
    template = u"libc,ours={},{} [--o{}o--] name={} val={} {} "
    return template.format(wcwidth_libc, wcwidth_local, ucs, name, ord(ucs), url)
402580d636951290037eaddf9651aa4cd50b72a9
50,971
def generic_num_paths_to_target_bag(
    data: list[list[int]], bags: list[str], target: str
) -> int:
    """Generic solution.

    Counts how many bags can (directly or transitively) contain the
    target bag. ``data[i][j]`` is the number of bag ``j`` held inside
    bag ``i``.
    """
    reachable = [0] * len(bags)
    # Depth-first walk upward from the target through containment edges.
    pending = [bags.index(target)]
    while pending:
        child = pending.pop()
        for parent, row in enumerate(data):
            if row[child] > 0 and reachable[parent] != 1:
                reachable[parent] = 1
                pending.append(parent)
    return sum(reachable)
0b39a178905ff091bcb6b62a8346c78382a38f5a
50,972
def build_cli_args(archive_filename, out_dir):
    """ Build the arguments to use a command-line interface to a simulator
    to execute a COMBINE/OMEX archive

    Args:
        archive_filename (:obj:`str`): path to a COMBINE/OMEX archive
        out_dir (:obj:`str`): directory where outputs should be saved

    Returns:
        :obj:`list` of :obj:`str`: command-line arguments to execute
        a COMBINE/OMEX archive
    """
    args = ['-i', archive_filename]
    args += ['-o', out_dir]
    return args
c3f6262d5c65ac62a8ff9d1e03f5dc9b45ba7b08
50,973
from typing import Union
from pathlib import Path


def is_dir(dir_path: Union[Path, str]) -> bool:
    """Return True when *dir_path* refers to an existing directory."""
    candidate = Path(dir_path)
    return candidate.is_dir()
6fd5edecc66fe92591ac7fb97d149b6f4bd14890
50,974
import json


def try_parse_json(text):
    """
    Parse the given JSON and return the constructed object.

    Return the given text unmodified if parsing as JSON fails.
    Return `None` if the given text is empty.
    """
    if not text:
        return None
    try:
        parsed = json.loads(text)
    except json.JSONDecodeError:
        # Not valid JSON: hand back the raw text untouched.
        return text
    return parsed
18bea77a9913266b7d07d6c2aa77591c0d2b3246
50,975
def find_merge(head1, head2):
    """
    Given the heads of two linked lists, returns the first node where they
    merge or None if no such node exists.

    INTUITION: if both pointers start equidistant from the merge node, we
    can just advance them in lockstep until they match. So first measure
    both lengths, then skip ahead on the longer list to equalize the
    remaining distance.
    """
    def length(node):
        # Walk to the tail counting nodes.
        count = 0
        while node:
            count += 1
            node = node.next_node
        return count

    len1 = length(head1)
    len2 = length(head2)

    # Skip the excess prefix of the longer list (ranges are empty when
    # the difference is non-positive).
    ptr1, ptr2 = head1, head2
    for _ in range(len1 - len2):
        ptr1 = ptr1.next_node
    for _ in range(len2 - len1):
        ptr2 = ptr2.next_node

    # Lockstep walk until the pointers coincide.
    while ptr1:
        if ptr1 == ptr2:
            return ptr1
        ptr1 = ptr1.next_node
        ptr2 = ptr2.next_node
    return None
3288c1bd2c820faced85be276a38cefd509e77d3
50,976
def demo() -> int:
    """
    A trivial function showing that docstrings with doctests are
    executed by pytest.

    Use this function like this:

    >>> demo()
    42
    """
    return 6 * 7
30d3f1d85f666cee90e358c125c0ab10b1055ae5
50,977
import numpy


def get_num_vertices(triangles):
    """
    Get the number of vertices in a triangulation.

    :param triangles: List of triangles in the triangulation.
    :returns: Number of vertices in the triangulation.
    """
    # Vertices are 0-based indices, so the count is the max index + 1.
    flat = numpy.reshape(triangles, -1)
    return numpy.amax(flat) + 1
0a0aee57cd7d3aab0f02ef9099d26124a71b9ebc
50,981
import torch


def absolute_end_point_error(outputs, labels):
    """
    AUTHORS:
    --------
    :author: Alix Leroy

    DESCRIPTION:
    ------------
    Calculate the Absolute End Point Error between two tensors.

    PARAMETERS:
    -----------
    :param outputs: Output tensor
    :param labels: Target tensor

    RETURN:
    -------
    :return: The Absolute End Point Error (L2 distance between the two tensors)
    """
    return torch.dist(outputs, labels, p=2)
3a768cdffc0792b77fa74850f8b2e50b8c98b45d
50,982
import functools


def retry_request(times):
    """
    Request Retry Decorator.

    Retries the wrapped request function while it keeps returning a 404
    response, making at most ``times`` calls in total, and returns the
    last response received (the first non-404 one, or the final 404).

    Fixes the original implementation, which fell through after the retry
    loop and invoked the function one extra time (``times + 1`` calls on a
    persistent 404), discarding the retry budget semantics.

    :param times: Maximum number of attempts for the wrapped function/method.
    :return: The decorator.
    """
    def decorator(function):
        @functools.wraps(function)
        def wrapper(*args, **kwargs):
            response = function(*args, **kwargs)
            # Retry only while we keep seeing 404 and budget remains.
            for _ in range(times - 1):
                if response.status_code != 404:
                    break
                response = function(*args, **kwargs)
            return response
        return wrapper
    return decorator
b626fcea2de9123dfae4a135d7f7f50b950ed562
50,984
def separate_profile(profile_updated_df):
    """
    Split the profile dataframe by age.

    INPUT:
        profile_updated_df : dataframe of profile
    RETURN:
        profile_updated_main : rows with age < 100 (main profile)
        profile_updated_sp : rows with age >= 100 (special profile,
            missing gender/income), with the 'gender', 'income' and
            'age' columns dropped
    """
    main_df = profile_updated_df[profile_updated_df.age < 100]
    special_df = profile_updated_df[profile_updated_df.age >= 100]
    special_df = special_df.drop(columns=['gender', 'income', 'age'])
    return main_df, special_df
6d81cec9409c67a6fda43bbc328838238adb3c7c
50,985
import os


def get_file_content(full_path):
    """Get file content function from read_sql_files_to_db.py.

    Sniffs the first bytes of the file: if the repr of the sniff contains
    the UTF-16 LE BOM escape, the file is read as utf-16, otherwise as
    latin-1 (both with backslashreplace for undecodable bytes).
    Directories yield an empty string.
    """
    if os.path.isdir(full_path):
        return ""
    sniff_len = min(32, os.path.getsize(full_path))
    with open(full_path, 'rb') as handle:
        raw = handle.read(sniff_len)
    # str(raw) produces the escaped repr, so the BOM shows up as the
    # literal characters '\xff\xfe'.
    encoding = "utf-16" if '\\xff\\xfe' in str(raw) else "latin-1"
    with open(full_path, 'r', encoding=encoding,
              errors="backslashreplace") as handle:
        return handle.read()
d433ca5ec432aae5803e959d4767f1790593eed4
50,986
def get_all_top_level_items(tree_widget):
    """
    Returns all the top level items of the QTreeWidget

    :param tree_widget: your QTreeWidget
    :return: list of QTreeWidgetItems
    """
    count = tree_widget.topLevelItemCount()
    return [tree_widget.topLevelItem(i) for i in range(count)]
d0dcf2134106b520ef839db0697108fd0e455730
50,987
import random


def random_word(words, maxw=3):
    """Generate a random word from a given number of words.

    Randomly selects n words from *words*, takes a random-length prefix
    (at least 2 characters) of each, and concatenates the prefixes.

    Parameters
    ----------
    words : list of strings
        A dictionary of words to select from.
    maxw : integer
        The maximum number of words to select and combine (clamped to >= 2).

    Returns
    -------
    out : string
        The random word created.
    """
    maxw = max(2, maxw)
    count = random.randint(2, maxw)
    chosen = random.sample(words, count)
    # NOTE: the prefix length draws happen in selection order, matching
    # the original call sequence for reproducibility under a fixed seed.
    prefixes = [word[:random.randint(2, len(word))] for word in chosen]
    return ''.join(prefixes)
05833c7034623eac9288697d05827f9b884f783e
50,988
import json


def get_dict(x):
    """Convert JSON to Python dict.

    Convert a JSON String into a Python dictionary.

    Args:
        x: JSON String

    Returns:
        Python dictionary
    """
    parsed = json.loads(x)
    return parsed
a5ec8a4f4561c89d9e623d2268208632110aa4de
50,989
def build_experiments_response(experiments: dict):
    """
    Build the response representing a list of experiments according to
    the API specification.

    Parameters
    ----------
    experiments: dict
        A dict containing pairs of ``experiment-name:experiment-version``.

    Returns
    -------
    A JSON-serializable list of experiments as defined in the API
    specification.
    """
    return [{"name": name, "version": version}
            for name, version in experiments.items()]
0c3098b5e341d30ead27ce94ef3589cf1a5a24f6
50,990
def isFromMyBusinessNetwork(parsed_email):
    """
    If the source and destination share the same email server and are
    connected to the same access point, the header lists no public IPs
    ('received_ip' is None).

    :param parsed_email: json with the expected structure (see printEmailInfo)
    :return: local IP of the sender when both ends are on the same
        network/email server, None otherwise.
    """
    header = parsed_email.get('header')
    if header.get('received_ip') is not None:
        return None
    # First 'from' entry of the first 'received' hop is the sender's local IP.
    return header.get('received')[0].get('from')[0]
4527c4348149df7dafff8f4097e8ad1eab90904e
50,991
def extract_domains(email_addresses):
    """
    Returns the set of email domains extracted from a list of email
    addresses.

    Parameters
    ----------
    email_addresses: list, required
        Email addresses are dicts of type { "address" : "recipient1@domain.test" }

    Returns
    -------
    set
        Lower-cased email domains.
    """
    return {entry['address'].lower().split('@')[1]
            for entry in email_addresses}
17eabfbfc995198c4a0356a755e5d22689b9c581
50,992
def _deserialize_serializable(obj):
    """Deserialize Feed Dict.

    Rebuilds an instance from a ``{"type": cls, "data": attrs}`` mapping:
    allocates via ``cls.__new__`` (bypassing ``__init__``) and restores
    the attribute dict.
    """
    cls = obj["type"]
    instance = cls.__new__(cls)
    instance.__dict__.update(obj["data"])
    return instance
3900ae564edcc821697968ca286bd500b478bc8c
50,993
def get_unique_tags(top_n_subclasses: list):
    """
    Gets the unique tags in all top n subclasses, sorted alphabetically.
    """
    unique = set()
    for tags in top_n_subclasses:
        unique.update(tags)
    return sorted(unique)
8471a9dbaf49d29bf3b015f4dc7049b351f50e3b
50,994
def remove_duplicate_base_on_flowcell_id(list_runs):
    """
    Take a list of runs and remove duplicates based on the flowcell id,
    keeping the newest run of each flowcell (run ids start with the date,
    so lexicographic comparison orders them chronologically).
    """
    newest_by_flowcell = {}
    for run_id in list_runs:
        _date, _machine, _number, stage_flowcell = run_id.split('_')
        # Drop the leading stage character to get the bare flowcell id.
        flowcell = stage_flowcell[1:]
        current = newest_by_flowcell.get(flowcell)
        if current is None or run_id > current:
            newest_by_flowcell[flowcell] = run_id
    return sorted(newest_by_flowcell.values())
f70488e8b6aa8fd05de97e525501eb60c5baf3c9
50,996
def parse_invocation(event):
    """Return the term and url for the invocation.

    The term is used to check against rules, and the URL is used to
    check a winner.
    """
    first_attachment = event.get("attachments")[0]
    return first_attachment.get("title"), first_attachment.get("title_link")
900aa50a7b0bbe9651dfc96cc572157cad83bd97
50,998
from typing import Tuple


def _image_resolution(conf, max_layers=None) -> Tuple[int, int]:
    """
    The resolution of the last layer of the generator network.

    Each layer doubles both dimensions of the first layer; *max_layers*
    optionally caps the number of layers considered.
    """
    height, width = conf.model.first_layer_size
    layers = conf.model.num_layers
    if max_layers is not None:
        layers = min(max_layers, layers)
    scale = 2 ** layers
    return height * scale, width * scale
9c01612102adf290320ef894075f6070edbce313
50,999
def is_toplevel(toplevel) -> bool:
    """Return `True` if `toplevel`-decorated class.

    Returns 'True' if 'toplevel' (class or instance) has been decorated
    with '@toplevel'. It checks the presence of a 'fields_sep' attribute
    and a callable 'from_path'.
    """
    has_sep = hasattr(toplevel, "fields_sep")
    from_path = getattr(toplevel, "from_path", None)
    return has_sep and callable(from_path)
7c781d31667eadc336ef91839075df8e8a8c61db
51,001
import argparse


def create_parser():
    """
    Parser

    :return: argparse.ArgumentParser
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--image_path', required=True, help='File to process')
    return parser
a19e6cbda131454215cc1a604fefee5d14c1a69b
51,003
def _max_thread_width(thread):
    """Compute the widest breadth of the thread: the maximum number of
    direct replies any comment in the thread has received."""
    children = thread['children']
    if not children:
        return 0
    widest_child = max(_max_thread_width(reply) for reply in children)
    return max(widest_child, len(children))
2689347d71177bc39f0b572c7a91782f29be641e
51,004
from typing import Any, Iterable, Tuple
import itertools


def grouper(iterable: Iterable[Any], n: int, fillvalue=None) -> Iterable[Tuple[Any, ...]]:
    """Collect data into fixed-length chunks or blocks.

    grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx

    (Kept local instead of using more_itertools because of argument-order
    differences in older versions.)
    """
    # n copies of the SAME iterator, so zip_longest pulls n items per tuple.
    iterators = [iter(iterable)] * n
    return itertools.zip_longest(*iterators, fillvalue=fillvalue)
0497ff3d81b96e0915a25f02cb47481535774080
51,005
import platform
import os
import subprocess


def guess_bash_ps1():
    """Returns the default BASH prompt.

    PS1 is a shell-local variable, not an environment variable, so it is
    usually invisible to child processes (including this one)
    [2019](https://stackoverflow.com/a/54999265). Strategy:

    1. If $PS1 happens to be exported, return it directly.
    2. On macOS, return the historical default prompt: an interactive
       ``bash -i -c 'echo $PS1'`` there reports a bare "bash-3.2" prompt
       rather than the Terminal.app one ("host:dir user$").
    3. Otherwise ask an interactive bash to echo its $PS1. On Ubuntu this
       matches the default terminal prompt (though a user-customised PS1
       may not be picked up this way).
    """
    exported = os.environ.get("PS1")
    if exported is not None:
        return exported

    if platform.system() == "Darwin":
        # Default prompt on macOS up to Catalina.
        return r"\h:\W \u\$"

    output = subprocess.check_output(
        ['/bin/bash', '-i', '-c', 'echo $PS1'])
    return output.decode().rstrip()
d524e99ae4db48d6d3a9e0ab6a882404f008b5ba
51,008
import six


def text_repr(val):
    """Format values in a way that can be written as text to disk.

    Encode Unicode values as UTF-8 bytestrings (recommended for
    csv.writer), and use the str() representation of numbers.

    Args:
        val (obj): A string-able object, a `unicode` object, or `None`

    Returns:
        str: The string representation of `val`
    """
    if val is None:
        return ''
    if isinstance(val, six.text_type):
        return val.encode('utf8', 'ignore')
    return str(val)
a7396b096f9f32e6b318530a3a1dc8f4be6545a6
51,013
import os


def _get_nb_path(name: str, suffix: str = "", abs: bool = True, ext: str = ".ipynb") -> str:
    """
    Creates a correct path no matter where the test is run from
    """
    module_dir = os.path.dirname(__file__)
    relative = f"{module_dir}/testdata/{name}{suffix}{ext}"
    if abs:
        return os.path.abspath(relative)
    return relative
b5fc251e8fdd09137d31ebd0a46d89e180b72414
51,015
import re


def list_items_to_text(docs: str) -> str:
    """
    Convert HTML list items to plain text.

    The result is in reST(reStructuredText) format, which is suitable for
    Python's Sphinx documentation generator, and is also very human readable.

    Walks the <li>/</li> tags with a single scan, tracking nesting depth
    and emitting a "* "-bulleted, depth-indented paragraph per list item.
    Text outside any list is kept (with its HTML tags) as-is.
    """
    docs = docs.strip()
    # Remove any <ul> tags (the <li> tags are all we need)
    docs = re.sub("</?ul[^>]*>", "", docs)
    # Iterate through all the <li> start and end tags, tracking the nested
    # list depth (-1 => not in a list, 0 => in top-level list, ...)
    result = ''
    depth = -1
    end_idx = 0
    for li_match in re.finditer(re.compile("</?li[^>]*>"), docs):
        li_start = li_match.start()
        li_end = li_match.end()
        li_text = li_match.group()
        # Add on the next segment of text. If we're in a list, remove any
        # other HTML tags it contains so list items are plain text.
        segment = docs[end_idx:li_start].strip()
        if depth >= 0:
            segment = re.sub("<[^>]+>", "", segment)
        if segment:
            if depth >= 0:
                # We're in a list, so add a bullet point marker to the first
                # line and align any later lines with the first line's text.
                # First indent every line by two spaces, then overwrite the
                # first line's two-space indent with the "* " bullet.
                segment = re.sub("(?m)^\\s*", "  ", segment)
                segment = "* " + segment[2:]
                # Add more indentation according to the list nesting depth
                if depth > 0:
                    segment = re.sub("(?m)^", "  "*depth, segment)
            # Add the segment, with a blank line before (and later, after)
            # for compatibility with Sphinx
            if result:
                result += "\n\n"
            result += segment
        end_idx = li_end
        # Track the list nesting depth
        if li_text.startswith("<li"):
            depth += 1
        elif depth >= 0:
            depth -= 1
    # Add the final segment (assumed to not be in a list)
    segment = docs[end_idx:].strip()
    if segment:
        if result:
            result += "\n\n"
        result += segment
    return result
24c7ea0f9e8c39cfcab8b5290fead3e860994e83
51,016
def separators(tree):
    """
    Returns a dictionary of separators and corresponding edges in the
    junction tree tree.

    Args:
        tree (NetworkX graph): A junction tree

    Returns:
        dict: Example {sep1: [sep1_edge1, sep1_edge2, ...], sep2: [...]}
    """
    sep_to_edges = {}
    for edge in tree.edges():
        # A separator is the intersection of the two adjacent cliques.
        sep = edge[0] & edge[1]
        sep_to_edges.setdefault(sep, set()).add(edge)
    return sep_to_edges
652d72f0c62575e9525c38a1fc728f7fcaaf5258
51,017
def overlap(_x: list, _y: list) -> float:
    """Overlap coefficient (Szymkiewicz-Simpson coefficient).

    https://en.wikipedia.org/wiki/Overlap_coefficient

    Returns 0.0 when either input is empty — the coefficient is undefined
    there, and the original implementation raised ZeroDivisionError.
    """
    set_x = frozenset(_x)
    set_y = frozenset(_y)
    smaller = min(len(set_x), len(set_y))
    if smaller == 0:
        return 0.0
    return len(set_x & set_y) / float(smaller)
51104b6ab1dc6af5a229fbb489cc795828232e6b
51,019
def get_values_map_keys(records, keyidx=0):
    """
    Given a dict of str->2-tuples, e.g.:
     {'anat': [('modality', 'anat'), ('image_file', 'anat_hc.nii.gz')],
      'pet': [('modality', 'pet'), ('image_file', 'pet_fdg.nii.gz')],

    or

    Given a list of list of 2-tuples of str, e.g.:
     [[('modality', 'anat'), ('image_file', 'anat_hc.nii.gz')],
       ('modality', 'pet'), ('image_file', 'pet_fdg.nii.gz')],

    Will return the unique values of each record value, in this case:
    {'modality', 'image_file'}.

    Parameters
    ----------
    records: Dict[str->2-tuple] or list of list of 2-tuple
    keyidx: int
        Which element of each tuple to collect (0 by default).

    Returns
    -------
    keys: set[str]
        (an empty list is returned when `records` is empty or None,
        preserving historical behavior)
    """
    if not records:
        return []

    if isinstance(records, dict):
        groups = records.values()
    elif isinstance(records, list):
        groups = records
    else:
        raise NotImplementedError('Expected a `dict` or a `list of list` as `records, '
                                  'got {}.'.format(type(records)))

    keys = set()
    for items in groups:
        keys.update(tup[keyidx] for tup in items)
    return keys
cfecb13857e72014c9ba0472404bffdf5af076d1
51,020
def encode_time(t):
    """Encode a timestamp sent at the timeclock.

    Copied from zkemsdk.c - EncodeTime. Packs the date as a day count
    assuming 12 months of 31 days each, then combines it with the number
    of seconds elapsed within the day.
    """
    days = ((t.year % 100) * 12 * 31) + ((t.month - 1) * 31) + (t.day - 1)
    seconds_in_day = (t.hour * 60 + t.minute) * 60 + t.second
    return days * (24 * 60 * 60) + seconds_in_day
3ca90606562b81718ff8b486c4b1b43a683d6d36
51,021
def compare_changes(obj, **kwargs):
    """
    Compare two dicts returning only keys that exist in the first dict
    and are different in the second one
    """
    return {key: kwargs[key]
            for key, value in obj.items()
            if key in kwargs and value != kwargs[key]}
ad88dc60cc3c93d0da15531bf0ef11e7610b1d66
51,022
def _decode_data(byte_data: bytes):
    """Decode raw bytes to text.

    Tries UTF-8 first and falls back to GB18030 for legacy
    Chinese-encoded payloads.

    :param byte_data: the raw bytes to decode
    :return: the decoded string
    """
    try:
        decoded = byte_data.decode('UTF-8')
    except UnicodeDecodeError:
        decoded = byte_data.decode('GB18030')
    return decoded
638b8288365f6e954e2d587ebc83a10afa780fda
51,023
def asteriskify(text, count=3):
    """Decorate text with asterisks

    Arguments:
        text {str} -- a text to be decorated
        count {int} -- number of asterisks on each side (default: {3})

    Returns:
        str -- a decorated text
    """
    stars = "*" * count
    return " ".join([stars, text, stars])
213810b222f3ff55103e36b619cc7157636ea9f6
51,024