content: string (lengths 35–416k) · sha1: string (length 40) · id: int64 (0–710k)
def median(values):
    """Return median value for the list of values.

    @param values: list of values for processing.
    @return: median value.
    """
    values.sort()
    n = int(len(values) / 2)
    return values[n]
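
# Usage sketch (illustrative values):
#   >>> median([3, 1, 2])
#   2
#   >>> median([1, 2, 3, 4])   # even length: returns the upper-middle value
#   3
# Note that the input list is sorted in place as a side effect.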
c40b9e0b2cdd2b00ae6692aed2cfe9787bc8f9df
701,294
import torch


def collate_fn(batch, transforms):
    """Collate function to be passed to the PyTorch dataloader.

    Parameters
    ----------
    batch : list
        Uncollated batch of size `batch_size`.
    transforms : callable
        Transformations to be applied on input PIL images.
    """
    images, bboxess, labelss, img_widths, img_heights = zip(*batch)

    # Turn image widths and heights into pseudo bounding boxes to retrieve back
    # later
    img_widths = torch.tensor(img_widths, dtype=torch.float32)
    img_heights = torch.tensor(img_heights, dtype=torch.float32)
    x1_or_y1 = torch.zeros_like(img_widths, dtype=torch.float32)
    pseudo_img_sizes = torch.stack(
        [x1_or_y1, x1_or_y1, img_widths, img_heights], dim=-1)

    # Bounding boxes
    bboxess_ = []
    for bboxes, pseudo_img_size in zip(bboxess, pseudo_img_sizes):
        assert len(bboxes) % 4 == 0
        bboxes = torch.tensor(bboxes, dtype=torch.float32)
        bboxes = bboxes.view(-1, 4)
        # Concat with (pseudo) image size
        bboxes = torch.cat((bboxes, pseudo_img_size[None, :]), dim=0)
        bboxess_.append(bboxes)
    bboxess = bboxess_

    # Labels
    labelss_ = []
    for labels, bboxes in zip(labelss, bboxess):
        assert len(labels) == len(bboxes) - 1  # accounts for pseudo image size
        labels = torch.tensor(labels, dtype=torch.float32)
        labelss_.append(labels)
    labelss = labelss_

    # Transformations
    images_trans, bboxess_trans, labelss_trans = [], [], []
    image_boundaries = []
    for image, bboxes, labels in zip(images, bboxess, labelss):
        outp = transforms(image=image, bboxes=bboxes, class_labels=labels)
        images_trans.append(outp["image"])
        labelss_trans.append(outp["class_labels"])
        bboxes_trans = outp["bboxes"]
        bboxess_trans.append(bboxes_trans[:-1, :])
        image_boundaries.append(bboxes_trans[-1, :])

    images_trans = torch.stack(images_trans, dim=0)
    image_boundaries = torch.stack(image_boundaries, dim=0)

    return images_trans, bboxess_trans, labelss_trans, image_boundaries
89daebd2f58d9a7b83c7bd7c14cdde2ed16d8b3d
701,295
def sanitizer(name):
    """
    Sanitizes a string supposed to be an entity name. That is, invalid
    characters like slashes are substituted with underscores.

    :param name: A string representing the name.
    :returns: The sanitized name.
    :rtype: str
    """
    return name.replace("/", "_")
f59aa75a40067068c711a4ca01b643c69d43cd0c
701,296
import subprocess


def git_uncommited_changes():
    """
    Returns True if the current git branch has uncommitted changes.
    """
    # `git status` always exits with 0, so ask `git diff` instead: it exits
    # with a non-zero code when the working tree differs from HEAD.
    p = subprocess.Popen(
        ['git', 'diff', '--quiet', 'HEAD'],
    )
    returncode = p.wait()
    return returncode != 0
0e72aa04afabc0dd8a014dbfb30a08cdf22eed01
701,297
import re


def password_string(value):
    """Password input type."""
    if (len(value) < 6 or len(value) > 20 or
            re.match(r'^(?![0-9]+$)(?![a-zA-Z]+$)[0-9A-Za-z]{6,20}$', value) is None):
        raise ValueError('Password must contain both digits and letters, '
                         'be 6-20 characters long, and contain no invalid characters')
    return value
e6bfcc473c164cd06e3acd65165f6fb9dcd251cf
701,298
def build_bankruptcy_definition(years):
    """Build a bankruptcy definition

    Notes:
        This function is set according to a line of best fit from Year 0 at
        -10% ROI to 10% ROI by Year 7.

    Args:
        years (int): No. of years for analysis

    Returns:
        Bankruptcy definition (list): A timeseries of the bankruptcy
        threshold graph

    TO DO:
        Allow flexibility of bankruptcy definition by allowing input of two
        data points rather than assuming (-10%, 0) and (10%, 7)
    """
    bankruptcy_definition = []
    for y in range(years + 1):
        # Threshold for bankruptcy
        if y <= 7:
            # Year 0: -10% ROI, Year 7: 10% ROI
            bankruptcy_definition.append(y * 2.8571 - 10)
        else:
            bankruptcy_definition.append(10)
    return bankruptcy_definition
c2364bd97eec587e57bc0d701d83b304631a8bd2
701,299
def bad_unpacking():
    """ one return isn't unpackable """
    if True:
        return None
    return [1, 2]
dd241b5aa49b58300fd883d61b5eb41b1eb92aa1
701,300
def build_slices_from_list_of_arrays(list_of_arrays, n_his, n_feat, verbose=0):
    """ This function creates a list of slices of shape (n_his + 1, n_feat) """
    assert list_of_arrays[0].shape[1] == n_feat, "list_of_arrays[0].shape[1]={} but n_feat={}".format(
        list_of_arrays[0].shape[1], n_feat)
    X_slices = []
    for m, arr in enumerate(list_of_arrays):
        if arr.shape[0] < n_his + 1:
            if verbose > 0:
                print("Sequence {} has length {}".format(m, arr.shape[0]))
        else:
            for k in range(n_his + 1, arr.shape[0] + 1):
                X_slice = arr[(k - n_his - 1):k, :]
                if X_slice.shape[0] != n_his + 1:
                    if verbose > 0:
                        print("error!")
                X_slices.append(X_slice)
    return X_slices
bfec73928e84a07eab9fc00f2cf9c8b1d5ca31cb
701,301
def luhn_checksum(check_number):
    """http://en.wikipedia.org/wiki/Luhn_algorithm ."""
    def digits_of(n):
        return [int(d) for d in str(n)]
    digits = digits_of(check_number)
    odd_digits = digits[-1::-2]
    even_digits = digits[-2::-2]
    checksum = 0
    checksum += sum(odd_digits)
    for d in even_digits:
        checksum += sum(digits_of(d * 2))
    return checksum % 10
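
# Usage sketch (illustrative value): a number whose Luhn check digit is
# valid yields a checksum of 0.
#   >>> luhn_checksum(79927398713)
#   0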
4209fd2f77acb240e7e2adfca63d638bb6f79187
701,302
def blockchain_timeframe_summary(final_df, config) -> dict:
    """
    Get summary statistics

    :param final_df: The full dataframe of address information from the blockchain
    :param config: configuration file for including start/end times in output dataframe
    """
    print("Getting summary counts of analysis")
    summary = {'total_transactions': len(final_df),
               'from_time': config['START_TIME'],
               'to_time': config['END_TIME'],
               'count_unique_assets': final_df['asset-id-asset-transfer-tx'].nunique(),
               'count_unique_applications': final_df['application-id-application-tx'].nunique()}
    tx_type_summary = final_df['tx-type'].value_counts().to_dict()
    summary.update(tx_type_summary)
    return summary
eedbb4e33bfdb937426ace26eba10427694356ec
701,303
import ctypes


def make_array_ctype(ndim):
    """Create a ctypes representation of an array_type.

    Parameters
    ----------
    ndim: int
        number of dimensions of array

    Returns
    -------
    a ctypes array structure for an array with the given number of dimensions
    """
    c_intp = ctypes.c_ssize_t

    class c_array(ctypes.Structure):
        _fields_ = [('parent', ctypes.c_void_p),
                    ('nitems', c_intp),
                    ('itemsize', c_intp),
                    ('data', ctypes.c_void_p),
                    ('shape', c_intp * ndim),
                    ('strides', c_intp * ndim)]

    return c_array
c83ab386d40043d49d1b86c21c2789c461999fc5
701,305
def get_json_request_header():
    """
    Return the header for JSON request
    :return:
    """
    return {'Accept': 'application/json',
            'Authorization': 'Token sessionTokenHere==',
            'Accept-Language': 'en'}
9767910ae8e4c1fe8993ab45fa2331e9ea6efad1
701,306
import platform


def get_uname():
    """Get uname."""
    # Preferable to running a system command
    uname = " ".join(platform.uname())
    return uname
18b7fc9ae6c51c0c5087ed67586a7fc358697405
701,307
def reindent(text, spaces=''):
    """ Removes any leading empty columns of spaces """
    lines = text.splitlines()
    lspace = [len(l) - len(l.lstrip()) for l in lines if l.lstrip()]
    margin = len(lspace) and min(lspace)
    return '\n'.join((spaces + l[margin:]) for l in lines)
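
# Usage sketch (illustrative values): the common leading margin is stripped
# while relative indentation is preserved.
#   >>> reindent("    a\n      b")
#   'a\n  b'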
af46a4440839fb38850d973df0d62abe02213755
701,308
def income_percentiles(row, percentiles, prefix="total"):
    """
    Estimate income percentiles from counts in the census income ranges.

    Parameters
    ==========

    row: pandas.Series
        A series that contains binned incomes in the ranges given by
        CENSUS_INCOME_RANGES.

    percentiles: List[float]
        A list of percentiles (from zero to 100) for which to estimate values.

    prefix: str
        A prefix for the income range columns (e.g., census-indicated race).
        Defaults to "total"

    Returns
    =======

    A list of estimated income percentiles of the same length as percentiles,
    in thousands of dollars.
    """
    # Edges of the reported income bins, in thousands of dollars
    bins = [0, 10, 15, 20, 25, 30, 35, 40, 45, 50, 60, 75, 100, 125, 150, 200]
    # Iterator over percentiles
    p_it = iter(percentiles)
    # Final values for percentiles
    values = []
    # Initialize current percentile and an accumulator variable
    curr = next(p_it)
    acc = 0
    # The total count for the tract
    total = row[f"{prefix}_total"]
    if total <= 0:
        return values
    for i, b in enumerate(bins):
        # Compute the label of the current bin
        if i == 0:
            label = f"{prefix}_lt{bins[i+1]}"
        elif i == len(bins) - 1:
            label = f"{prefix}_gt{b}"
        else:
            label = f"{prefix}_r{b}to{bins[i+1]-1}"
        # Estimate the value for the current percentile
        # if it falls within this bin
        while (acc + row[label]) / total > curr / 100.0:
            frac = (total * curr / 100.0 - acc) / row[label]
            lower = b
            upper = bins[i + 1] if i < (len(bins) - 1) else 300.0
            interp = (1.0 - frac) * lower + frac * upper
            values.append(interp)
            try:
                curr = next(p_it)
            except StopIteration:
                return values
        # Increment the accumulator
        acc = acc + row[label]
    return values
ca44ef180f13c730a2eea10c4c4b576e8d54819e
701,309
def eq_xtl(Cl, D, F):
    """
    eq_xtl calculates the composition of a trace element in the remaining
    liquid after a certain amount of crystallization has occurred from a
    source melt when the crystal remains in equilibrium with the melt, as
    described by White (2013) Chapter 7 eq. 7.81. It then calculates the
    concentration of that trace element in a specific solid phase based on
    partition coefficient input.

    Inputs:
        Cl = concentration of trace element in original liquid
        D = bulk distribution coefficient for trace element of crystallizing assemblage
        F = fraction of melt remaining

    Returns:
        Cl_new = concentration of trace element in the remaining liquid
    """
    Cl_new = Cl / (D + F * (1 - D))
    return Cl_new
298a5557775a953a645aade70f41818e42e761ac
701,310
def quantity_label(quantity):
    """Returns formatted string of parameter label
    """
    labels = {
        'accrate': r'$\dot{m}$',
        'alpha': r'$\alpha$',
        'd_b': r'$d \sqrt{\xi_\mathrm{b}}$',
        'dt': r'$\Delta t$',
        'fluence': r'$E_\mathrm{b}$',
        'length': 'Burst length',
        'lum': r'$\mathit{L}$',
        'm_gr': '$M$',
        'm_nw': r'$M_\mathrm{NW}$',
        'mdot': r'$\dot{m}$',
        'peak': r'$L_\mathrm{peak}$',
        'qb': r'$Q_\mathrm{b}$',
        'rate': r'Burst rate',
        'redshift': '$z$',
        'x': r'$X_0$',
        'xedd': r'$X_\mathrm{Edd}$',
        'xedd_ratio': r'$X_\mathrm{Edd} / X_0$',
        'xi_ratio': r'$\xi_\mathrm{p} / \xi_\mathrm{b}$',
        'xi_b': r'$\xi_\mathrm{b}$',
        'xi_p': r'$\xi_\mathrm{p}$',
        'z': r'$Z_\mathrm{CNO}$',
    }
    return labels.get(quantity, f'${quantity}$')
467baf8f3d2ae72474e6f65e6990779e7cb10034
701,311
def get_response(request):
    """
    Fake view-like callback to use in middleware tests.
    """
    return None
711106895c1aa21847152763bdd05a1f9a367ed6
701,313
def set_proper_dtypes(df):
    """ forgot to save integers as integers. Only the distances feature
    columns have true floats. """
    potential_integer_cols = df.columns.difference(
        list(df.filter(regex='distances.*', axis=1)))
    for col in potential_integer_cols:
        if str(df[col].dtype) != 'object':
            df[col] = df[col].astype(int)
    return df
2a7d5f615c666d9a1778dfda1ba427c87541e34b
701,314
import re
import sys


def _is_contextvars_broken():
    # type: () -> bool
    """
    Returns whether gevent/eventlet have patched the stdlib in a way where
    thread locals are now more "correct" than contextvars.
    """
    try:
        # Imported lazily so the ImportError handler below actually fires
        # when gevent is not installed.
        import gevent  # type: ignore
        from gevent.monkey import is_object_patched  # type: ignore

        # Get the MAJOR and MINOR version numbers of Gevent
        version_tuple = tuple(
            [int(part) for part in re.split(r"a|b|rc|\.", gevent.__version__)[:2]]
        )
        if is_object_patched("threading", "local"):
            # Gevent 20.9.0 depends on Greenlet 0.4.17 which natively handles switching
            # context vars when greenlets are switched, so, Gevent 20.9.0+ is all fine.
            # Ref: https://github.com/gevent/gevent/blob/83c9e2ae5b0834b8f84233760aabe82c3ba065b4/src/gevent/monkey.py#L604-L609
            # Gevent 20.5, that doesn't depend on Greenlet 0.4.17 with native support
            # for contextvars, is able to patch both thread locals and contextvars, in
            # that case, check if contextvars are effectively patched.
            if (
                # Gevent 20.9.0+
                (sys.version_info >= (3, 7) and version_tuple >= (20, 9))
                # Gevent 20.5.0+ or Python < 3.7
                or (is_object_patched("contextvars", "ContextVar"))
            ):
                return False

            return True
    except ImportError:
        pass

    try:
        from eventlet.patcher import is_monkey_patched  # type: ignore

        if is_monkey_patched("thread"):
            return True
    except ImportError:
        pass

    return False
39c298d0fc1cedf7770215b5fe802bdc0dfe4644
701,315
def natSettings(ctx, mach, nicnum, nat, args):
    """This command shows/alters NAT settings.
    usage: nat <vm> <nicnum> settings [<mtu> [[<socsndbuf> <sockrcvbuf> [<tcpsndwnd> <tcprcvwnd>]]]]
    mtu - set mtu <= 16000
    socksndbuf/sockrcvbuf - sets amount of kb for socket sending/receiving buffer
    tcpsndwnd/tcprcvwnd - sets size of initial tcp sending/receiving window
    """
    if len(args) == 1:
        (mtu, socksndbuf, sockrcvbuf, tcpsndwnd, tcprcvwnd) = nat.getNetworkSettings()
        if mtu == 0:
            mtu = 1500
        if socksndbuf == 0:
            socksndbuf = 64
        if sockrcvbuf == 0:
            sockrcvbuf = 64
        if tcpsndwnd == 0:
            tcpsndwnd = 64
        if tcprcvwnd == 0:
            tcprcvwnd = 64
        msg = 'mtu:%s socket(snd:%s, rcv:%s) tcpwnd(snd:%s, rcv:%s)' % (mtu, socksndbuf, sockrcvbuf, tcpsndwnd, tcprcvwnd)
        return (0, [msg])
    else:
        if not args[1].isdigit() or int(args[1]) < 65 or int(args[1]) > 16000:
            print('invalid mtu value (%s not in range [65 - 16000])' % (args[1]))
            return (1, None)
        for i in range(2, len(args)):
            if not args[i].isdigit() or int(args[i]) < 8 or int(args[i]) > 1024:
                print('invalid parameter %i (%s not in range [8-1024])' % (i, args[i]))
                return (1, None)
        a = [args[1]]
        if len(args) < 6:
            for i in range(2, len(args)):
                a.append(args[i])
            for i in range(len(args), 6):
                a.append(0)
        else:
            for i in range(2, len(args)):
                a.append(args[i])
        # print(a)
        nat.setNetworkSettings(int(a[0]), int(a[1]), int(a[2]), int(a[3]), int(a[4]))
    return (0, None)
2b3e60e68f7c04e8f46b63acf3713297d781d852
701,317
def compare_cards(card1, card2):
    """ Compare the two given cards and return success if successful """
    success = False
    if card1.symbol == card2.symbol:
        success = True  # Success
    return success
27ee184305bd725af2dd1a4a3ef90ef72c815fbf
701,319
def local_overrides_git(install_req, existing_req):
    """Check whether we have a local directory and a Git URL

    :param install_req: The requirement to install
    :type install_req: pip.req.req_install.InstallRequirement
    :param existing_req: An existing requirement or constraint
    :type existing_req: pip.req.req_install.InstallRequirement
    :return: True if the requirement to install is a local directory and the
        existing requirement is a Git URL
    :rtype: bool
    """
    return (install_req.link and existing_req.link
            and install_req.link.url.startswith('file:///')
            and existing_req.link.url.startswith('git+'))
5c031506ddee4d05332421f8470026fbceb0978d
701,320
def initializeANEOS(in_filename="std::string",
                    out_filename="std::string",
                    izetl="std::vector<int>"):
    """Initialize ANEOS with some rather arcane input.

    in_filename  : The name of the ANEOS input file, initializes the Fortran ANEOS library
    out_filename : An optional file to write any output from the ANEOS initialization call
    izetl        : An array of the material numbers ("EOS#" in the ANEOS input file)
                   Note, these numbers must be the negative of the "EOS#" in the input
    """
    return "void"
eedb4227c13e78a52916f64f7589b9a694f3b28f
701,321
from typing import Any

import torch


def infer_device(x: Any):
    """Infer the device of any object (CPU for any non-torch object)"""
    if isinstance(x, torch.Tensor):
        return x.device
    return torch.device("cpu")
a566c267897ca602e13710a30d928fdc49f856a5
701,322
def turns_remaining(turns):
    """returns the number of turns remaining"""
    if turns == 0:
        return "You ran out of turns."
    else:
        return "You have {} turns remaining.".format(turns)
7315f5522c0da660ba37526005c53ba9986d8aa8
701,323
def cross_ratio(A, B, C, D):
    """The cross ratio of four _collinear_ points is invariant under
    projective transformation. That means that, for any homography H,

        cross_ratio(A, B, C, D) == cross_ratio(HA, HB, HC, HD)

    which can be useful."""
    # (u, v, w) is the line orthogonal to (A-D), that contains A
    u = A[0] - D[0]
    v = A[1] - D[1]
    w = -(u*A[0] + v*A[1])

    # Find the point-line distances
    a = u*A[0] + v*A[1] + w  # i.e., 0
    b = u*B[0] + v*B[1] + w
    c = u*C[0] + v*C[1] + w
    d = u*D[0] + v*D[1] + w

    return ((a - c)*(b - d)) / ((b - c) * (a - d))
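
# Usage sketch (illustrative values): for collinear points at parameters
# 0, 1, 2, 3 along a line, the cross ratio (AC*BD)/(BC*AD) = (2*2)/(1*3) = 4/3.
#   >>> cross_ratio((0, 0), (1, 1), (2, 2), (3, 3))
#   1.3333333333333333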
4095d6c448f0d43824ab5b6a05b1921de98388f1
701,324
import random


def find_optimal_route(start_time, expected_time,
                       favorite_route='SBS1K', favorite_option='bus'):
    """ Find optimal route for me to go from home to office.
    First two inputs should be datetime instances.
    """
    # Convert to minutes
    tdiff = (expected_time - start_time).total_seconds() / 60.0

    options = {range(0, 30): 'car',
               range(30, 45): ('car', 'metro'),
               range(45, 60): ('bus:335E', 'bus:connector')}

    if tdiff < 80:
        # Pick the range it falls into (truncate to whole minutes so the
        # membership test against the range keys works)
        for drange in options:
            if int(tdiff) in drange:
                return options[drange]

    # Might as well go by normal bus
    return random.choice(('bus:330', 'bus:331',
                          ':'.join((favorite_option, favorite_route))))
beb64a91e1b5eae4059048c6bf916b8b15dd0be5
701,325
import json
import os


def save_environments(workspace, project, env_data):
    """save environments.json file contents.
    env_data must be a valid json string.
    Returns a string with the error or empty string otherwise"""
    error = ''
    if len(env_data):
        try:
            json.loads(env_data)
        except ValueError:
            error = 'must be valid JSON'
    if not error:
        environments_path = os.path.join(workspace, 'projects', project,
                                         'environments.json')
        with open(environments_path, 'w') as env_file:
            env_file.write(env_data)
    return error
ceff756c8f7ed402c866ead68d66d406f08eb621
701,326
def G1DListMergeEdges(eda, edb):
    """ Get the merge between the two individual edges

    :param eda: the edges of the first G1DList genome
    :param edb: the edges of the second G1DList genome
    :rtype: the merged dictionary
    """
    edges = {}
    for value, near in eda.items():
        for adj in near:
            if (value in edb) and (adj in edb[value]):
                edges.setdefault(value, []).append(adj)
    return edges
4393e1d9260a02b20d49afe53e86393f16a4d879
701,327
def make_rpc_batch_request_entry(rpc_name, params):
    """
    Construct an entry for the list of commands that will be passed as a
    batch (for `_batch`).
    """
    return {
        "id": "50",
        "version": "1.1",
        "method": rpc_name,
        "params": params,
    }
603bc7d063f638849a94820af591331f3993c496
701,328
def __increaseCombinationCounter(root):
    """ increase the combination-counter, which is located in the provided root-node """
    if root['type'] == 'graphroot':
        root['combination'] += 1
    return root
c2f7c88ef2eca4f8c4bf6532cc76ed5a1833dfe6
701,329
import time


def get_time_human_readable():
    """ returns well formatted time string.
    for example: Donnerstag, 21:00
    """
    return time.strftime("%A, %H:%M")
2dfc67ba6eec8b830a9bacb5f4d9c6baaab17943
701,330
import numpy


def gaussian_kernel(x1, x2, sigma):
    """
    Computes the radial basis function
    Returns a radial basis function kernel between x1 and x2.

    Parameters
    ----------
    x1 : numpy ndarray
        A vector of size (n, ), representing the first datapoint.
    x2 : numpy ndarray
        A vector of size (n, ), representing the second datapoint.
    sigma : float
        The standard deviation (bandwidth) parameter for the Gaussian kernel.

    Returns
    -------
    rbf : float
        The computed RBF between the two provided data points.

    Instructions
    ------------
    Fill in this function to return the similarity between `x1` and `x2`
    computed using a Gaussian kernel with bandwidth `sigma`.
    """
    numr = -numpy.sum((x1 - x2)**2)
    denr = 2 * (sigma**2)
    rbf = numpy.exp(numr / denr)
    return rbf
09305fe216a1994e597b9c44c83e7f0fc5283575
701,331
import os


def ensure_symlink(src, dst):
    """Ensure the existence of a symbolic link pointing to src named dst.

    Returns a boolean indicating whether the symlink already existed.
    """
    try:
        os.symlink(src, dst)
    except OSError as e:
        if e.errno == 17:  # EEXIST
            return True
        raise
    return False
0ff95fc22f59f9436e8bf1ab6fe8732c46f953cb
701,333
import re


def remove_numbers(s):
    """Removes numbers, including times."""
    return re.sub(r'\d+(:\d*)*(\.\d*)?', ' ', s)
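
# Usage sketch (illustrative value): each number or time is replaced by a
# single space, so surrounding whitespace doubles up.
#   >>> remove_numbers("meet at 12:30, bring 2 items")
#   'meet at  , bring   items'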
b234b914cc84b04cd183ba086674df0775f3b098
701,335
def _escapeWildCard(klassContent):
    """
    >>> _escapeWildCard('')
    ''
    >>> _escapeWildCard(':*')
    ''
    >>> _escapeWildCard(':Object')
    ':Object'
    """
    return klassContent.replace(':*', '')
537b3969dabb46c3a093dacc3ba49a58833f8c18
701,336
def check_fields(default_dict, new_dict):
    """
    Return the dictionary with default keys and values updated by using the
    information of ``new_dict``

    :param dict default_dict: Dictionary with default values.
    :param dict new_dict: Dictionary with new values.
    :return: dict.
    """
    # Check if the entered variable has different values
    for key in list(set(default_dict.keys()) & set(new_dict.keys())):
        default_dict[key] = new_dict[key]
    return default_dict
c31508cc7ac3e4a717285c2fd082fc6bd7c2eb32
701,337
from typing import Any


def tsv_escape(x: Any) -> str:
    """
    Escape data for tab-separated value (TSV) format.
    """
    if x is None:
        return ""
    x = str(x)
    return x.replace("\t", "\\t").replace("\n", "\\n")
febefd579773aa4deea90e589114d39c5b8a4784
701,338
import functools


def is_to(func):
    """Decorator that ensures caller is owner, TO, or admin."""
    @functools.wraps(func)
    async def wrapper(self, *args, **kwargs):
        ctx = args[0]
        user = ctx.author
        tourney = kwargs['tourney']

        if not (user == tourney.owner
                or tourney.channel.permissions_for(user).administrator
                or any(role.name == 'TO' for role in user.roles)):
            await ctx.send('Only a TO can run this command.')
            return
        return await func(self, *args, **kwargs)
    return wrapper
427306bd299fb84aaf1d271d09ad3ecedf07a66f
701,339
def cardinal(converted_data, path):
    """
    Translates first move on path to cardinal direction.

    Args:
        converted_data (dict): python readable json
        path (list): path from a*

    Return:
        direction (str): cardinal direction as string
    """
    # if x values are same check y values for direction
    if converted_data["you"]["body"][0]['x'] == path[1][0]:
        if (converted_data["you"]["body"][0]['y']) < (path[1][1]):
            direction = 'down'
        else:
            direction = 'up'
    # x values are different check them for direction
    else:
        if (converted_data["you"]["body"][0]['x']) < (path[1][0]):
            direction = 'right'
        else:
            direction = 'left'
    return direction
6802276a8e06a127383cfbe0dcbe511bd0ef2260
701,340
def sum_to_n(n: int) -> int:
    """
    >>> sum_to_n(100)
    5050
    >>> sum_to_n(10)
    55
    """
    return sum(i for i in range(1, n + 1))
867fc0c6b44b153db8215390fbb34fc47bac0c3e
701,341
def set_remove():
    """
    >>> sorted(set_remove())
    [1, 2]
    """
    s = set([1, 2, 3])
    s.remove(3)
    return s
4a8c7971944729fb6c0226a7fbf824d744936f4b
701,342
import importlib.machinery
import types


def load_user_defined_function(function_name: str, module_file: str):
    """Function to load arbitrary functions

    Args:
        function_name (str): name of function to load from function_file
        module_file (str): file module where function is defined

    Returns:
        function loaded from file
    """
    loader = importlib.machinery.SourceFileLoader('user_defined_module', module_file)
    module = types.ModuleType(loader.name)
    loader.exec_module(module)
    return getattr(module, function_name)
350eda67efb6d346b8833385a21959dd7bf93f47
701,343
def insert_edge_list():
    """Solution to exercise R-14.8.

    Repeat Exercise R-14.7 for the adjacency list representation, as
    described in the chapter.

    ---------------------------------------------------------------------------
    Solution:
    ---------------------------------------------------------------------------
    An adjacency list uses a primary list structure V to store the vertices
    of graph G.  V is implemented as a positional list, where each vertex v
    contains a reference to a secondary list I(v) which contains all of v's
    incident edges.  A directed list will have an I_incoming and I_outgoing
    to distinguish between incoming and outgoing edges.  An undirected list
    will simply have I_incoming point to I_outgoing and thus use one list to
    keep track of edges.

    If I assume that I_incoming and I_outgoing are implemented as positional
    lists as well, then a new edge can be added to these lists in O(1) time.
    Even if they are implemented as Python lists, the append operation is
    O(1) amortized.

    I do not implement error checking in the pseudocode below, because the
    get_edge(u, v) method requires O(min(deg(u), deg(v))) time to iterate
    through the secondary lists and would prevent the insert_edge() method
    from being O(1) time.

    def insert_edge(u, v, x):
        e = Edge(u, v, x)          # Create edge instance
        u.I_outgoing.add_last(e)   # Add edge to outgoing list of u
        v.I_incoming.add_last(e)   # Add edge to incoming list of v

    The above pseudocode should run in O(1) time.
    """
    return True
8feca54965fbd5f88d2cc2ed638514e77a0b6ec9
701,344
def heuristic_iteration(gloss_tokens, trans_tokens, aln, comparison_function,
                        multiple_matches=True, iteration=1, report=False):
    """
    :param gloss_tokens:
    :type gloss_tokens: list[str]
    :param trans_tokens:
    :type trans_tokens: list[str]
    :param aln:
    :type aln: Alignment
    :param comparison_function:
    :param iteration:
    """
    gloss_indices = range(0, len(gloss_tokens))
    trans_indices = range(0, len(trans_tokens))

    # On the second pass, let's move from the right backward
    if iteration > 1:
        gloss_indices = gloss_indices[::-1]
        trans_indices = trans_indices[::-1]

    aligned_gloss_w = set(aln.all_src())
    aligned_trans_w = set(aln.all_tgt())

    for gloss_i in gloss_indices:
        # Only allow one alignment from gloss to trans.
        if gloss_i + 1 in aligned_gloss_w:
            continue
        gloss_w = gloss_tokens[gloss_i]

        for trans_i in trans_indices:
            trans_w = trans_tokens[trans_i]

            # Skip any attempt to align already aligned words on the first pass.
            if iteration == 1 and trans_i + 1 in aligned_trans_w:
                continue

            if comparison_function(gloss_w, trans_w):
                if report:
                    print('ADDING "{}"--"{}"'.format(gloss_w, trans_w))
                aln.add((gloss_i + 1, trans_i + 1))
                aligned_gloss_w.add(gloss_i + 1)
                aligned_trans_w.add(trans_i + 1)
                if iteration == 1:
                    # On the first iteration, let's move to another gloss word
                    break

    if iteration == 2 or not multiple_matches:
        return aln
    else:
        return heuristic_iteration(gloss_tokens, trans_tokens, aln,
                                   comparison_function, multiple_matches,
                                   iteration + 1, report)
dffec1a56b687012cce6153e1b89f25da7b830ca
701,345
def decrypt(sk, c):
    """Decrypt a cyphertext based on the provided key."""
    return (c % sk) % 2
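
# Usage sketch (illustrative numbers, assuming the ciphertext has the
# c = bit + 2*noise + multiple_of_sk shape this decryption implies):
#   >>> decrypt(111, 1 + 2*5 + 3*111)
#   1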
d3a96f66e1b449ffbd5b6ca1ba0827455b83f9e2
701,346
import inspect
from typing import Any


def is_valid_broker(obj: Any) -> bool:
    """
    Helper utils to check if an object can be used as a broker in
    `WebSocketManager`.

    Exposed to developers who need to implement a custom broker.
    """
    return (
        (hasattr(obj, 'subscribe') and inspect.iscoroutinefunction(obj.subscribe))
        and (hasattr(obj, 'unsubscribe') and inspect.iscoroutinefunction(obj.unsubscribe))
        and (hasattr(obj, 'publish') and inspect.iscoroutinefunction(obj.publish))
        and (hasattr(obj, 'get_message') and inspect.iscoroutinefunction(obj.get_message))
    )
9452c2f8698264e28b72c5b597ff4c9804ea419e
701,347
from typing import get_origin


def get_base_generic_type(object_type):
    """
    Utility method to return the equivalent non-customized type for a Generic
    type, including user-defined ones. For example calling it on
    typing.List<~T>[int] will return typing.List<~T>.

    If the type is not parametrized it is returned as is

    :param object_type:
    :return:
    """
    return get_origin(object_type) or object_type
    # # Solution referenced by https://github.com/python/typing/issues/423
    # # note: is_generic_type excludes special typing constructs such as
    # # Union, Tuple, Callable, ClassVar
    # if is_generic_type(object_type) or is_tuple_type(object_type):
    #     # inferred_base_type = locate(object_type.__module__ + '.' + object_type.__name__)
    #     # return object_type.__origin__
    #     return get_origin(object_type)
    #     # if inferred_base_type is not None:
    #     #     return inferred_base_type
    #     # else:
    #     #     # that may happen if your generic class has been defined inside a local class
    #     #     # For these classes, we cant' get the base (expressed with 'T') class, strangely enough
    #     #     warn('Unable to find the base generic class for ' + str(object_type) + ' although it seems to extend '
    #     #          'typing.Generic. Using the class directly')
    #     #     return object_type
    # else:
    #     return object_type
03be217c9894d4523a0ab4030957d46f9b75fa88
701,348
import math


def centre_dot(dot_ls):
    """Compute the center point of a set of longitude/latitude coordinates."""
    lng = 0
    lat = 0
    count = len(dot_ls)
    for dot in dot_ls:
        lng += float(dot[0]) * math.pi / 180
        lat += float(dot[1]) * math.pi / 180
    lng /= count
    lat /= count
    center_dot = (lng * 180 / math.pi, lat * 180 / math.pi)
    return center_dot
bf7012b5c622e05734e394c912b7f000a3119fca
701,349
def __find_yzwing_candidates(sboard, current_cell_name, num_values,
                             num_intersection_values):
    """ A support method to find candidate cells to be ywing or xyzwing pincers. """
    candidates = []
    current_cell = sboard.getCell(current_cell_name)
    current_value_set = current_cell.getValueSet()
    associated_cells = sboard.getAssociatedCellIds(current_cell_name)
    for associated_cell_name in associated_cells:
        associated_cell = sboard.getCell(associated_cell_name)
        associated_cell_value_set = associated_cell.getValueSet()
        intersection_value_set = current_value_set & associated_cell_value_set
        if (len(associated_cell_value_set) == num_values and
                len(intersection_value_set) > num_intersection_values):
            candidates.append(associated_cell)
    return candidates
34e45339eeee158827e8633cdab33ae80e08b3d0
701,350
def index():
    """Video streaming home page."""
    return "Select a spell to cast."
af7ade462eed869fa4d75722bc46e5beb199bf09
701,351
def generateEmailFlags(emailFlagTuples):
    """ Takes emailFlagTuples and generates the part of the script case
    statement which accepts email flags. """
    emailFlagString = ""
    for emailFlagTuple in emailFlagTuples:
        # add a new line and indent
        emailFlagString += "\n "
        # add flag character
        emailFlagString += emailFlagTuple[1]
        # close case condition and setup email variable
        emailFlagString += r''') email="\"'''
        # input email address
        emailFlagString += emailFlagTuple[0]
        # end case statement
        emailFlagString += r'''\"" ;;'''
    return emailFlagString
d83ae2f8a135218351fb20af378fe3bf702a2344
701,353
import logging

import requests


def export_jira_data(jira_base_url, jira_credentials_file, jira_filter, xml_filename):
    """Export XML assessment data from Jira to a file.

    Parameters
    ----------
    jira_base_url: str
        The base URL of the Jira server that houses the assessment data.

    jira_credentials_file : str
        The text file containing the username and password for the Jira
        account with access to the specified Jira FILTER.
        File format:
        username
        password

    jira_filter : str
        The ID of the Jira filter that produces the desired XML assessment
        data output.

    xml_filename : str
        The name of the file to store the XML assessment data in.

    Returns
    -------
    bool : Returns a boolean indicating if the assessment data export was
    successful.

    """
    # Grab Jira credentials from jira_credentials_file
    with open(jira_credentials_file, "r") as f:
        lines = f.readlines()
    jira_username = lines[0].rstrip()
    jira_password = lines[1].rstrip()

    jira_url = (
        f"{jira_base_url}/sr/jira.issueviews:searchrequest-xml/"
        f"{jira_filter}/SearchRequest-{jira_filter}.xml"
    )

    # Export XML data from Jira
    try:
        response = requests.get(
            jira_url,
            auth=(jira_username, jira_password),
            # We need to add a nosec tag here because we are manually disabling
            # certificate verification.  We have to do this because the requests
            # package is unable to verify the certificate used by the Jira
            # server.
            verify=False,  # nosec
        )
        with open(xml_filename, "w") as xml_output:
            xml_output.write(response.text)
        logging.info(
            f"Successfully downloaded assessment XML data from {jira_base_url}"
        )
        return True
    except Exception as err:
        logging.critical(
            f"Error downloading assessment XML data from {jira_base_url}\n\n{err}\n"
        )
        return False
277bea9aa0f959a78f249b1382822fd768a0272f
701,354
def apply_operations(context_arrays, operations):
    """
    :param context_arrays: numpy arrays to be transformed into a format such
        that can be fed into a TensorFlow op graph
    :return: tensor-ready arrays
    """
    preprocessed_context_arrays = []
    for context_array in context_arrays:
        preprocessed_features = {}
        for key, features_array in context_array.items():
            if key in operations:
                if operations[key] is not None:
                    preprocessed_features[key] = operations[key](features_array)
                else:
                    preprocessed_features[key] = features_array
        preprocessed_context_arrays.append(preprocessed_features)
    return preprocessed_context_arrays
027402a2721481e42cef7588603fe2df2e5f22cb
701,355
def _table_xml(x, y, w, h, bClosed=True, sXmlCells=None):
    """ XML Table element as a string """
    if bClosed:
        # close the list of coordinates
        sPoints = "%d,%d %d,%d %d,%d %d,%d %d,%d" % (x, y, x + w, y, x + w, y + h, x, y + h, x, y)
    else:
        sPoints = "%d,%d %d,%d %d,%d %d,%d" % (x, y, x + w, y, x + w, y + h, x, y + h)
    if sXmlCells is None:
        sXmlCells = """<cell start-row='0' start-col='0' end-row='1' end-col='2'>
            <Coords points="180,160 177,456 614,456 615,163"/>
        </cell>"""
    sXml = """
<table>
    <Coords points="%s"/>
    %s
</table>
""" % (sPoints, sXmlCells)
    return sXml
51e25bda4b20019ee7cfa593324b019d7e2c0f9e
701,356
def _fix_filename(filename):
    """Return a filename which we can use to identify the file.

    The file paths printed by llvm-cov take the form:

        /path/to/repo/out/dir/../../src/filename.cpp

    And then they're truncated to 22 characters with leading ellipses:

        ...../../src/filename.cpp

    This makes it really tough to determine whether the file actually belongs
    in the Skia repo.  This function strips out the leading junk so that, if
    the file exists in the repo, the returned string matches the end of some
    relative path in the repo. This doesn't guarantee correctness, but it's
    about as close as we can get.
    """
    return filename.split('..')[-1].lstrip('./')
d4b005f591879aab44275d100e725f61b5d6a764
701,357
def testFloat(val):
    """
    Test value for float. Used to detect use of variables, strings and none
    types, which cannot be checked.
    """
    try:
        return type(float(val)) == float
    except Exception:
        return False
19f3eac980b4489b7d14b1f994002a887f063ac7
701,358
import errno
import re
import subprocess


def disassemble_shcbin(binfile, objdumpcmd):
    """Extract a disassembly dump of a compiled shellcode binary file

    Return a list of (bytes, instruction) tuples
    """
    # Disassemble the .shcode code section
    cmd = [objdumpcmd, '-d', '-j', '.shcode', binfile]
    try:
        output = subprocess.check_output(cmd)
    except subprocess.CalledProcessError:
        print("Unable to disassemble {}, ignoring.".format(binfile))
        return None
    except OSError as exc:
        if exc.errno == errno.ENOENT:
            print("Unable to find {} command.".format(objdumpcmd))
            return None
        raise

    instructions = []
    for line in output.decode('ascii', 'ignore').splitlines():
        matches = re.match(r'\s*[0-9a-f]+:\s*([0-9a-f ]+)(\t.+)?$', line)
        if matches is not None:
            hexby, asm = matches.groups()
            hexby = hexby.strip()
            if asm is not None:
                # Remove auto-generated comment
                asm = asm.split(';')[0].strip()
                asm = asm.split('// #')[0].strip()
                instructions.append((hexby, asm))
            elif instructions:
                # Continued instruction
                lastinstr = instructions[-1]
                instructions[-1] = (lastinstr[0] + ' ' + hexby, lastinstr[1])
    return instructions
3411340910324e7cbeedba6f68c7024bc462f4f6
701,359
import json
import os


def load_genre(metadata_path):
    """Load beatport_key genre data from a file

    Args:
        metadata_path (str): path to metadata annotation file

    Returns:
        (dict): with the list of strings with genres ['genres'] and list of
        strings with sub-genres ['sub_genres']
    """
    if metadata_path is None:
        return None
    if not os.path.exists(metadata_path):
        raise IOError("metadata_path {} does not exist".format(metadata_path))

    with open(metadata_path) as json_file:
        meta = json.load(json_file)

    return {
        "genres": [genre["name"] for genre in meta["genres"]],
        "sub_genres": [genre["name"] for genre in meta["sub_genres"]],
    }
a0c80a37360ddaa0390ad496cc2408e71df59df1
701,361
def get_reverse(sequence):
    """Reverse orientation of `sequence`.

    Returns a string with `sequence` in the reverse order.

    If `sequence` is empty, an empty string is returned.
    """
    # Convert all rna_sequence to upper case:
    sequence = sequence.upper()
    # reverse rna sequence:
    rna_rev_list = sequence[::-1]
    return rna_rev_list
a3816b66ad6e6f8a1bb963bc13bf4c656b3d9c79
701,362
import os


def is_gnome():
    """
    Check if current DE is GNOME or not.

    On Ubuntu 20.04, $XDG_CURRENT_DESKTOP = ubuntu:GNOME
    On Fedora 34, $XDG_CURRENT_DESKTOP = GNOME
    Hence we do the detection by looking for the word "gnome"
    """
    # Default to "" so the check doesn't raise KeyError when the variable is unset
    return "gnome" in os.environ.get("XDG_CURRENT_DESKTOP", "").lower()
730e1b4468194cd15a716eed0aad4e72812cfb70
701,363
import torch


def nll_catogrical(preds, target, add_const=False):
    """compute the loglikelihood of discrete variables"""
    total_loss = 0
    for node_size in range(preds.size(1)):
        total_loss += -(
            torch.log(preds[:, node_size, target[:, node_size].long()])
            * target[:, node_size]
        ).mean()
    return total_loss
2343e2adff44c89e73c35cacdcc55180b8029611
701,364
def to_xhr_response(request, non_xhr_result, form):
    """
    Return an XHR response for the given ``form``, or ``non_xhr_result``.

    If the given ``request`` is an XMLHttpRequest then return an XHR form
    submission response for the given form (contains only the ``<form>``
    element as an HTML snippet, not the entire HTML page).

    If ``request`` is not an XHR request then return ``non_xhr_result``,
    which should be the result that the view callable would normally return
    if this were not an XHR request.

    :param request: the Pyramid request

    :param non_xhr_result: the view callable result that should be returned
        if ``request`` is *not* an XHR request

    :param form: the form that was submitted
    :type form: deform.form.Form
    """
    if not request.is_xhr:
        return non_xhr_result
    request.override_renderer = 'string'
    return form.render()
e32c94ecb3a5ce12c81ea312647b8bf527007bfc
701,365
def optional(converter):
    """
    A converter that allows an attribute to be optional. An optional attribute
    is one which can be set to ``None``.

    :param callable converter: the converter that is used for non-``None``
        values.

    .. versionadded:: 17.1.0
    """

    def optional_converter(val):
        if val is None:
            return None
        return converter(val)

    return optional_converter
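
# Usage sketch (illustrative values): wrapping `int` passes None through
# untouched while converting everything else.
#   >>> conv = optional(int)
#   >>> conv(None) is None
#   True
#   >>> conv("42")
#   42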
128042c7a95bb91c665ab6ac0f6771e4a72632ed
701,366
import pickle


def get_challenge_by_channel_id(database, challenge_channel_id):
    """
    Fetch a Challenge object in the database with a given channel ID

    Return the matching Challenge object if found, or None otherwise.
    """
    with open(database, "rb") as db_file:
        ctfs = pickle.load(db_file)
    for ctf in ctfs:
        for challenge in ctf.challenges:
            if challenge.channel_id == challenge_channel_id:
                return challenge
    return None
9ae3fc3519b61c6e9a1abceb20b8882e2e29ca48
701,367
def combine_orderings(ordering_1, ordering_2):
    """
    Function to combine two orderings.

    Example 1:
        ordering_1 = ((7,2), 'X')
        ordering_2 = ((6,5),)
        combined_ordering = ((7,2),(6,5))

    Example 2:
        ordering_1 = ((7,2), 'X', 'X')
        ordering_2 = ((6,5), 'X')
        combined_ordering = ((7,2),(6,5), 'X')

    Example 3:
        ordering_1 = ('X', (7,2), 'X')
        ordering_2 = ((6,5), 'X')
        combined_ordering = ((6,5),(7,2), 'X')
    """
    if ordering_1 is None:
        return ordering_2
    if ordering_2 is None:
        return ordering_1

    idx_2 = 0
    combined_ordering = []
    for idx_1, elem in enumerate(ordering_1):
        if elem == 'X':
            combined_ordering.append(ordering_2[idx_2])
            idx_2 += 1
        else:
            combined_ordering.append(ordering_1[idx_1])
    return combined_ordering
c5faed01c387a6c958dfed324da5bfa1bb2b06bd
701,368
def whoami_fn(request):
    """
    Test-route to validate token and nickname from headers.

    :return: welcome-dict
    """
    nickname = request.validated["user"].nickname
    return {
        "status": "ok",
        "nickname": nickname,
        "message": "Hello " + nickname + ", nice to meet you."
    }
c8d23a20a6d4f56832e45739ffb81d3aca398bed
701,369
import json


def save_tracks(input_tracks, out_filename):
    """Saves smoothed tracks to the specified file.

    :param input_tracks: List of input tracks
    :param out_filename: File to save tracks
    """

    def _convert_track(input_track):
        """Converts event from internal representation to dict format

        :param input_track: List of events
        :return: Event as dict
        """
        return [event._asdict() for event in input_track]

    out_tracks = {}
    for track_id in input_tracks:
        gt_events, pred_events = input_tracks[track_id]
        converted_gt_events = _convert_track(gt_events)
        converted_pred_events = _convert_track(pred_events)
        out_tracks[track_id] = {'gt': converted_gt_events,
                                'pred': converted_pred_events}

    with open(out_filename, 'w') as outfile:
        json.dump(out_tracks, outfile)
884d2e1906f53c4dc66f21536d7e666920016ccf
701,370
def verificar(palavra, entrada):
    """
    Checks whether the user's guess matches the drawn word. If it does not,
    control returns to the main function and the iteration continues.

    Input: palavra, entrada (string, input/string).
    Output: bool.
    """
    return palavra == entrada
b1de59f316d3ba63c73979f2e5cc9584a20e291f
701,371
def qstr(s, validate=True):
    """Return a quoted string after escaping '\\' and '"' characters.

    When validate is set to True (default), the string must consist only of
    7-bit ASCII characters excluding NULL, CR, and LF.
    """
    if validate:
        s.encode('ascii')
        if '\0' in s or '\r' in s or '\n' in s:
            raise ValueError('string contains NULL, CR, or LF characters')
    return '"' + s.replace('\\', '\\\\').replace('"', '\\"') + '"'
2805c2aff61294cafe6719e9a8bd93082d9603df
701,372
import random


def random_permutation(iterable, r=None):
    """random_permutation(iterable, r = None) -> tuple

    Arguments:
        iterable: An iterable.
        r(int): Size of the permutation. If :const:`None` select all elements
            in `iterable`.

    Returns:
        A random element from ``itertools.permutations(iterable, r = r)``.

    Examples:
        >>> random_permutation(range(2)) in {(0, 1), (1, 0)}
        True
        >>> random_permutation(range(10), r = 2) in permutations(range(10), r = 2)
        True
    """
    pool = tuple(iterable)
    r = len(pool) if r is None else r
    return tuple(random.sample(pool, r))
7e84d33b62786d08443dc5ea2c66ff65ced3070c
701,373
def stringToAscii(sequence):
    """sequence is a sequence of characters. Return the string with the hex
    representation for each character"""
    return "".join("%02x" % ord(c) for c in sequence).upper()
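
# Usage sketch (illustrative value): 'H' is 0x48 and 'i' is 0x69.
#   >>> stringToAscii("Hi")
#   '4869'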
19f0c5a057c6176cee7e0f6e7ac5e8834ea0d650
701,374
def language_to_flag(code):
    """Generates css flag class for the language code"""
    if code.lower() == 'en':
        return 'flag-icon flag-icon-us'
    return 'flag-icon flag-icon-' + code.lower()
8d81d1e1cdac675a6bdf94bb22ac6f45993338db
701,375
from typing import List


def add_percentage_value(l: int, to: List[list]) -> List[list]:
    """
    Append each entry's share of the total to a taxonomy list produced by
    counter.most_common.

    :param l: total count used as the denominator
    :param to: taxonomy list; each entry's count is at index 2
    :return: the list with the ratio appended to each entry
    """
    for d in to:
        d.append(d[2] / l)
    return to
0744c47d0d616e3a4fef85bc341ae0ed98bb0695
701,376
import collections
import re


def parse_cmu(cmufh):
    """Parses an incoming file handle as a CMU pronouncing dictionary file.

    (Most end-users of this module won't need to call this function
    explicitly, as it's called internally by the :func:`init_cmu` function.)

    :param cmufh: a filehandle with CMUdict-formatted data
    :returns: a dict mapping each word to a list of its pronunciations
        (each a string of phones)
    """
    pronunciations = collections.defaultdict(list)
    regexp = re.compile(r'\(\d\)$')
    for line in cmufh:
        line = line.strip().decode('latin1')
        if line.startswith(';'):
            continue
        word, phones = line.split("  ")
        word = regexp.sub('', word.lower())
        pronunciations[word.lower()].append(phones)
    return pronunciations
6229bf41bb3a11ffaf1f5f45dfd55d6e051b0e45
701,377
def truncate_patch_version(version):
    """Return just the major and minor versions from `version`."""
    split_version = version.split(".")
    return "{}.{}".format(split_version[0], split_version[1])
d4eb7f23aeab17f862b13109315a417039eb6897
701,378
def CGz(Omega, k, l, m, f, N2, w=0):
    """
    Vertical Group Speed (includes vertical flow but is 0 by default)
    """
    K2 = k**2 + l**2 + m**2
    return (-1 * (k**2 + l**2) * m * (N2 - f**2)) / (K2**2 * Omega)
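
# Sanity note: this expression appears consistent with differentiating the
# internal-wave dispersion relation Omega^2 = (N2*(k^2 + l^2) + f^2*m^2) / K2
# with respect to m, which gives
#   dOmega/dm = -(k^2 + l^2) * m * (N2 - f^2) / (K2^2 * Omega).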
f20f80dcfca91048f1c47d0ca1270b81ae56819f
701,379
def get_key_value_list(lines):
    """
    Split lines at the first space.

    :param lines: lines from trackhub file
    :return: [(name, value)] where name is before first space and value is
        after first space
    """
    result = []
    for line in lines:
        line = line.strip()
        if line:
            parts = line.split(" ", 1)
            name = parts[0]
            value = parts[1]
            result.append((name, value))
    return result
aa2d1c3b100c963a50c05cfaf684df5bbed612bd
701,380
def summarize_results(result, task):
    """ creates a summary of the result as dict """
    summary = {
        # 'Csc': data['pairwise_contact_matrix'],
        # 'Cpop': data['population_contact_matrix'],
        # 'Xmean': data['prior_shape_model'],
        'n': result.n,
        'd': result.d,
        'a': result.a,
        'b': result.b,
        'E_evol': result.e,
        'X_evol': result.XK,
        'computation_time': result.computation_time,
        'method': result.method,
        'initialized_curve': result.initialized_curve,
    }
    if task['store_task']:
        summary['json_task'] = task
    if 'nonspecified_zeros_as_missing' in task:
        summary['nonspecified_zeros_as_missing'] = task['nonspecified_zeros_as_missing']
    if 'uuid' in task:
        summary['uuid'] = task['uuid']
    for term in result.term_weights.keys():
        summary['parameters_term_weights_' + term.replace(' ', '_')] = result.term_weights[term]
        print(term + ' = ' + str(summary['parameters_term_weights_' + term.replace(' ', '_')]))
    return summary
dca60dcbef0bb747d288f60bcbef6b6531c2094e
701,381
def get_data_field(thing_description, data_field_list):
    """Get the field specified by 'data_field_list' from each thing description

    Args:
        data_field_list(list): list of str that specified the hierarchical
            field names. For example, if the parameter value is
            ['foo', 'bar', 'foobar'], then this function will try to get
            thing_description['foo']['bar']['foobar'] and return the value.
            If any of the field does not exist, an error will occur

    Returns:
        object: the content specified by the data field
    """
    for data_field in data_field_list:
        thing_description = thing_description[data_field]
    return thing_description
f0ef0a46fbcafa993e01f59349c1283b51bbd393
701,382
import numpy as np


def get_anchor_labels(anchors, coords, config):
    """
    Generates the anchor labels for training the PPN. Returns y_conf, y_reg.

    anchors
        The list of anchor coordinates generated from get_anchors().
    coords
        The list of ground truth point coordinates.
    config
        The configuration dictionary. See ppn.config.ppn_config.
    """
    r_near = config['r_near']
    r_far = config['r_far']
    img_size = config['image_size']
    feature_size = config['feature_size']
    step = img_size / feature_size
    halfstep = step * 0.5

    y_conf = np.full(anchors.shape[0], -1, dtype=np.int8)
    y_reg = np.zeros(anchors.shape)

    # For each point, find the nearest anchor and calculate the distance.
    # This ensures that most points have an associated anchor.
    for (x, y) in coords:
        x_norm = (x - halfstep) / step
        y_norm = (y - halfstep) / step
        r = int(np.round(y_norm))
        c = int(np.round(x_norm))
        anchor_index = r * feature_size + c
        y_conf[anchor_index] = 1
        y_reg[anchor_index][0] = (x - anchors[anchor_index][0]) / step
        y_reg[anchor_index][1] = (y - anchors[anchor_index][1]) / step

    # for each anchor, calculate the distances to each point
    for i in range(0, len(anchors)):
        x, y = anchors[i]
        x /= step
        y /= step
        distances = []
        for (px, py) in coords:
            px /= step
            py /= step
            distances.append(np.sqrt((x - px)**2 + (y - py)**2))
        if len(distances) > 0:
            near = np.argmin(distances)
            dist = distances[near]
            if dist <= r_near:
                y_conf[i] = 1
                px, py = coords[near]
                px /= step
                py /= step
                y_reg[i][0] = (px - x)
                y_reg[i][1] = (py - y)
            elif dist > r_far:
                y_conf[i] = 0

    # reshape for use in PPN training
    y_conf = np.reshape(y_conf, (feature_size, feature_size))
    y_reg = np.reshape(y_reg, (feature_size, feature_size) + (2,))

    return y_conf, y_reg
c12e25dc672f48d7bd98255dd8a84c2ec4310a1d
701,383
def get_speed_formatted_str(speed):
    """ Returns the speed with always two whole numbers and two decimal value.
    Example: 03.45

    Args:
        speed (float): The actual speed of the car

    Returns:
        str: The text format of the speed
    """
    speed_str = "{:0.2f}".format(round(speed, 2))
    return speed_str.zfill(5)
c05d20f568950f8236f9e46e90387e3a71090589
701,384
def ccw(A, B, C):
    """ Check if a point C is counter-clockwise to AB. """
    return (C[1] - A[1]) * (B[0] - A[0]) > (B[1] - A[1]) * (C[0] - A[0])
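
# Usage sketch (illustrative values): (0, 1) lies to the left of the ray
# from (0, 0) to (1, 0), i.e. the turn A -> B -> C is counter-clockwise.
#   >>> ccw((0, 0), (1, 0), (0, 1))
#   True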
c1afb4e510be6a85ad7de1aa924917e37b227dbe
701,385
import logging
import subprocess


def stack_remove(name: str):
    """Removes a stack. Due to limitations of the Docker SDK this calls the
    actual Docker CLI."""
    # subprocess.call (not unittest.mock.call) runs the command and returns
    # its integer exit code directly.
    returncode = subprocess.call(["docker", "stack", "rm", name])
    if returncode != 0:
        logging.error("`docker stack remove` exited with non-zero exit code.")
        return False
    return True
9ad76d789b9d749119826688902acb08752eeabc
701,386
def AsQuotedString(input_string):
    """Convert |input_string| into a quoted string."""
    subs = [
        ('\n', '\\n'),
        ('\t', '\\t'),
        ("'", "\\'"),
    ]
    # Go through each substitution and replace any occurrences.
    output_string = input_string
    for before, after in subs:
        output_string = output_string.replace(before, after)
    # Lastly wrap the string in quotes.
    return "'%s'" % output_string
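
# Usage sketch (illustrative value): a real tab becomes the two characters
# backslash-t, and the result is wrapped in single quotes.
#   >>> AsQuotedString("a\tb")
#   "'a\\tb'"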
edf344cae423e13a6cfc04e960674c954b5eb7ee
701,387
from typing import Counter


def check_cardinality(df, cat_cols, threshold=8):
    """
    Check categorical cardinality

    Checks the cardinality of categorical features of a given dataset.
    Returns two dictionaries, one for features with low cardinality and
    another for features with high cardinality. The low/high cardinality
    criteria can be tuned with the `threshold` parameter.

    Parameters
    ----------
    df : pandas.DataFrame
        Dataset whose categorical features will be analyzed.
    cat_cols : list of str
        List of column names. The columns must be all categorical.
    threshold : int, optional
        Numeric criteria to separate low cardinality features from high
        cardinality ones. Default value is 8.

    Returns
    -------
    low_card : dict
        Dictionary containing the name of the low cardinality features as
        keys and their cardinality as values.
    high_card : dict
        Dictionary containing the name of the high cardinality features as
        keys and their cardinality as values.
    """
    high_card = {}
    low_card = {}
    for col in cat_cols:
        rank = len(Counter(df[col]))
        if rank <= threshold:
            low_card[col] = rank
        else:
            high_card[col] = rank
    return low_card, high_card
5b33a39c1da007de46ca409c8fb531b3b3600a7b
701,388
def create_occurrence(create_reservation):
    """Return a callable which lets you create reservation occurrences."""
    def _create_occurrence(start_dt=None, end_dt=None, room=None):
        params = {}
        if start_dt is not None:
            params['start_dt'] = start_dt
        if end_dt is not None:
            params['end_dt'] = end_dt
        if room is not None:
            params['room'] = room
        reservation = create_reservation(**params)
        return reservation.occurrences[0]

    return _create_occurrence
1b11afb19e8e1b266532dd9198c2d93010351f97
701,389
def parse_veh_comp(xmldoc):
    """parses the vehicle composition from the VISSIM data

    :param xmldoc: input VISSIM xml
    :type xmldoc: xml.dom.minidom.Document
    :return: relevant VISSIM vehicleComposition data
    :rtype: dict of list of dict
    """
    veh_cmp_d = dict()  # local vehicle compositions' dict
    for vehicle_comp in xmldoc.getElementsByTagName('vehicleComposition'):
        rel_flows = vehicle_comp.getElementsByTagName(
            'vehicleCompositionRelativeFlow')
        flow_l = []
        for flow in rel_flows:
            flw_d = {
                'desSpeedDistr': flow.getAttribute('desSpeedDistr'),
                'rel_flow': flow.getAttribute('relFlow'),
                'vehType': flow.getAttribute('vehType'),
            }
            flow_l.append(flw_d)  # list of dictionaries
        veh_cmp_d[vehicle_comp.getAttribute('no')] = flow_l
    return veh_cmp_d
195b2c8dcbd055d5c8e8fdb4f6b68f360c60c961
701,390
import time


def uniq_table_id():
    """Return a unique table ID based on the current time in nanoseconds.

    Returns:
        str: in format `U<timestamp_ns>`
    """
    return f'U{time.time_ns()}'
cffe47d5e8dfff192e2a754347534e02d4ba6a68
701,391
import inspect


def _get_real_env_hack_hack_hack():
    """
    Get the real, currently-being-configured libtbx.env environment.

    This is not libtbx.env, because although
    libtbx.env_config.environment.cold_start does:

        self.pickle()
        libtbx.env = self

    the first time there is an "import libtbx.load_env" this environment
    gets replaced by unpickling the freshly-written libtbx_env file onto
    libtbx.env, thereby making the environment accessed via libtbx.env *not*
    the actual one that is currently being constructed.

    So, the only way to get this environment being configured in order to -
    like - configure it, is to walk the stack trace and extract the self
    object from environment.refresh directly.
    """
    for frame in inspect.stack():
        if (
            frame.filename.endswith("env_config.py")
            and frame.function == "refresh"
            and "self" in frame.frame.f_locals
        ):
            return frame.frame.f_locals["self"]
    raise RuntimeError("Could not determine real libtbx.env_config.environment object")
f0fe71b5c0a3922300f9d6d81aeb6057e39dd4c3
701,392
def split_dataframe_by_position(df, splits):
    """
    Takes a dataframe and an integer of the number of splits to create.
    Returns a list of dataframes.
    """
    dataframes = []
    index_to_split = len(df) // splits
    # print(index_to_split)
    start = 0
    end = index_to_split
    for split in range(splits):
        temporary_df = df.iloc[start:end, :]
        dataframes.append(temporary_df)
        start += index_to_split
        print(start)
        end += index_to_split
        print(end)
        print()
    return dataframes
7536920125fa33fafd420c4fe7ef24aff2fa6ff3
701,393
import torch


def index_relation_types(dataset):
    """Classify relations into 1-N, M-1, 1-1, M-N.

    According to Bordes et al. "Translating embeddings for modeling
    multi-relational data.", NIPS13.

    Adds index `relation_types` with list that maps relation index to
    ("1-N", "M-1", "1-1", "M-N").
    """
    if "relation_types" not in dataset._indexes:
        # 2nd dim: num_s, num_distinct_po, num_o, num_distinct_so, is_M, is_N
        relation_stats = torch.zeros((dataset.num_relations(), 6))
        for index, p in [
            (dataset.index("train_sp_to_o"), 1),
            (dataset.index("train_po_to_s"), 0),
        ]:
            for prefix, labels in index.items():
                relation_stats[prefix[p], 0 + p * 2] = relation_stats[
                    prefix[p], 0 + p * 2
                ] + len(labels)
                relation_stats[prefix[p], 1 + p * 2] = (
                    relation_stats[prefix[p], 1 + p * 2] + 1.0
                )
        relation_stats[:, 4] = (relation_stats[:, 0] / relation_stats[:, 1]) > 1.5
        relation_stats[:, 5] = (relation_stats[:, 2] / relation_stats[:, 3]) > 1.5
        relation_types = []
        for i in range(dataset.num_relations()):
            relation_types.append(
                "{}-{}".format(
                    "1" if relation_stats[i, 4].item() == 0 else "M",
                    "1" if relation_stats[i, 5].item() == 0 else "N",
                )
            )
        dataset._indexes["relation_types"] = relation_types
    return dataset._indexes["relation_types"]
639059ecf319ce367a62db64edc1efebc8ef5050
701,394
import os


def patch_load_cfg(monkeypatch):
    """
    A fixture that returns a function which will patch 'utils.load_cfg_file'
    when called in a test

    The caller can specify the dict that should be returned when
    'load_cfg_file' is called against these two paths:

    * "templatesTEST/_cfg.yml"
    * "templatesTEST/service/_cfg.yml"
    """

    def _func(base_cfg_data, service_cfg_data):
        def _patched_load_cfg_file(path):
            if path.endswith(os.path.join("templatesTEST", "service", "_cfg.yml")):
                return service_cfg_data
            if path.endswith(os.path.join("templatesTEST", "_cfg.yml")):
                return base_cfg_data
            else:
                raise Exception("Unknown path passed to load_cfg_file")

        monkeypatch.setattr("ocdeployer.deploy.load_cfg_file", _patched_load_cfg_file)

    yield _func
0fb6436a7bc57e547542c92f8c51b62be7bb286e
701,395
def validate_enum(datum, schema, **kwargs):
    """
    Check that the data value matches one of the enum symbols.

    i.e "blue" in ["red", "green", "blue"]

    Parameters
    ----------
    datum: Any
        Data being validated
    schema: dict
        Schema
    kwargs: Any
        Unused kwargs
    """
    return datum in schema["symbols"]
689fef653b757435d45e76afbf16245d0d53839f
701,396
import re


def get_valid_filename(s):
    """Sanitize string to make it reasonable to use as a filename.

    From https://github.com/django/django/blob/master/django/utils/text.py

    Parameters
    ----------
    s : string

    Examples
    --------
    >>> print get_valid_filename(r'A,bCd $%#^#*!()"\' .ext ')
    'a_bcd__.ext'
    """
    s = re.sub(r'[ ,;\t]', '_', s.strip().lower())
    return re.sub(r'(?u)[^-\w.]', '', s)
a8161a16d0bd8ad0c5d9ff20c56b52fbdba2d859
701,397
import numpy


def utils_fft(series):
    """
    Computes the fast Fourier transform.

    Parameters
    ----------
    Input
        series : array_like
    Output
        The transformed array.
    """
    return numpy.fft.fft(series)
a9c9dd3b405b7ef185125576386ac9fc008ea76f
701,399
from typing import List, Literal


def get_foot_marker(foot: Literal["left", "right"]) -> List[str]:
    """Get the names of all markers that are attached to a foot (left or right)"""
    sensors = ["{}_fcc", "{}_toe", "{}_fm5", "{}_fm1"]
    return [s.format(foot[0]) for s in sensors]
518fbb3f68cbf8622b2bf1fa85f9ecae8008c456
701,401
def get_price_history(data, date, beta_window, sid, benchmark):
    """
    Create a DataFrame containing the data for the necessary sids within
    that time frame
    """
    history_index = data.index.searchsorted(date)
    if not beta_window:
        history_index_start = data.index.searchsorted(data[data[sid] != 0].index[0])
        historical_prices = data.iloc[history_index_start:history_index][[sid, benchmark]]
    else:
        history_index_start = max([history_index - beta_window, 0])
        historical_prices = data.iloc[history_index_start:history_index][[sid, benchmark]]
        historical_prices = historical_prices[historical_prices[sid] != 0]
    return historical_prices[historical_prices != 0].dropna()
cddb1e59dbc783e4b36c0246e1d90e78169c6445
701,402
import six


def _validate_string(s, accept_none=False):
    """
    A validation method to convert input s to string or raise error if it is
    not convertible
    """
    if s is None and accept_none:
        return None
    try:
        if isinstance(s, list):
            return [six.text_type(item) for item in s]
        elif isinstance(s, dict):
            return dict((six.text_type(key), six.text_type(value))
                        for key, value in s.items())
        else:
            return six.text_type(s)
    except ValueError:
        raise ValueError('Could not convert "%s" to string' % s)
0c7b20884a27714acb0c16bddcbc113bd3a8c60e
701,403