content: string (lengths 35 to 416k)
sha1: string (length 40)
id: int64 (0 to 710k)
async def agather(aiter):
    """Gather an async iterator into a list"""
    lst = []
    async for elem in aiter:
        lst.append(elem)
    return lst
0a7f278d38237a722724572b6705deab429c8e70
698,407
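A minimal usage sketch for agather above (hypothetical async generator; Python 3.7+):

import asyncio

async def gen():
    for i in range(3):
        yield i

async def main():
    print(await agather(gen()))  # [0, 1, 2]

asyncio.run(main())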
from datetime import datetime
import pytz

def now():
    """Returns the ISO 8601 formatted time in UTC"""
    return datetime.now(pytz.utc).isoformat()
e423e8555315f551f284c92f4fee5b3f92c1f41d
698,408
def r_value(aa1, aa2, aa_dict, matrix):
    """
    :param aa1: first amino acid code
    :param aa2: second amino acid code
    :param aa_dict: mapping of amino acid to column index in matrix
    :param matrix: numeric property matrix (one row per property)
    :return: mean squared column difference between aa1 and aa2
    """
    return sum([(matrix[i][aa_dict[aa1]] - matrix[i][aa_dict[aa2]]) ** 2
                for i in range(len(matrix))]) / len(matrix)
034d1d7eb0baa63cd5bd4572c87f86bcc8774de4
698,409
def init(param_test):
    """
    Initialize class: param_test
    """
    # initialization
    default_args = ['-t mt/label/ -w mt/warp_template2mt.nii.gz -gm mt/mt1_gmseg.nii.gz '
                    '-wm mt/mt1_wmseg.nii.gz -manual-gm mt/mt1_gmseg_goldstandard.nii.gz '
                    '-sc mt/mt1_seg.nii.gz '
                    '-param step=1,type=seg,algo=centermassrot,metric=MeanSquares:'
                    'step=2,type=im,algo=syn,metric=MeanSquares,iter=3,smooth=0,shrink=2']

    # assign default params
    if not param_test.args:
        param_test.args = default_args
    return param_test
cd0b7e61cabda521c0f07e72e74316e7766ea2d2
698,410
def calculateProgressMetrics(job):
    """
    job has the most recent values updated for the current event being processed.
    Calculate status and progress metrics using the new information.

    :param job: JSON job data structure from dynamodb
    """
    progressMetrics = {}

    # BASE TIMES FROM EVENTS
    #   createTime = timestamp of CREATE event
    #   firstProgressingTime = timestamp of earliest PROGRESSING event
    #   lastProgressingTime = timestamp of latest PROGRESSING event or COMPLETE event
    #   lastStatusTime = timestamp of latest status update or COMPLETE event
    #   completeTime = timestamp of COMPLETE event
    #   lastTime = latest timestamp seen so far

    # BASE METRICS FROM EVENTS
    #   framesDecoded = most recent STATUS event frames decoded or frame count if COMPLETE event
    #   ['analysis'] frameCount = frame count from CREATE event
    if 'progressMetrics' in job and 'framesDecoded' in job['progressMetrics']:
        progressMetrics['framesDecoded'] = job['progressMetrics']['framesDecoded']
    if 'analysis' in job and 'frameCount' in job['analysis']:
        progressMetrics['frameCount'] = job['analysis']['frameCount']

    # CALCULATED METRICS
    # percentDecodeComplete = framesDecoded / frameCount * 100
    # framesRemaining = frameCount - framesDecoded
    if 'framesDecoded' in progressMetrics:
        if 'analysis' in job and 'frameCount' in job['analysis']:
            # progressMetrics['percentDecodeComplete'] \
            #     = job['progressMetrics']['framesDecoded'] / job['analysis']['frameCount'] * 100
            progressMetrics['framesRemaining'] \
                = job['analysis']['frameCount'] - job['progressMetrics']['framesDecoded']

    # queuedDuration = firstProgressingTime - createTime
    if 'firstProgressingTime' in job['eventTimes'] and 'createTime' in job['eventTimes']:
        progressMetrics['queuedDuration'] \
            = job['eventTimes']['firstProgressingTime'] - job['eventTimes']['createTime']

    # progressingDuration = lastProgressingTime - firstProgressingTime
    if 'firstProgressingTime' in job['eventTimes'] and 'lastProgressingTime' in job['eventTimes']:
        progressMetrics['progressingDuration'] \
            = job['eventTimes']['lastProgressingTime'] - job['eventTimes']['firstProgressingTime']

    # statusDuration = lastStatusTime - firstProgressingTime
    if 'firstProgressingTime' in job['eventTimes'] and 'lastStatusTime' in job['eventTimes']:
        progressMetrics['statusDuration'] \
            = job['eventTimes']['lastStatusTime'] - job['eventTimes']['firstProgressingTime']

    # decodeDuration = decodeTime - firstProgressingTime
    if 'firstProgressingTime' in job['eventTimes'] and 'decodeTime' in job['eventTimes']:
        progressMetrics['decodeDuration'] \
            = job['eventTimes']['decodeTime'] - job['eventTimes']['firstProgressingTime']

    # decodeRate = framesDecoded / statusDuration
    if 'framesDecoded' in progressMetrics and 'statusDuration' in progressMetrics:
        progressMetrics['decodeRate'] = progressMetrics['framesDecoded'] / progressMetrics['statusDuration']

    # estDecodeTimeRemaining = framesRemaining / decodeRate
    if 'decodeRate' in progressMetrics and progressMetrics['decodeRate'] > 0 \
            and 'framesRemaining' in progressMetrics:
        progressMetrics['estDecodeTimeRemaining'] = progressMetrics['framesRemaining'] / progressMetrics['decodeRate']

    return progressMetrics
73524e53f5dd77c81cd0e9f6239588e41b9bc841
698,411
def rescale_boxes(boxes, current_dim, original_shape):
    """Rescales bounding boxes to the original shape"""
    orig_h, orig_w = original_shape
    # The amount of padding that was added
    pad_x = max(orig_h - orig_w, 0) * (current_dim / max(original_shape))
    pad_y = max(orig_w - orig_h, 0) * (current_dim / max(original_shape))
    # Image height and width after padding is removed
    unpad_h = current_dim - pad_y
    unpad_w = current_dim - pad_x
    # Rescale bounding boxes to dimension of original image
    boxes[:, 0] = ((boxes[:, 0] - pad_x // 2) / unpad_w) * orig_w
    boxes[:, 1] = ((boxes[:, 1] - pad_y // 2) / unpad_h) * orig_h
    boxes[:, 2] = ((boxes[:, 2] - pad_x // 2) / unpad_w) * orig_w
    boxes[:, 3] = ((boxes[:, 3] - pad_y // 2) / unpad_h) * orig_h
    return boxes
769264b54e0a2dc438d78942e4c86f46c1b6470e
698,412
import socket

def tcp_socket_open(host, port):
    """Returns True if there is an open TCP socket at the given host/port"""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.settimeout(1)
    try:
        return sock.connect_ex((host, port)) == 0
    except socket.timeout:
        return False
    finally:
        sock.close()  # avoid leaking the socket descriptor
11519e5d9f2d1de39d8091af011f0458e556021c
698,413
def variance(data):
    """Assumes that an array of integers is passed"""
    mean = sum(data) / len(data)
    tot = 0.0
    for d in data:
        tot += (d - mean) ** 2
    return tot / len(data)  # divide by n, not by the mean
3329792cf7043ad6e68cad39d6a07bf2909d1496
698,414
from typing import Optional, List
import math
import random

def sample_from_range(range_max: int, sample_ratio: float, max_samples: int,
                      preselected: Optional[List[int]]) -> List[int]:
    """
    Given a range of numbers in 0..range_max, return random samples.
    Count of samples is set by sample_ratio, up to max_samples.
    If preselected is passed, include these indexes first.
    """
    available_indexes = list(range(range_max))
    sample_count = min(math.floor(range_max * sample_ratio), max_samples)
    if preselected:
        chosen = list(preselected)
        for i in preselected:
            available_indexes.remove(i)
        sample_count = max(sample_count - len(preselected), 0)
    else:
        chosen = []
    if sample_count > 0:
        # note: random.choices samples with replacement, so duplicates are
        # possible; use random.sample if distinct indexes are required
        chosen += random.choices(available_indexes, k=sample_count)
    return chosen
8be7b5ded3b6f3b54da57027a5d7629a9bf5dc9f
698,415
def occ(s1, s2):
    """occ(s1, s2) - returns the number of times that s2 occurs in s1"""
    count = 0
    start = 0
    while True:
        search = s1.find(s2, start)
        if search == -1:
            break
        else:
            count += 1
            start = search + 1
    return count
4f107e5552d794e9e82a8a6b19ec63992034e9f7
698,416
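Because the scan resumes at search + 1, occ above counts overlapping matches, unlike str.count. A quick check (hypothetical strings):

print(occ("aaaa", "aa"))   # 3 (overlapping occurrences)
print("aaaa".count("aa"))  # 2 (non-overlapping, for contrast)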
import functools
import operator

def prod(x):
    """Equivalent of sum but with multiplication."""
    # http://stackoverflow.com/a/595396/1088938
    return functools.reduce(operator.mul, x, 1)
31f4defa3401d6dcf05f8ec70c32650f000a1852
698,417
def indent(text, prefix=" "):
    """
    Add a prefix to every line in a string.
    """
    return "\n".join(prefix + line for line in text.splitlines())
e83cc0b14c5b8c304f6e41bf145b06b1de451e8c
698,418
def get_resblock(model, layer_name):
    """
    model is a class resnet
    e.g. layer_name is "layer1.0.relu2"
    """
    obj_name, b_idx, act_name = layer_name.split(".")
    b_idx = int(b_idx)
    block = getattr(model, obj_name)[b_idx]
    return block
a4ae6c65df336b3dd36b53e9dd348a76b4a47b89
698,419
def compute_rolling_mean(df, window=30):
    """Return rolling mean of given values, using specified window size."""
    df['SMA_{}'.format(str(window))] = df['Price'].rolling(window=window, center=False).mean()
    return df
36048112a720e0bdd66d2dfeeac6fb5cab62fa46
698,420
def FindClientNode(mothership):
    """Search the mothership for the client node."""
    nodes = mothership.Nodes()
    assert len(nodes) == 2
    if nodes[0].IsAppNode():
        return nodes[0]
    else:
        assert nodes[1].IsAppNode()
        return nodes[1]
06ac3bcde5571ca425d47885ad5839f2aa32ca0d
698,421
import textwrap

def to_flatimage(pathfile, to="(-3,0,0)", width=8, height=8, name="temp"):
    """
    Adds a flat image to the canvas.
    """
    text = rf"""
    \node[canvas is xy plane at z=0] ({name}) at {to}{{
        \includegraphics[width={width}cm,height={height}cm]{{{pathfile}}}
    }};
    """
    return textwrap.dedent(text=text)
61906516d85ba3311506067ad4ec81b32786f1b9
698,422
def CalculateMSE(xs_, mu):
    """
    Calculate the mean squared error.

    xs_: sample means
    mu: population mean
    """
    return sum([(x - mu) ** 2 for x in xs_]) / len(xs_)
45e90b003e05d54fdfa13c82b1f1332d0e68082a
698,423
def _get_primary_status(row):
    """Get package primary status."""
    try:
        return row.find('div', {'class': 'pack_h3'}).string
    except AttributeError:
        return None
1df5b66e9e16e17b8c5f3fc019a587a4320cf6d0
698,424
def calc_n_g(df_schedule, week):
    """Calculate list of games i.e. Ng

    Each row has home_id, away_id, home_total_points, away_total_points

    :param df_schedule: data frame with each matchup
    :param week: current matchup period id
    :return: list of games with team ids and scores for home/away
    """
    df_scores = (
        df_schedule
        .query(f'matchupPeriodId <= {week} & winner!="UNDECIDED"')
        [['home_id', 'away_id', 'home_total_points', 'away_total_points']]
    )
    return df_scores
1029840d28617d5de60fa499e8e6a9ae40b4ddea
698,425
def is_string(s):
    """
    Portable function to answer whether a variable is a string.

    Parameters
    ----------
    s : object
        An object that is potentially a string

    Returns
    -------
    isstring : bool
        A boolean decision on whether ``s`` is a string or not
    """
    return isinstance(s, str)
ed59a1b3a80f6695c971c49ff3b7936aa048523f
698,426
from os.path import dirname

def default_install_dir():
    """Return the default install directory.

    Assumes this file lives in a 'site-packages' directory.

    Returns
    -------
    :class:`str`
        The path to the install directory.
    """
    return dirname(dirname(dirname(dirname(dirname(__file__)))))
238ec1d309f6697978ef97198cc21b205e2dd6cb
698,427
def flatten_nested_list(list_in, drop_dupes=False):
    """Function to take a nested list and flatten into a 1d list."""
    is_nested = isinstance(list_in[0], list)
    if is_nested:
        list_out = []
        if drop_dupes:
            for list_i in list_in:
                for r in list_i:
                    if r not in list_out:
                        list_out.append(r)
        else:
            # keep every element, including duplicates (the original code
            # deduplicated in both branches, ignoring drop_dupes=False)
            for list_i in list_in:
                list_out.extend(list_i)
    else:
        list_out = list_in
    return list_out
d3580ea22458ef794bdef280cec650f1521fda04
698,428
def computeIchimokuCloud(candleSet):
    """Compute current values of Ichimoku Cloud lines"""
    conversionLine = (max([x["High"] for i, x in candleSet.iloc[-9:].iterrows()])
                      + min([y["Low"] for i, y in candleSet.iloc[-9:].iterrows()])) / 2
    baseLine = (max([x["High"] for i, x in candleSet.iloc[-26:].iterrows()])
                + min([y["Low"] for i, y in candleSet.iloc[-26:].iterrows()])) / 2
    leadingSpanA = (conversionLine + baseLine) / 2
    leadingSpanB = (max([x["High"] for i, x in candleSet.iloc[:].iterrows()])
                    + min([y["Low"] for i, y in candleSet.iloc[:].iterrows()])) / 2
    laggingSpan = candleSet.iloc[-26]["Close"]
    return conversionLine, baseLine, leadingSpanA, leadingSpanB, laggingSpan
781df8f002a7fc3f5f7b3dd854a123bb3f21c142
698,429
def update_subfield_snippet(context, code, subfield, value):
    """
    Replaces a snippet with a new value in a subfield of a MARC field

    :param context: Context
    :param code: Field code
    :param subfield: MARC subfield; if numeric and code < 050, assume value is the position
    :param value: value of subfield
    """
    if context.snippet is None:
        return None
    marc_fields = context.marc_record.get_fields(code)
    for field in marc_fields:
        if field.is_control_field():
            position = subfield
            old_value = list(field.value())
            old_value[int(position)] = value
            # control fields keep their value in field.data; assigning to
            # field.value would clobber the accessor with a list
            field.data = "".join(old_value)
        else:
            subfield_value = field.delete_subfield(subfield)
            new_value = subfield_value.replace(context.snippet, value)
            field.add_subfield(subfield, new_value)
67ddf74faef5295028d27725534e05c888fba36a
698,430
from typing import Any, Optional

def _strat_has_unitary_from_has_unitary(val: Any) -> Optional[bool]:
    """Attempts to infer a value's unitary-ness via its _has_unitary_ method."""
    if hasattr(val, '_has_unitary_'):
        result = val._has_unitary_()
        if result is NotImplemented:
            return None
        return result
    return None
96965cfec5f5c3c29ea641c95f20a2d4b1f3b859
698,431
import os
import tarfile

def dir_to_tar(src_path, dst_path, dst_name=None):
    """
    Pack a directory into a tar.gz file.

    :param src_path: directory to pack
    :param dst_path: directory in which to place the tar file
    :param dst_name: tar file name
    :return: path of the generated tar.gz
    """
    if not os.path.exists(src_path):
        return ""
    if not dst_name or "" == dst_name:
        dst_name = os.path.basename(src_path) + ".tar.gz"
    # `not dst_name.find(".tar.gz")` was only true when the suffix sat at
    # index 0; check for containment instead
    if ".tar.gz" not in dst_name:
        dst_name += ".tar.gz"
    tar_path = dst_path + "/" + dst_name
    if os.path.exists(tar_path):
        os.remove(tar_path)
    tar_obj = tarfile.open(tar_path, "w:gz")
    tar_obj.add(src_path, arcname=os.path.basename(src_path))
    tar_obj.close()
    return os.path.abspath(tar_path)
8b5c031a4e80e43a13ca97fd4ab93e4e72ba8f9c
698,432
def write_keyvalue_toxlsx(worksheet, row, key, value):
    """Write a dictionary to excel sheet."""
    if isinstance(value, dict):
        worksheet.write(row, 0, key)
        row += 1
        for dictkey, dictvalue in value.items():
            row = write_keyvalue_toxlsx(worksheet, row, dictkey, dictvalue)
    elif isinstance(value, list):
        for listvalue in value:
            row = write_keyvalue_toxlsx(worksheet, row, key, listvalue)
    else:
        worksheet.write(row, 0, key)
        if isinstance(value, bool):
            worksheet.write(row, 1, str(int(value)))
        else:
            worksheet.write(row, 1, value)
        row += 1
    return row
c89111a35b182824664624c9b41358a15351f91d
698,433
def BFS(map_dict, start, end, verbose):
    """
    BFS algorithm to find the shortest path between the start and end points
    on the map, following the map given by map_dict.
    If an invalid end location is given then it will return the longest
    possible path and its length.
    Returns the overall path and its length.
    """
    initPath = [start]
    pathQueue = [initPath]
    longest = 0
    longest_path = []
    moves = {1: (0, 1), 2: (0, -1), 3: (-1, 0), 4: (1, 0)}
    while len(pathQueue) != 0:
        # Get and remove oldest element of path queue:
        tmpPath = pathQueue.pop(0)
        if verbose:
            print('Current BFS path: ', tmpPath)
        lastNode = tmpPath[-1]
        if len(tmpPath) > longest:
            longest = len(tmpPath)
            longest_path = tmpPath
        # If the last node in the path is the end, shortest route found
        if lastNode == end:
            return tmpPath, len(tmpPath)
        # Otherwise add all new possible paths to path queue:
        for i in range(1, 5, 1):
            dx, dy = moves[i]
            pos_x, pos_y = lastNode
            testpos = (pos_x + dx, pos_y + dy)
            # Check if legit path and not backtracking; .get avoids a
            # KeyError for positions that lie off the map
            if map_dict.get(testpos, 0) > 0 and testpos not in tmpPath:
                newPath = tmpPath + [testpos]
                pathQueue.append(newPath)
    # If no path found return longest path:
    return longest_path, longest
f6770fe8cd5f6c4ebffce2a78992c76cfbb5bd31
698,434
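A tiny walkthrough of BFS above, assuming a hypothetical 2x2 open grid where a value > 0 marks a walkable cell (off-grid neighbors are handled by the .get lookup):

grid = {(x, y): 1 for x in range(2) for y in range(2)}
path, length = BFS(grid, (0, 0), (1, 1), verbose=False)
print(path, length)  # [(0, 0), (0, 1), (1, 1)] 3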
from functools import reduce

def pipeline(source, functions):
    """Apply an array of functions to a source iterable"""
    return reduce(lambda x, y: y(x), functions, source)
c79977ce46a6be15fc428530d94f8c4878a68ede
698,435
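A quick composition check for pipeline above (hypothetical stage functions):

def double_all(xs):
    return [x * 2 for x in xs]

print(pipeline([1, 2, 3], [double_all, sum]))  # 12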
import torch

def project_pose(x, camera=None, **kwargs):
    """
    Args
        x: 3xN points in world coordinates
        R: 3x3 Camera rotation matrix
        T: 3x1 Camera translation parameters
        f: 2 Camera focal length
        c: 2x1 Camera center
        k: 3x1 Camera radial distortion coefficients
        p: 2x1 Camera tangential distortion coefficients
    Returns
        ypixel: 2xN points in pixel space
    """
    if camera:
        device = x.device
        R = torch.as_tensor(camera['R'], device=device, dtype=torch.float32)
        T = torch.as_tensor(camera['T'], device=device, dtype=torch.float32)
        f = torch.as_tensor([[camera['fx']], [camera['fy']]], device=device, dtype=torch.float32)
        c = torch.as_tensor([[camera['cx']], [camera['cy']]], device=device, dtype=torch.float32)
    else:
        R = kwargs['R']
        T = kwargs['T']
        f = kwargs['f']
        c = kwargs['c']
    xcam = torch.mm(R, x - T)
    y = xcam[:2] / xcam[2]
    ypixel = (f * y) + c
    return ypixel
f407828feb3caf9812e41a79da90c8486b096c89
698,437
def update_letter_view(puzzle: str, view: str, position: int, guess: str) -> str:
    """Return the updated view based on whether the guess matches position in puzzle

    >>> update_letter_view('apple', 'a^^le', 2, 'p')
    'p'
    >>> update_letter_view('banana', 'ba^a^a', 0, 'b')
    'b'
    >>> update_letter_view('bitter', '^itter', 0, 'c')
    '^'
    """
    if guess == puzzle[position]:
        return puzzle[position]
    return view[position]
b7c534acdbc57c04e1f7ee6a83fc94ff2734948a
698,438
def multiplex(*brules):
    """Multiplex many branching rules into one"""
    def multiplex_brl(expr):
        seen = set()
        for brl in brules:
            for nexpr in brl(expr):
                if nexpr not in seen:
                    seen.add(nexpr)
                    yield nexpr
    return multiplex_brl
5245b4eb68a7ae00bc15f9b8071a225d33376781
698,439
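A small sketch of multiplex above (hypothetical branching rules; each yields candidate rewrites):

def rule_a(x):
    yield x + 1
    yield x + 2

def rule_b(x):
    yield x + 2
    yield x + 3

combined = multiplex(rule_a, rule_b)
print(list(combined(0)))  # [1, 2, 3] -- the duplicate 2 is emitted once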
def _remove_duplicates(items):
    """Return `items`, filtering any duplicate items.

    Discussion: https://stackoverflow.com/a/7961390/4472195

    NOTE: this requires Python 3.7+ in order to preserve order

    Parameters
    ----------
    items : list
        [description]

    Returns
    -------
    list
        Updated `items`
    """
    return list(dict.fromkeys(items))
b44a16ed815e8cc09598fe2dc8e9871d45d0ae7e
698,441
import re

def seqs_dic_count_lc_nts(seqs_dic):
    """
    Count number of lowercase nucleotides in sequences stored in sequence
    dictionary.

    >>> seqs_dic = {'seq1': "gtACGTac", 'seq2': 'cgtACacg'}
    >>> seqs_dic_count_lc_nts(seqs_dic)
    10
    >>> seqs_dic = {'seq1': "ACGT", 'seq2': 'ACGTAC'}
    >>> seqs_dic_count_lc_nts(seqs_dic)
    0
    """
    assert seqs_dic, "Given sequence dictionary empty"
    c_lc = 0  # lowercase count (was misleadingly named c_uc)
    for seq_id in seqs_dic:
        c_lc += len(re.findall(r'[a-z]', seqs_dic[seq_id]))
    return c_lc
4542885f22e255fa0f3d14a9d4ef523fdf19f2e8
698,442
def get_coa_urls():
    """
    Get CoA URLs for given samples or projects.
    """
    # Get certificate data and return the short links.
    raise NotImplementedError  # was `return NotImplementedError`
7401fd862d982d15d0ddbed93ae00792291526a0
698,443
def gaussian_kernel(D, sigma):
    """
    Applies Gaussian kernel element-wise.

    Computes exp(-d / (2 * (sigma ^ 2))) for each element d in D.

    Parameters
    ----------
    D: torch tensor
    sigma: scalar
        Gaussian kernel width.

    Returns
    -------
    torch tensor
        Result of applying Gaussian kernel to D element-wise.
    """
    return (-D / (2 * (sigma ** 2))).exp()
22885ccb785893522e57861b82612989945cd5cf
698,444
def trigger_event(connection, id, fields=None, error_msg=None):
    """Trigger an event.

    Args:
        connection(object): MicroStrategy connection object returned by
            `connection.Connection()`.
        id(str): ID of the event
        fields(str, optional): Top-level fields to include in the response
        error_msg (string, optional): Custom Error Message for Error Handling

    Returns:
        HTTP response object returned by the MicroStrategy REST server.
    """
    url = f'{connection.base_url}/api/events/{id}/trigger'
    return connection.post(
        url=url,
        params={'fields': fields}
    )
418994565cd20cac681575286553d4fa92cf89c9
698,445
import argparse

def _parse_args():
    """Parse command-line arguments."""
    parser = argparse.ArgumentParser()
    # parser.add_argument("--gpus", type=int, default=1)
    # parser.add_argument("--precision", type=int, default=16)
    parser.add_argument("--progress_bar_refresh_rate", type=int, default=1)
    parser.add_argument("--row_log_interval", type=int, default=1)
    args = parser.parse_args()
    return args
e4f9735ad9e90b8b840f3786e6ac614da9cbe45c
698,447
def best_score(a_dictionary):
    """Returns a key with the biggest integer value.

    Checks that the argument is a dictionary using isinstance().
    """
    if not isinstance(a_dictionary, dict) or len(a_dictionary) == 0:
        return None
    ret = list(a_dictionary.keys())[0]
    big = a_dictionary[ret]
    for k, v in a_dictionary.items():
        if v > big:
            big = v
            ret = k
    return ret
e168c86e615539af525050ce400fa578cac603b9
698,448
def eratosthenes(n):
    """
    This function is titled eratosthenes(n) because it uses the Sieve of
    Eratosthenes to produce a list of prime numbers. It takes in one positive
    integer argument, then returns a list of all the primes less than the
    given number.

    Args:
        n (int): User input

    Returns:
        primes (list): List of prime numbers less than the user input
    """
    primes = []
    # checks for positive number
    if n <= 0:
        print("Please enter a positive number")
        return
    # creates list of numbers up to n
    count = 2
    while count <= n - 1:
        primes.append(count)
        count += 1
    p = 2
    # starts with 2, then removes all multiples of 2. Moves to 3, removes
    # all multiples of 3, and so on until we have a list of prime numbers
    # less than n.
    while p <= n:
        for i in range(2, n + 1):
            if i * p <= n:
                # checks to see if the number was already removed from the list
                if i * p not in primes:
                    continue
                else:
                    primes.remove(i * p)
        p += 1
    return primes
7c0e0e89c1571dfc7a87883423a217ef36a984af
698,449
def y_aver_bot(yw_mol, ypf_mol):
    """
    Calculates the average mol concentration at the bottom of the column.

    Parameters
    ----------
    yw_mol : float
        The mol concentration of waste, [kmol/kmol]
    ypf_mol : float
        The mol concentration of point of feed, [kmol/kmol]

    Returns
    -------
    y_aver_bot : float
        The average mol concentration at the bottom of the column, [kmol/kmol]

    References
    ----------
    Dytnerskiy, p. 230, eq. 6.8
    """
    return (yw_mol + ypf_mol) / 2
21a438551cc7f6c3774999bafa30495dffd99201
698,450
def _convert_to_numpy_obj(numpy_dtype, obj):
    """Explicitly convert obj based on numpy type except for string type."""
    return numpy_dtype(obj) if numpy_dtype is not object else str(obj)
54a7bd1576ec95456d47e2e3957370c0970cb376
698,451
from datetime import datetime

def start_of_month(dt, d_years=0, d_months=0):
    """
    Given a date, return a date for the first day of the month.

    @param dt: The date to base the return value upon.
    @param d_years: Specify a delta in years to apply to date.
    @param d_months: Specify a delta in months to apply to date.

    @see http://code.activestate.com/recipes/476197-first-last-day-of-the-month/
    """
    y, m = dt.year + d_years, dt.month + d_months
    a, m = divmod(m - 1, 12)
    return datetime(y + a, m + 1, 1)
90976e7df97b8581622df77528540487181cbb8f
698,452
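A quick check of the month-delta arithmetic in start_of_month above (hypothetical dates):

print(start_of_month(datetime(2021, 3, 15)))               # 2021-03-01 00:00:00
print(start_of_month(datetime(2021, 3, 15), d_months=-3))  # 2020-12-01 00:00:00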
import os
import uuid

def make_job_data(url, script_fn):
    """Choose defaults.
    Run in same directory as script_fn.
    Base job_name on script_fn.
    """
    wd = os.path.dirname(script_fn)
    job_name = '{0}-{1}-{2}'.format(  # was '{0}-{1}-{1}', which dropped the uuid
        os.path.basename(script_fn),
        url.split("/")[-1],
        str(uuid.uuid4())[:8],
    )
    job_data = {"job_name": job_name,
                "cwd": wd,
                "script_fn": script_fn}
    return job_data
8dcc4e0064421964b4e2c717d2e8927cfebd15ff
698,453
def frames(string):
    """Takes a comma separated list of frames/frame ranges, and returns
    a set containing those frames

    Example: "2,3,4-7,10" => set([2,3,4,5,6,7,10])
    """
    def decode_frames(string):
        if '-' in string:
            a = string.split('-')
            start, end = int(a[0]), int(a[1])
            for i in range(start, end + 1):
                yield i
        else:
            yield int(string)

    framesList = string.split(',')
    frames = set()
    for strFrames in framesList:
        for frameNumber in decode_frames(strFrames):
            frames.add(frameNumber)
    return frames
a00a80bba4ab369a9682db6b62dad88911ecc711
698,454
def key_exists(dictionary, *keys):
    """Check if a chain of keys exists in a nested dictionary.

    :param dictionary: Nested dictionary
    :type dictionary: dict
    :param keys: Keys to test against the dictionary
    :type keys: args
    :return: ``True`` if the full key path exists in the dictionary
    :rtype: bool
    """
    item = dictionary
    for key in keys:
        try:
            item = item[key]
        except KeyError:
            return False
    return True
5bf414987e3bf25083cecdcff0af5c201561b15a
698,455
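For example, with a hypothetical nested dict, key_exists above walks the full key path:

config = {"db": {"host": "localhost", "port": 5432}}
print(key_exists(config, "db", "port"))      # True
print(key_exists(config, "db", "password"))  # False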
import importlib

def class_from_module_path(module_path):
    """Given the module name and path of a class, tries to retrieve the class.

    The loaded class can be used to instantiate new objects.
    """
    # load the module, will raise ImportError if module cannot be loaded
    if "." in module_path:
        module_name, _, class_name = module_path.rpartition('.')
        m = importlib.import_module(module_name)
        # get the class, will raise AttributeError if class cannot be found
        return getattr(m, class_name)
    else:
        return globals()[module_path]
e1b4935efd165c38c6394274c030c52eca5e241d
698,456
import json

def monitor(plugin):
    """Monitors channels of this node."""
    reply = {}
    reply['num_connected'] = 0
    reply['num_channels'] = 0
    reply['format-hint'] = 'simple'
    peers = plugin.rpc.listpeers()
    info = plugin.rpc.getinfo()
    nid = info["id"]
    chans = {}
    states = {}
    for p in peers['peers']:
        for c in p['channels']:
            if p['connected']:
                reply['num_connected'] += 1
            reply['num_channels'] += 1
            state = c['state']
            if state in states:
                states[state] += 1
            else:
                states[state] = 1
            connected = 'connected' if p['connected'] else 'disconnected'
            funding = c['funding_allocation_msat']
            our_funding = funding[nid]
            fees = "our fees"
            if int(our_funding) == 0:
                fees = "their fees"
            total = int(c['total_msat'])
            ours = int(c['our_reserve_msat']) + int(c['spendable_msat'])
            our_fraction = '{:4.2f}% owned by us'.format(ours * 100 / total)
            tmp = "\t".join([p['id'], connected, fees, our_fraction,
                             c['short_channel_id'] if 'short_channel_id' in c else 'unknown scid'])
            if state in chans:
                chans[state].append(tmp)
            else:
                chans[state] = [tmp]
    reply['states'] = []
    for key, value in states.items():
        reply['states'].append(key + ": " + str(value))
    reply['channels'] = json.dumps(chans)
    return reply
22a3dd3f161f2c5adc91d7d869981bab0ae73450
698,458
def get_ocr_json_url_for_an_image(first_three_digits,
                                  second_three_digits,
                                  third_three_digits,
                                  fourth_three_digits,
                                  image_name):
    """
    Get the URL of a JSON file given a barcode in 4 chunks of 3 digits and an
    image name (1, 2, 3, front_fr...).
    """
    url = "https://world.openfoodfacts.org/images/products/"
    url += "%s/%s/%s/%s/%s.json" % (
        first_three_digits,
        second_three_digits,
        third_three_digits,
        fourth_three_digits,
        image_name
    )
    return url
44c4d70971f24beee622ff2e11bef0474001aaa3
698,459
def cumulate_productivity(df, window=5):
    """Compute productivity over current and past 4 years."""
    return (df.pivot(index="scopus_id", columns="t", values="SJR")
              .fillna(0).rolling(window, min_periods=1, axis=1).sum()
              .reset_index().melt(id_vars="scopus_id", value_name="SJR"))
2330b39408136b4a0609f02c1174498c857085cb
698,460
def tupleRemoveByIndex(initTuple, indexList):
    """
    Remove the elements of given indices from a tuple.

    Parameters
    ----------
    initTuple : tuple of any
        The given tuple from which we will remove elements.
    indexList : list of ints
        The indices of elements that we want to remove.

    Returns
    -------
    tuple of any
        The tuple after removing elements from initTuple
    """
    initList = list(initTuple)
    indexSet = set(indexList)
    resList = []
    for i in range(len(initList)):
        if i not in indexSet:
            resList.append(initList[i])
    return tuple(resList)
17009fc3616afa586467f43b1e7910a17c89ed0d
698,461
import argparse

def prepare_arguments():
    """
    Define and return arguments.
    """
    parser = argparse.ArgumentParser(description="PyTorch Energy Experiment v0_1")
    # model training
    parser.add_argument("--batch-size", type=int, default=64, metavar="N",
                        help="input batch size for training (default: 64)")
    parser.add_argument("--test-batch-size", type=int, default=1000, metavar="N",
                        help="input batch size for testing (default: 1000)")
    parser.add_argument("--epochs", type=int, default=5, metavar="N",
                        help="number of epochs to train (default: 5)")
    parser.add_argument("--lr", type=float, default=1.0, metavar="LR",
                        help="learning rate (default: 1.0)")
    parser.add_argument("--gamma", type=float, default=0.7, metavar="M",
                        help="Learning rate step gamma (default: 0.7)")
    parser.add_argument("--no-cuda", action="store_true", default=False,
                        help="disables CUDA training")
    parser.add_argument("--dry-run", action="store_true", default=False,
                        help="quickly check a single pass")
    parser.add_argument("--seed", type=int, default=0, metavar="S",
                        help="random seed (default: 0)")
    parser.add_argument("--log-interval", type=int, default=10, metavar="N",
                        help="how many batches to wait before logging training status")
    # model saving / loading
    parser.add_argument("--save-model", action="store_true", default=False,
                        help="save the current model")
    parser.add_argument("--load-model", action="store_true", default=False,
                        help="load a model")
    parser.add_argument("--save-name", type=str, default="0", metavar="NAME",
                        help="name with which the model will be saved or loaded")
    # visualization
    parser.add_argument("--vis", action="store_true", default=False,
                        help="visualize model performance and attribution")
    parser.add_argument("--num-vis", type=int, default=10, metavar="N",
                        help="number of instances to be visualized")
    parser.add_argument("--vis-agg", action="store_true", default=False,
                        help="aggregate the attribution of all \"num-vis\" instances before the visualization")
    parser.add_argument("--vis-next", type=int, default=0, metavar="N",
                        help="skips the first vis_next * num_vis instances, can visualize other instances that way")
    parser.add_argument("--vis-save", action="store_true", default=False,
                        help="save the visualization, otherwise simply show it")
    parser.add_argument("--vis-input", action="store_true", default=False,
                        help="enter own inputs for the visualization")
    parser.add_argument("--baseline", type=str, default="0", metavar="NAME OR NUMBER",
                        help="which baseline to use (\"edges\", \"random\", or a number as the baseline)")
    parser.add_argument("--vis-real-values", action="store_true", default=False,
                        help="also show the unnormalized values on the visualization")
    parser.add_argument("--vis-only-input", type=int, default=-1, metavar="N",
                        help="only visualize for specific input")
    args = parser.parse_args()
    return args
8019c31bf529b14178b63498d54f9ec6fde6ea4f
698,462
def format_one_query(q, read_seq, read_coords, barcode_dict=None):
    """Formats output.

    Parameters
    ----------
    q : dict
        A dictionary of fuzzy searching result. Key is levenshtein distance.
        Value is list of matched barcodes.
    read_seq : str
        A DNA string (full length read).
    read_coords : tuple or list
        The positions of read used for comparison.
    barcode_dict : dict, optional
        Names for the matched barcodes. Keys are barcode sequences.
        Values are alternative names.

    Returns
    -------
    str
        Sequencing read (full length).
    str
        Matched barcode or barcode name.
    str
        Levenshtein distance.
    """
    x, y = read_coords
    read_seq = (read_seq[:x].lower()
                + read_seq[x:y]
                + read_seq[y:].lower())
    if barcode_dict:
        barcode = barcode_dict[q[0]]
    else:
        barcode = q[0]
    return read_seq, barcode, str(q[1])
2523ca6e15547a89e466fc4b4c9dad322fe91dbf
698,463
import torch

def regression(src_bbox, loc):
    """
    Apply regression, by applying the offset coefficient

    Input:
        src_bbox [N, R, 4]: coordinates of bounding boxes in XYXY.REL format
        loc [N, R, 4]: offset coefficient t_x, t_y, t_w, t_h in CxCyWH.REL format
    Return:
        dst_bbox [N, R, 4]: regressed anchor CXCYWH
    """
    # convert XYXY -> XYWH
    src_h = src_bbox[:, :, 3] - src_bbox[:, :, 1]
    src_w = src_bbox[:, :, 2] - src_bbox[:, :, 0]
    # centers: x offsets use widths, y offsets use heights
    # (the original swapped src_h and src_w here)
    src_ctr_x = src_bbox[:, :, 0] + 0.5 * src_w
    src_ctr_y = src_bbox[:, :, 1] + 0.5 * src_h

    # unwrap cache
    dx = loc[:, :, 0]
    dy = loc[:, :, 1]
    dw = loc[:, :, 2]
    dh = loc[:, :, 3]

    # compute XYWH for dst_bbox
    ctr_x = dx * src_w + src_ctr_x
    ctr_y = dy * src_h + src_ctr_y
    w = torch.exp(dw) * src_w
    h = torch.exp(dh) * src_h

    # pack CXCYWH
    dst_bbox = torch.zeros(loc.shape, dtype=loc.dtype, device=src_bbox.device)
    dst_bbox[:, :, 0] = ctr_x  # ctr_x - 0.5 * w for XYXY
    dst_bbox[:, :, 1] = ctr_y  # ctr_y - 0.5 * h for XYXY
    dst_bbox[:, :, 2] = w      # ctr_x + 0.5 * w for XYXY
    dst_bbox[:, :, 3] = h      # ctr_y + 0.5 * h for XYXY
    return dst_bbox
1d6137ecf1895fc92f63c37a510f2d0f3d68f54f
698,464
def _filter3(meta, filter_peak=0.5):
    """Filter peaks based on the number of cells."""
    meta = meta[(meta > 0).sum(axis=1) > meta.shape[1] * filter_peak]
    return meta.T
06842b557422cd63589c63d96ce3b19853357844
698,465
from typing import Dict, List, Tuple

def create_categorical_encoder_and_decoder(
        categorical_variables: List[str]) -> Tuple[Dict[int, str], Dict[str, int]]:
    """
    Given a list of categorical variables, returns a decoder and an encoder.

    Encoder
        Key = category. Value = integer encoding.
    Decoder
        Key = integer encoding. Value = category.
    """
    decoder = {}
    encoder = {}
    for idx, variable in enumerate(categorical_variables):
        decoder[idx] = variable
        encoder[variable] = idx
    return decoder, encoder
70de5c8a3e1667da2776a3750c1fae1edf9fd8ae
698,466
def update_dependencies(new_dependencies, existing_dependencies):
    """Update the source package's existing dependencies.

    When a user passes additional dependencies from the command line, these
    dependencies will be added to the source package's existing dependencies.
    If the dependencies passed from the command line are existing
    dependencies, these existing dependencies are overwritten.

    Positional arguments:
    new_dependencies (List[str]) -- the dependencies passed from the command line
    existing_dependencies (List[str]) -- the dependencies found in the source package's index.json file
    """
    # split dependencies away from their version numbers since we need the
    # names in order to evaluate duplication
    dependency_names = set(dependency.split()[0] for dependency in new_dependencies)
    index_dependency_names = set(index.split()[0] for index in existing_dependencies)

    repeated_packages = index_dependency_names.intersection(dependency_names)
    if len(repeated_packages) > 0:
        # iterate over a copy; removing from a list while iterating it
        # silently skips elements
        for index_dependency in list(existing_dependencies):
            for dependency in repeated_packages:
                if index_dependency.startswith(dependency):
                    existing_dependencies.remove(index_dependency)
    existing_dependencies.extend(new_dependencies)
    return existing_dependencies
7f520c0c980cd0b929be32d9d4cceb8c99f75934
698,467
def read_brecon(infile, tree, stree):
    """
    Reads branch reconciliation from file
    """
    brecon = {}
    for line in infile:
        tokens = line.rstrip().split("\t")

        # parse node
        node_name = tokens[0]
        if node_name.isdigit():
            node_name = int(node_name)
        node = tree[node_name]

        events = []
        for i in range(1, len(tokens), 2):
            snode_name = tokens[i]
            event = tokens[i + 1]
            if snode_name.isdigit():
                snode_name = int(snode_name)
            snode = stree[snode_name]
            events.append([snode, event])

        brecon[node] = events
    return brecon
366295c2196df5c45bae9d8a59d068814285b168
698,468
def _convert_to_int_list(check_codes):
    """Takes a comma-separated string or list of strings and converts to list of ints.

    Args:
        check_codes: comma-separated string or list of strings

    Returns:
        list: the check codes as a list of integers

    Raises:
        ValueError: if conversion fails
        RuntimeError: if cannot determine how to convert input
    """
    if isinstance(check_codes, list):
        if all(isinstance(x, int) for x in check_codes):
            return check_codes  # good input
        else:
            return [int(x) for x in check_codes]  # list of str
    elif isinstance(check_codes, str):
        return [int(x) for x in check_codes.split(",")]  # str, comma-separated expected
    raise RuntimeError("Could not convert values: {} of type {}".format(check_codes, type(check_codes)))
965a68466d0aab358043cedb4838ac9d99ce3249
698,469
def get_note_freq(p):
    """
    Return the frequency corresponding to a particular note number

    Parameters
    ----------
    p: int
        Note number, in halfsteps. 0 is concert A (440 Hz)
    """
    return 440 * 2 ** (p / 12)
cec12355f481494fa53fb0ed535ecd82fd038016
698,470
def stringify_parameters(items):
    """
    Convert all items in list to string.
    """
    return [str(item) for item in items]
41a47f611d1514043eeac27de6c8be6c01607649
698,471
def get_current_spec_list(ctx):
    """
    Get the current spec list, either from -p/--project-specs or
    --specs-to-include-in-project-generation or
    specs_to_include_in_project_generation in user_settings.options

    :param ctx: Current context
    :return: The current spec list
    """
    try:
        return ctx.current_spec_list
    except AttributeError:
        pass

    # Get either the current spec being built (build) or all the specs for
    # the solution generation (configure/msvs)
    current_spec = getattr(ctx.options, 'project_spec')
    if not current_spec:
        # Specs are from 'specs_to_include_in_project_generation'
        spec_string_list = getattr(ctx.options, 'specs_to_include_in_project_generation', '').strip()
        if len(spec_string_list) == 0:
            ctx.fatal(
                "[ERROR] Missing/Invalid specs ('specs_to_include_in_project_generation') in user_settings.options")
        spec_list = [spec.strip() for spec in spec_string_list.split(',')]
        if len(spec_list) == 0:
            ctx.fatal("[ERROR] Empty spec list ('specs_to_include_in_project_generation') in user_settings.options")
    else:
        spec_list = [current_spec]

    # Vet the list and make sure all of the specs are valid specs
    for spec in spec_list:
        if not ctx.is_valid_spec_name(spec):
            ctx.fatal(
                "[ERROR] Invalid spec '{}'. Make sure it exists in the specs folder and is a valid spec file.".format(
                    spec))

    ctx.current_spec_list = spec_list
    return ctx.current_spec_list
e0e2788b86bf005b6986ab84f507d784ef837cca
698,472
import os
import subprocess

def create_pager(highlight_text=None):
    """
    Returns a pipe to PAGER or "less"
    """
    pager_env = os.environ.get('PAGER')
    # split so that values like "less -F" become a valid argv list;
    # the original kept the raw string, and str has no .extend()
    pager_cmd = pager_env.split() if pager_env else ['less', '-r']
    if highlight_text:
        pager_cmd.extend(['-p', highlight_text])
    pager = subprocess.Popen(pager_cmd, stdin=subprocess.PIPE)
    return pager
1fde7ecffd050122b2f9b41b5cf5e43b0e7e688c
698,473
def title_from_name(name):
    """
    Create a title from an attribute name.
    """
    def _():
        """
        Generator to convert parts of title
        """
        try:
            int(name)
            yield 'Item #%s' % name
            return
        except ValueError:
            pass
        last = None
        for ch in name:  # `it.next()` was Python 2; iterate directly instead
            if ch == '_':
                if last != '_':
                    yield ' '
            elif last in (None, '_'):
                yield ch.upper()
            elif ch.isupper() and not last.isupper():
                yield ' '
                yield ch.upper()
            else:
                yield ch
            last = ch
    return ''.join(_())
44182a7aefc552701517292563717884835230aa
698,474
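Sample conversions for title_from_name above (hypothetical attribute names):

print(title_from_name("user_name"))  # User Name
print(title_from_name("firstName"))  # First Name
print(title_from_name("3"))          # Item #3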
def fill_empties(abstract):
    """Fill empty cells in the abstraction

    The way the row patterns are constructed assumes that empty cells are
    marked by the letter `C` as well. This function fills those in. The
    function also removes duplicate occurrences of ``CC`` and replaces these
    with ``C``.

    Parameters
    ----------
    abstract : str
        The abstract representation of the file.

    Returns
    -------
    abstraction : str
        The abstract representation with empties filled.
    """
    while "DD" in abstract:
        abstract = abstract.replace("DD", "DCD")
    while "DR" in abstract:
        abstract = abstract.replace("DR", "DCR")
    while "RD" in abstract:
        abstract = abstract.replace("RD", "RCD")
    while "CC" in abstract:
        abstract = abstract.replace("CC", "C")
    if abstract.startswith("D"):
        abstract = "C" + abstract
    if abstract.endswith("D"):
        abstract += "C"
    return abstract
cc27354fd50ac8588c8374e06025a2ceeff691c6
698,475
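For instance, fill_empties above expands a hypothetical pattern like this:

print(fill_empties("DDRD"))  # CDCDCRCDC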
def normal_shock_density_ratio(M, gamma):
    """Gives the normal shock density ratio as a function of upstream Mach number."""
    return ((gamma + 1.0) * M**2.0) / (2.0 + (gamma - 1.0) * M**2.0)
780e435602278f02d3a1839f460edf1ce40a5a5a
698,476
def pretty_print(state):
    """
    Returns a 3x3 string matrix representing the given state.
    """
    assert len(state) == 9
    lines = [' '.join(map(str, l)) for l in [state[:3], state[3:6], state[6:]]]
    return '\n'.join(lines)
67fb7b091e7256d1fa71f2936d3c8989d5a90f0e
698,477
def I_props(D, B, TF, TW, ETABS=True):
    """Properties of an I-section

    >>> I_props(15, 12, 3, 2, ETABS=False)
    {'P': 74, 'A': 90, 'Avy': 72, 'Avz': 30, 'Iyy': 2767.5, 'Izz': 858.0, 'Zyy': 369.0, 'Zzz': 143.0, 'Syy': 472.5, 'Szz': 225.0}
    """
    A = B * D - (B - TW) * (D - 2 * TF)
    Iyy = B * D**3 / 12 - (B - TW) * (D - 2 * TF)**3 / 12
    Zyy = 2 * Iyy / D
    Syy = B * TF * (D - TF) + 0.25 * TW * (D - 2 * TF)**2
    Izz = TF * B**3 / 6 - (D - 2 * TF) * TW**3 / 12
    Zzz = 2 * Izz / B
    Szz = 0.5 * TF * B**2 + 0.25 * (D - 2 * TF) * TW**2
    if ETABS:
        return {'P': 2 * D + 4 * B - 2 * TW, 'A': A,
                'AS3': 5 / 3 * TF * B, 'AS2': TW * (D - TF),
                'I33': Iyy, 'I22': Izz,
                'S33': Zyy, 'S22': Zzz,
                'Z33': Syy, 'Z22': Szz}
    else:
        return {'P': 2 * D + 4 * B - 2 * TW, 'A': A,
                'Avy': 2 * TF * B, 'Avz': TW * D,
                'Iyy': Iyy, 'Izz': Izz,
                'Zyy': Zyy, 'Zzz': Zzz,
                'Syy': Syy, 'Szz': Szz}
c10d8c0e0bf65bfa17d4b29250d42db30466eccc
698,478
from typing import Any, Dict, List

def expand_decks_list(
    decks: List[Dict[str, Any]],
    deck_info: Dict[str, Any],
) -> List[Dict[str, Any]]:
    """
    Given a list of decks and their cards, fills in the missing information
    using the detailed decks specification
    """
    for item in decks:
        name = item['name']
        info = deck_info[name]
        item['id'] = info['id']
        if 'parent-id' in info:
            item['parent-id'] = info['parent-id']
    return decks
1a50f5c2ba461be54605cf7637d64a8257200494
698,479
import json

def decode_req_encode_rsp(f):
    """Decorator to decode incoming requests and encode responses."""
    def decode_inner(req):
        return json.dumps(f(json.loads(req)))
    return decode_inner
ee41df3de3c41fbe2f32ddf4a07e769abfe31fda
698,480
import re

def find_images(document):
    """Returns the list of image filepaths used by the `document`."""
    images = []
    for line in document:
        match = re.match(r"\.\. image::\s+(img\/.+)", line)
        if match:
            images.append(match[1])
    return list(set(images))
2c58b04974f5ec0d1752cb405cfd314de81a841c
698,481
def extract_kwargs_from_ctx(ctx):
    """
    Extracts kwargs from Click context manager.
    """
    args = []
    i = 0
    for arg in ctx.args:
        if arg[:2] == '--':
            args.append([arg[2:]])
            i += 1
        else:
            args[i - 1].append(arg)

    for i, arg in enumerate(args):
        if len(arg) == 1:
            args[i] = [arg[0], True]
        elif len(arg) > 2:  # was `len(args)`, which checked the wrong list
            args[i] = [arg[0], ' '.join(arg[1:])]

    keys = [arg[0] for arg in args]
    if len(keys) != len(set(keys)):
        msg = "Your cmd arguments contain a duplicate!"
        raise ValueError(msg)

    kwargs = {arg[0]: arg[1] for arg in args}
    return kwargs
678a819daf33a26c599cea08817397741bfbef8a
698,482
def load_config(cfgfile):
    """
    Parses a configuration file
    """
    cfg = {}
    with open(cfgfile, 'r') as cfgf:
        for l in cfgf:
            ps = [p.strip() for p in l.split(':')]
            if len(ps) == 2:
                try:
                    cfg[ps[0]] = float(ps[1])
                except ValueError:
                    cfg[ps[0]] = ps[1]
                    if cfg[ps[0]] == 'False':
                        cfg[ps[0]] = False
                    elif cfg[ps[0]] == 'True':
                        cfg[ps[0]] = True
    return cfg
283a36f1d92c58b0018fe62714cc9dc23f96c898
698,483
import os

def get_file_with_different_version_location(file_name):
    """
    Gets file location

    :param file_name: File name
    :return : File location
    """
    sample_app_dir = os.path.dirname(os.path.realpath(__file__))
    # os.path.join keeps this portable instead of hard-coded backslashes
    return os.path.join(sample_app_dir, 'Sample_Apps_2', file_name)
8200ae91ddc2c3833a1975aec1ec4664e1149337
698,484
def rotate(A, turns=1):
    """A matrix which is formed by rotating the given matrix, n times, in clockwise sense.

    Args
    ----
    A (compulsory)
        A matrix.
    turns (int, optional)
        The number of turns to rotate the matrix. Defaults to 1.

    Returns
    -------
    Matrix
        The matrix obtained on rotating the given matrix.
    """
    turns = turns % 4
    if turns == 0:
        return A
    elif turns == 2:
        # 180 degrees needs both the row order and each row reversed;
        # reversing only the rows (as before) is merely a horizontal flip
        return [row[::-1] for row in A[::-1]]
    else:
        Rotated_A = [[A[j][i] for j in range(len(A[i]))][::-1] for i in range(len(A))]
        return rotate(Rotated_A, turns - 1)
c6fb428c15317b314443101093ec96019be52bfa
698,485
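A quick sanity check of rotate above; the transpose step indexes A[j][i] with j over len(A[i]), so it assumes a square matrix (hypothetical 2x2):

M = [[1, 2],
     [3, 4]]
print(rotate(M))     # [[3, 1], [4, 2]]
print(rotate(M, 2))  # [[4, 3], [2, 1]]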
def determine_time_system(header: dict) -> str:
    """Determine which time system is used in an observation file."""
    # Current implementation is quite inconsistent in terms of what is put
    # into the header.
    try:
        file_type = header['RINEX VERSION / TYPE'][40]
    except KeyError:
        file_type = header['systems']

    if file_type == 'G':
        ts = 'GPS'
    elif file_type == 'R':
        ts = 'GLO'
    elif file_type == 'E':
        ts = 'GAL'
    elif file_type == 'J':
        ts = 'QZS'
    elif file_type == 'C':
        ts = 'BDT'
    elif file_type == 'I':
        ts = 'IRN'
    elif file_type == 'M':
        # Else the type is mixed and the time system must be specified in
        # TIME OF FIRST OBS row.
        ts = header['TIME OF FIRST OBS'][48:51]
    else:
        raise ValueError(f'unknown file type {file_type}')
    return ts
00a7908aa5eaf21eaaa39d97b23304644ebe27b4
698,486
def _all(oper, left, right):
    """Short circuit all for ndarray."""
    return all(oper(ai, bi) for ai, bi in zip(left, right))
fe982f59086d2aa688004f83f9c63c6ecf6919f4
698,487
def expand_header(row):
    """Parse the header information.

    Args:
        row (List[str]): sambamba BED header row

    Returns:
        dict: name/index combos for fields
    """
    # figure out where the sambamba output begins
    sambamba_start = row.index('readCount')
    sambamba_end = row.index('sampleName')
    coverage_columns = row[sambamba_start + 2:sambamba_end]
    thresholds = {int(column.replace('percentage', '')): row.index(column)
                  for column in coverage_columns}
    keys = {
        'readCount': sambamba_start,
        'meanCoverage': sambamba_start + 1,
        'thresholds': thresholds,
        'sampleName': sambamba_end,
        'extraFields': slice(3, sambamba_start)
    }
    return keys
740790e5aa0f5415d3cb0fc22902403b16bef455
698,488
def cap_text(text):
    """
    Capitalize the first letter of each word in a string (str.title semantics)

    :param text: input string
    :return: capitalized string
    """
    return text.title()
8403db85d37399db3b4b0693bc9e578ddcf01c3b
698,489
def size_as_recurrence_map(size, sentinel=''):
    """
    :return: dict, size as "recurrence" map. For example:
        - size = no value, will return: {<sentinel>: None}
        - size = simple int value of 5, will return: {<sentinel>: 5}
        - size = timed interval(s), like "2@0 22 * * *:24@0 10 * * *", will return:
          {'0 10 * * *': 24, '0 22 * * *': 2}
    """
    if not size and size != 0:
        return {sentinel: None}
    return {sentinel: int(size)} if str(size).isdigit() else {
        part.split('@')[1]: int(part.split('@')[0])
        for part in str(size).split(':')}
203bc0697cca9b3710f4079de03e759659116883
698,490
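The three documented cases of size_as_recurrence_map above, checked directly:

print(size_as_recurrence_map(None))  # {'': None}
print(size_as_recurrence_map(5))     # {'': 5}
print(size_as_recurrence_map("2@0 22 * * *:24@0 10 * * *"))
# {'0 22 * * *': 2, '0 10 * * *': 24}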
def get_field_kwargs(field_name, model_field):
    """
    Creates a default instance of a basic non-relational field.
    """
    kwargs = {}
    return kwargs
d05d5f42a4ecc2669486d049918d271013a6969f
698,491
import argparse

def parse_command_line_arguments():
    """
    Parse command line arguments.

    :return: parsed arguments
    """
    parser = argparse.ArgumentParser(description="Pure Python command-line RSS reader")
    parser.add_argument(
        "--version",
        action="version",
        version="Version 5.0.0",
        help="Print version info",
    )
    parser.add_argument("source", type=str, nargs="?", default=None, help="RSS URL")
    parser.add_argument(
        "--limit", type=int, help="Limit news topics if this parameter provided"
    )
    parser.add_argument(
        "--json", action="store_true", help="Print result as JSON in stdout"
    )
    parser.add_argument(
        "--verbose", action="store_true", help="Outputs verbose status messages"
    )
    parser.add_argument(
        "--colorize", action="store_true", help="Prints news to the console in colorized mode"
    )
    parser.add_argument(
        "--date", type=str, help="Return news from date yyyymmdd from cache"
    )
    parser.add_argument(
        "--to-pdf", type=str,
        help=r"Save news in pdf format in chosen path, eg 'E:\data' or '/home/user/data'"
    )
    parser.add_argument(
        "--to-html", type=str,
        help=r"Save news in html format in chosen path, eg 'E:\data' or '/home/user/data'"
    )
    arguments = parser.parse_args()
    return arguments
b271785eb92ade63f0c103ac83f4e71910bd41cd
698,492
import requests
import json

def get_mactable(auth):
    """
    Function to get list of mac-addresses from Aruba OS switch

    :param auth: AOSSAuth class object returned by pyarubaoss.auth
    :return: list of mac-addresses
    :rtype: list
    """
    headers = {'cookie': auth.cookie}
    url_mactable = "http://" + auth.ipaddr + "/rest/" + auth.version + "/mac-table"
    try:
        r = requests.get(url_mactable, headers=headers)
        mactable = json.loads(r.text)['mac_table_entry_element']
        return mactable
    except requests.exceptions.RequestException as error:
        return "Error:\n" + str(error) + " get_mactable: An Error has occurred"
96c064696bf47872e5a1bec5e8770d6470b76bfc
698,493
def es_primo(n: int) -> bool:
    """Determine whether a number is prime.

    :param n: number to evaluate.
    :n type: int
    :return: True if prime, False otherwise.
    :rtype: bool
    """
    if n == 2 or n == 3:
        return True
    if n % 2 == 0 or n < 2:
        return False
    for i in range(3, int(n**0.5) + 1, 2):
        if n % i == 0:
            return False
    return True
fff4802d6e47c379b6510f13e2ffe9a9d56429d2
698,494
def replace_string(string_value, replace_string, start, end):
    """
    Replaces one string by another

    :param string_value: str, string to replace
    :param replace_string: str, string to replace with
    :param start: int, string index to start replacing from
    :param end: int, string index to end replacing
    :return: str, new string after replacing
    """
    first_part = string_value[:start]
    second_part = string_value[end:]
    return first_part + replace_string + second_part
b79d685a13c427334149789195e5fbbb4a6ad28d
698,495
import hashlib

def hash_str(data, hasher=None):
    """Checksum hash a string."""
    hasher = hasher or hashlib.sha1()
    if isinstance(data, str):
        data = data.encode()  # hashlib only accepts bytes
    hasher.update(data)
    return hasher
5dd433a3cc04037bb439b64b71d3495d03f51ef1
698,496
def get_next_point(df, inc, axis):
    """
    Increment to get the next points along.

    Parameters
    ----------
    df : pandas DataFrame
        The dataframe of points
    inc : int
        The amount to increment
    axis : str
        The axis along which to increment. Can be 'x', 'y', or 'z'
    """
    df = df.copy()

    # Need to increment the axis
    x_ind = df.index.get_level_values('x').to_numpy()
    y_ind = df.index.get_level_values('y').to_numpy()
    z_ind = df.index.get_level_values('z').to_numpy()
    if axis == 'x':
        x_ind += inc
    elif axis == 'y':
        y_ind += inc
    elif axis == 'z':
        z_ind += inc

    # Add the new incremented indices
    df['x'] = x_ind
    df['y'] = y_ind
    df['z'] = z_ind
    df = df.set_index(['z', 'y', 'x'])

    # Need to increment eta_l
    df.eta_l -= inc
    # Need to increment eta_r
    df.eta_r -= inc
    return df
5b91f2f23f188891ff04ded3f4a76e6b552fa2a0
698,497
def load_input(filename):
    """
    Load poker hands from filename
    """
    hands = []
    with open(filename) as f:
        for line in f.readlines():
            tokens = line.strip().split(" ")
            cards = []
            for token in tokens:
                value = token[0]
                suit = token[1]
                if value == "A":
                    value = 14
                elif value == "K":
                    value = 13
                elif value == "Q":
                    value = 12
                elif value == "J":
                    value = 11
                elif value == "T":
                    value = 10
                else:
                    value = int(value)
                cards.append((value, suit))
            hands.append([cards[:5], cards[5:]])
    return hands
ba79c8163098be587729f788ce231c02204f77ce
698,498
def _getresponse(self):
    """Monkey-patched replacement getresponse method"""
    intercept = self._intercept()
    if intercept['mode'] == 'normal':
        response = self._orig_getresponse()
        if 'recorder' in intercept:
            intercept['recorder']._record_response(response)
        # Ensure chunked is not set, since the StringIO replacement
        # goofs it up
        response.chunked = 0
        return response
    else:
        return intercept['playback'].getresponse()
9131ac0aa03c841cb971456d7b04b1e942dd1e78
698,500
import functools

def synchronize(lock):
    """
    Decorator that invokes the lock acquire call before a function call and
    releases after
    """
    def sync_func(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            lock.acquire()
            try:
                return func(*args, **kwargs)
            finally:
                lock.release()  # release even if func raises
        return wrapper
    return sync_func
e9ac48d67cf45e1b0cf9b6e776a93f569726b5d4
698,501
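Typical use of synchronize above (hypothetical shared counter guarded by a lock):

import threading

counter_lock = threading.Lock()
count = 0

@synchronize(counter_lock)
def bump():
    global count
    count += 1

threads = [threading.Thread(target=bump) for _ in range(10)]
for t in threads:
    t.start()
for t in threads:
    t.join()
print(count)  # 10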
def help_invocation_for_command(prefix, command, locale):
    """
    Get the help command invocation for a command.

    Parameters
    ----------
    prefix: str
        The command prefix to show.
    command: senko.Command
        The command to get the help invocation for.
    locale: senko.Locale
        The locale to get the help invocation for.

    Returns
    -------
    str
        The help invocation for the given command in the given locale.
    """
    command_name = command.get_qualified_name(locale)
    help_name = locale("#command_help_name")
    return f"{prefix}{help_name} {command_name}"
abf80554c479e3f766e6a7dc1e7bbcafdbaa6098
698,502
import torch

def derivative_sigmoid(x):
    """Compute the derivative of the sigmoid for a given input.

    Args:
        x (torch.Tensor): The input.

    Returns:
        (torch.Tensor): The derivative at the input.
    """
    return torch.mul(torch.sigmoid(x), 1. - torch.sigmoid(x))
00da3734436294bc8bb40189bc1298972fbe7f93
698,503
def loginUser(username, password):  # noqa: E501
    """
    # noqa: E501

    :param username: The user name for login
    :type username: str
    :param password: The password for login in clear text
    :type password: str
    :rtype: str
    """
    return 'do some magic!'
20466f242fd92ae09113e6fe3b2ef0c1b03b85b8
698,504
import os

def parse_folder_info(path):
    """
    Function: get the folders and files within a particular path.
    Input: path
    Output: lists of folders and files
    """
    folders = [f for f in os.listdir(path) if not os.path.isfile(os.path.join(path, f))]
    files = [f for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))]
    if '.DS_Store' in files:
        files.remove('.DS_Store')
    if '._.DS_Store' in files:
        files.remove('._.DS_Store')
    return folders, files
0d486a315348cdb87d696f66b95b54961a5427c4
698,505
def generate_all_nbr_from_dataframe(dist_df, nbr_distance=100.0):
    """
    Given a distance matrix, return a list of index positions of all
    neighbors within a certain distance.

    Parameters
    ----------
    dist_df : DataFrame
    nbr_distance : float

    Returns
    -------
    all_nbrs : list
        list of lists

    Examples
    --------
    >>> M = pd.DataFrame([[0,2,5,1],
    ...                   [2,0,3,1],
    ...                   [5,3,0,5],
    ...                   [1,1,5,0]])
    >>> rmf.generate_all_nbr_from_dataframe(M, 2)
    [[0, 1, 3], [0, 1, 3], [2], [0, 1, 3]]

    Notes
    -----
    Methods for iterating a DataFrame like a flat file replace:
        for line in open(distfile, 'r'):
    with:
        option 1
            x = distA.to_dict("record")
            for r in x:
                print(list(r.values()))
        option 2
            for index, row in distA.iterrows():
                print(index, row.to_list())
    """
    N = 0
    all_nbrs = []
    all_dists = []
    for ind, row in dist_df.iterrows():
        l = row.to_list()
        clone_id = ind
        index = len(all_dists)
        # assert tcrs[index][-1]['clone_id'] == clone_id
        dists = [float(x) for x in l]
        if not N:
            N = len(dists)
        else:
            assert N == len(dists)
        nbrs = []
        for ii, d in enumerate(dists):
            if d <= nbr_distance:
                nbrs.append(ii)
        all_dists.append(dists)
        all_nbrs.append(nbrs)
    return all_nbrs
a735c25febbab446e28486174aa2d5ef3cb3d5d0
698,507
from pathlib import Path

async def check_node_modules():
    """Check if node_modules exists and has contents.

    Returns:
        {bool} -- True if exists and has contents.
    """
    ui_path = Path(__file__).parent / "ui"
    node_modules = ui_path / "node_modules"
    exists = node_modules.exists()
    valid = exists
    if exists and not tuple(node_modules.iterdir()):
        valid = False
    return valid
e76703372c46982cf728b26125808e50e2b92907
698,508
def _execute_c2(state, context):
    """REP: Reset status bits.

    When used, it will clear the bits specified by the 1 byte immediate
    value. Together with SEP, this is the only means of changing the M and X
    status register bits.
    """
    state.flags &= ~state.current_operand
    return context, None
d9ae38f7b9c3c8caf14e578b29693417f04b8234
698,509
def rowcol_to_xy(rows, cols, raster):
    """non-uri version of rowcol_to_xy_uri"""
    gt = raster.GetGeoTransform()
    X = gt[0] + cols * gt[1] + rows * gt[2]
    Y = gt[3] + cols * gt[4] + rows * gt[5]
    return (X, Y)
26ccf8da771387fffdcfc4d5b188aa562fd7be7e
698,510
import sys

def memory_check(block_sizes, qlength_max, estimate_memory):
    """Return amount of memory required.

    Estimate memory consumption exactly as CESAR2.0 does it.
    """
    num_states, rlength, extra = 0, 0, 100000
    for block_size in block_sizes:
        num_codons = block_size // 3
        num_states += 6 + 6 * num_codons + 1 + 2 + 2 + 22 + 6
        rlength += block_size
    MEM = (num_states * 4 * 8) + \
          (num_states * qlength_max * 4) + \
          (num_states * 304) + \
          (2 * qlength_max + rlength) * 8 + \
          (qlength_max + rlength) * 2 * 1 + extra
    # bytes to GB
    GB = MEM / 1000000000
    if estimate_memory:
        sys.stdout.write(f"Expected memory consumption of:\n{GB} GB\n")
        sys.exit(0)
    return GB
8539fe72e864c7aedb8e7d9daf65876e78ef2fad
698,511
def test_call_fuzz():
    """Fuzz some functions."""
    return """
    fn main() {
        {dest} = takes_args(fuzz(), fuzz(), fuzz());
    }
    fn takes_args(a: u8, b: u1, c: u4) -> u4 {
        write_value(&b, 10);
        return a + b + c;
    }
    fn write_value(ptr: *u1, val: u1) {
        *ptr = val;
    }
    fn fuzz() -> u2 {
        return 1;
    }
    """
4291bc625b51715c286ef61ce910fae0257c4852
698,512