content: string, lengths 35 to 416k
sha1: string, length 40
id: int64, 0 to 710k
import ssl


def generate_ssl_context(server_certificate, server_key, ca_cert):
    """Generate an SSL context for a Tornado handler."""
    ssl_context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
    ssl_context.load_cert_chain(server_certificate, server_key)
    ssl_context.load_verify_locations(ca_cert)
    ssl_context.verify_mode = ssl.CERT_REQUIRED
    return ssl_context
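A minimal wiring sketch for the function above; the certificate file names and port are placeholders, and Tornado is assumed to be installed. The returned context is passed to Tornado's HTTPServer through its ssl_options argument.

# Hypothetical usage sketch: file names and port are placeholders.
import tornado.httpserver
import tornado.ioloop
import tornado.web

app = tornado.web.Application([])
ctx = generate_ssl_context("server.crt", "server.key", "ca.crt")
server = tornado.httpserver.HTTPServer(app, ssl_options=ctx)
server.listen(8443)
tornado.ioloop.IOLoop.current().start()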
29e8cd2bc49948e6e5b509dab312b049d0cc9aaa
699,067
def get_page(rb, cand, el_type):
    """
    Takes a RoboBrowser instance rb with the results page of some polling
    station (UIK) open, the number of candidates in the election cand, and
    the election type el_type. Returns a list containing the UIK number,
    the number of registered voters at the UIK, the numbers of people who
    voted at home and at the station, the number of spoiled ballots, and
    the vote count for each candidate.
    """
    uik_result = []
    main_table = rb.parsed.body('table')[2]('tr')[3].td
    uik_result.append(int(main_table("table")[2]('tr')[1]('td')[1].string.split("№")[1]))
    main_table = main_table("table")[el_type[0]]('tr')
    try:
        uik_result.append(int(main_table[0]('td')[2].b.string))
    except IndexError:
        for x in range(4 + cand):
            uik_result.append(-1)
        return uik_result
    for m in range(el_type[1], el_type[1] + 3):
        uik_result.append(int(main_table[m]('td')[2].b.string))
    for l in range(-cand, 0):
        uik_result.append(int(main_table[l]('td')[2].b.string))
    return uik_result
a4fecd2dc60d8438e20870d0e96ce876bb4c1902
699,068
def makeindexline(url, startpage, total, numonpage=10, pagesonscreen=5):
    """
    Make a menu line for a given number of inputs, with a certain number
    per page. Will look something like : ::

        First  Previous  22 23 24 25 26 27 28 29 30 31 32  Next  Last

    Each number or word will be a link to the relevant page.

    url should be in the format :
    ``'<a href="script.py?startpage=%s">%s</a>'`` - it will have the two
    ``%s`` values filled in by the function.

    The url will automatically be put between ``<strong></strong>`` tags.
    Your script needs to accept a parameter ``start`` telling it which
    page to display.

    ``startpage`` is the page actually being viewed - which won't be a
    link. ``total`` is the number of total inputs. ``numonpage`` is the
    number of inputs per page - this tells makeindexline how many pages to
    divide the total into.

    The links shown will be some before startpage and some after. The
    number of page links shown is ``pagesonscreen``. (The actual total
    number shown will be *2 \\* pagesonscreen + 1*).

    The indexes generated are *a bit* like the ones created by google.
    Unlike google however, next and previous jump you into the *middle* of
    the next set of links. i.e. If you are on page 27, next will take you
    to 33 and previous to 21 (assuming pagesonscreen is 5). This makes it
    possible to jump more quickly through a lot of links. Also - the
    current page will always be in the center of the index. (So you never
    *need* Next just to get to the next page).
    """
    b = '<strong>%s</strong>'
    url = b % url
    outlist = []
    numpages = total // numonpage
    if total % numonpage:
        numpages += 1
    if startpage - pagesonscreen > 1:
        outlist.append(url % (1, 'First'))
        outlist.append('&nbsp;')
        outlist.append(url % (startpage - pagesonscreen - 1, 'Previous'))
        outlist.append('&nbsp;')
    index = max(startpage - pagesonscreen, 1)
    end = min(startpage + pagesonscreen, numpages)
    while index <= end:
        if index == startpage:
            outlist.append(b % startpage)
        else:
            outlist.append(url % (index, index))
        index += 1
    outlist.append('&nbsp;')
    if (startpage + pagesonscreen) < numpages:
        outlist.append(url % (startpage + pagesonscreen + 1, 'Next'))
        outlist.append('&nbsp;')
        outlist.append(url % (numpages, 'Last'))
    return '&nbsp;'.join(outlist)
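A quick sketch of a call, using the URL template from the docstring:

link = '<a href="script.py?startpage=%s">%s</a>'
print(makeindexline(link, startpage=27, total=500))
# -> First and Previous(21) links, pages 22..32 with 27 unlinked,
#    then Next(33) and Last(50)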
fdae2d9385f02e3c47fff516b2eb4fe1f931cec8
699,069
def get_db_path(spider_dir, db_id):
    """ Return path to SQLite database file.

    Args:
        spider_dir: path to SPIDER benchmark
        db_id: database identifier

    Returns:
        path to SQLite database file
    """
    return f'{spider_dir}/database/{db_id}/{db_id}.sqlite'
9069b673df1b3c929c249319339a84eaaf398c33
699,070
def pop_indices(lst, indices):
    """
    Pop from lst the elements at the given list or tuple of indices.
    This function modifies lst directly in place.

    >>> pop_indices([1, 2, 3, 4, 5, 6], [0, 4, 5])
    [2, 3, 4]
    """
    for n in sorted(indices, reverse=True):
        lst.pop(n)
    return lst
4cfeedfd211ba4578d877004acdec061c7727d78
699,071
def is_specific_hemorrhage(df):
    """If a specific hemorrhage is present or suspected, then hemorrhage NOS is changed to absent."""
    hemorrhages = [
        "epidural_hemorrhage",
        "subarachnoid_hemorrhage",
        "subdural_hemorrhage",
    ]
    present = ["present", "suspected"]
    specific_hemorrhage = len(
        df.loc[
            (
                (df["target_group"].isin(hemorrhages))
                & (df["modifier_group"].isin(present))
            )
        ]
    )
    modifiers = df.loc[
        (df["target_group"] == "hemorrhage"), "modifier_phrase"
    ].str.cat(sep=", ")
    # .any() is needed here: len() of a boolean mask is just the row count,
    # which is > 0 for any non-empty dataframe
    if (specific_hemorrhage > 0) and df["target_group"].isin(["hemorrhage"]).any():
        df.loc[(df["target_group"] == "hemorrhage"), "modifier_group"] = "absent"
        df.loc[(df["target_group"] == "hemorrhage"), "modifier_phrase"] = (
            modifiers + ", is_specific_hemorrhage"
        )
    return df
404a8162e9225055bf1b3ea493d6529ea5e90767
699,072
def new_format_converter(df, logger):
    """Returns a dataframe."""
    df = df.rename(columns={"Plant Name": "Hour"})
    df["Hour"] = df["Hour"].astype(str)
    df = df.reset_index(drop=True)
    df = df.set_index("Hour")
    total_index = df.columns.get_loc("Total (MW)")
    df = df.iloc[:, total_index:]
    try:
        time_index = df.index.get_loc("24:00")
    except KeyError:
        raise RuntimeError(
            "Structure of xlsm file for BD has altered, unable to parse."
        )
    df = df.iloc[: time_index + 1]
    # check for new columns
    if df.shape[1] != 12:
        logger.warning(
            "New data columns may be present in xlsm file.", extra={"key": "BD"}
        )
    return df
d18178046e12a074fdfa05c8ad4dbdbe9993d003
699,073
def _BackslashEscape(s):
    """Double up backslashes.

    Useful for strings about to be globbed and strings about to be IFS
    escaped.
    """
    # Similar to GlobEscape and splitter.Escape().
    return s.replace('\\', '\\\\')
4c107203117d699c65fd00158913914ae6530b97
699,074
import collections


def _collect_traces(raw_traces, trace_factory):
    """Postprocesses pytype's opcode traces."""
    out = collections.defaultdict(list)
    for op, symbol, data in raw_traces:
        out[op.line].append(trace_factory(op.name, symbol, data))
    return out
6f656073c17c9c5c1d6a33c37dd6a23a5525aa64
699,075
def intrange(arg):
    """ convert a command-line argument to a list of integers """
    acceptable_chars = [str(x) for x in range(10)]
    acceptable_chars.append("-")
    partial = []
    first = None
    msg = "Could not convert {0} to index range.".format(arg)
    err = TypeError(msg)
    for char in arg:
        if char not in acceptable_chars:
            raise err
        if char == "-":
            if len(partial) == 0:
                raise err
            elif first is None:
                first = int("".join(partial))
                partial = []
            else:
                # this means there's a second -, which is not ok
                raise err
        else:
            partial.append(char)
    second = None
    if first is None:
        first = int("".join(partial))
    elif len(partial) == 0:
        raise err
    else:
        second = int("".join(partial))
    if second is None:
        return [first]
    elif second - first >= 0:
        # wrap in list() so the return type matches the docstring
        return list(range(first, second + 1))
    else:
        return list(range(first, second - 1, -1))
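A few illustrative calls, following the logic above:

print(intrange("4"))    # [4]
print(intrange("3-7"))  # [3, 4, 5, 6, 7]
print(intrange("7-3"))  # [7, 6, 5, 4, 3]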
8d3bbe9b32207ae2c18080601761b84c9e87ec9f
699,076
import subprocess


def s3_addition(bucket_name, region):
    """ add a new s3 """
    output = subprocess.getstatusoutput(
        ("aws s3api create-bucket --bucket {0} --region {1} "
         "--create-bucket-configuration LocationConstraint={1}").format(
            bucket_name, region))
    return output
ae08985c1b34446fb54d1ef547420c4ca15d6456
699,077
import re


def read_accounts_file(account_file):
    """
    Process each line in the specified account file looking for account
    definitions. An account definition is a line containing the word
    'account' followed by a valid account name, e.g:

        account Expenses
        account Expenses:Utilities

    All other lines are ignored.
    """
    accounts = []
    # raw string avoids invalid-escape warnings for \s
    pattern = re.compile(r"^\s*account\s+([:A-Za-z0-9-_ ]+)$")
    with open(account_file, "r", encoding='utf-8') as f:
        for line in f.readlines():
            mo = pattern.match(line)
            if mo:
                accounts.append(mo.group(1))
    return accounts
f917958201cd66f6b04bd9307611bb282b25f3f6
699,078
def forceresolution(numgridptscuberoot, lengthcuberoot):
    """ returns the force resolution of the PM part in units of the input boxsize

    args:
        numgridptscuberoot: cube root of the number of grid points
        lengthcuberoot: cube root of the box volume (the box side length)

    return:
        force resolution (distance) in the units used for the box size
    """
    return lengthcuberoot / numgridptscuberoot
d14760ac863bd1409b9df40fe0ff19ff44b508ca
699,080
def set_up_folder_name_1M(model_file, date_run):
    """
    Produce log_file, plot_folder based on model_file (file name ending with _sd.pt).

    If model_file is epoch_1_sd.pt and date_run is 0913:
    plot_folder is 'plot_2d_epoch_1', log file is 'log_0913_2d_epoch_1'.
    """
    # if model_file == '': model_pure = f'ep{ep_ind}'
    model_pure = model_file[:model_file.rfind('_sd.pt')]
    plot_folder = f'plot_2d_{model_pure}'
    log_file = f'log_{date_run}_2d_{model_pure}'  # .out  This is the log file, recording the printing
    return log_file, plot_folder
d7dd24789476279ea8d5fe0b32627aa049731ef7
699,081
from typing import List
import glob


def get_all_file_from_directory(directory: str, file_dsc: str) -> List[str]:
    """ Get paths of all files matching file_dsc in directory """
    template = f"{directory}{file_dsc}"
    file_paths = glob.glob(template)
    return file_paths
7a3115793a5a59bc8f6ee655315ea87857338c47
699,082
def naked(val):
    """ Given a string, strip off all surrounding whitespace & quotes """
    return val.strip(' "\'\t')
1875b38f05fa0c8b540ece0265354293f275b3ea
699,083
def handler(store, default=True, internal=False, passive=False):
    """
    Decorator for setting up a handler. This puts the handler into the
    handler store which can then be used to look up all handlers and
    information about them such as which are default, passive, etc.

    Currently there are two handler stores, one for connection handlers in
    nogotofail.mitm.connection.handlers.connection.store and one for data
    handlers in nogotofail.mitm.connection.handlers.data.store

    Arguments:
        store -- the HandlerStore to store information about the handler in
        default -- if the handler should be used by default
        internal -- if the handler is used internally. These are always
            added and not displayed in --help or sent to the client.
        passive -- if the handler is passive and does no modification.
    """
    def wrapper(cls):
        cls.passive = passive
        if internal:
            store.internal.append(cls)
        else:
            store.map[cls.name] = cls
            store.all.append(cls)
            if default:
                store.default.append(cls)
        return cls
    return wrapper
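A hedged sketch of applying the decorator; the store object and handler class below are minimal stand-ins for illustration, not nogotofail's real HandlerStore or handler classes:

class _Store:
    # stand-in with the attributes the decorator touches
    def __init__(self):
        self.map, self.all, self.default, self.internal = {}, [], [], []

store = _Store()

@handler(store, default=False, passive=True)
class LoggingHandler:
    name = "logger"

assert store.map["logger"] is LoggingHandler
assert LoggingHandler.passive is True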
cf7c4c8847539be57c5680d87f80414bb6ab0164
699,085
import sys


def ref_c_2(obj):
    """Return sys.getrefcount(obj); the result is 2 higher than the number
    of real references, because the function argument and getrefcount's own
    argument each add a temporary reference."""
    return sys.getrefcount(obj)
aafb6caded13abcf9a3c72f5f932e4215556b2ea
699,086
def human_readable_filesize(size):
    """Convert file size in bytes to human readable format

    Args:
        size (int): Size in bytes

    Returns:
        str: Human readable file-size, i.e. 567.4 KB (580984 bytes)
    """
    if size < 1024:
        return "{} bytes".format(size)
    remain = float(size)
    for unit in ["B", "KB", "MB", "GB", "TB"]:
        if remain < 1024.0:
            return "{:.1f} {} ({:d} bytes)".format(remain, unit, size)
        remain /= 1024.0
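A quick check against the docstring's own example:

print(human_readable_filesize(512))     # 512 bytes
print(human_readable_filesize(580984))  # 567.4 KB (580984 bytes)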
c8eefdf9145bfb5c937b740fec9e6b437704aa5b
699,087
def interchanging_key_and_value(x):
    """Interchange a (key, value) pair, returning (value, key)."""
    return (x[1], x[0])
6478d48d698bb741ff2a24634b5b833ef624f79f
699,088
import re


def tokenize(s):
    """ tokenize """
    # replace digits with <num>
    s = re.sub(r'\d+', '<num>', s).lower()
    tokens = s.split(' ')
    return tokens
0f76b68c75c474a4f4ee692ae1124127b93ebf6e
699,089
import sys


def get_outputfile(argument):
    """ Gets the outputfile from the raw argument. """
    if argument is not None:
        return open(argument, 'w')
    return sys.stdout
173428cd10bdbae812ea29aed42b92155f567db3
699,090
from typing import Generator


def to_list(x, repeat=1):
    """convert x to list object

    Args:
        x: any object to convert
        repeat: if x is wrapped as [x], repeat it `repeat` times in the list
    """
    if isinstance(x, (Generator, tuple, set)):
        return list(x)
    elif isinstance(x, list):
        return x
    elif isinstance(x, dict):
        return list(x.values())
    elif x is not None:
        return [x] * repeat
    else:
        return []
79841d76cd0eba5a2e92fc0992516f59113c3f9b
699,091
def try_key(dict_to_try, key_for_dict):
    """Either returns the key's value or an empty string."""
    if key_for_dict not in dict_to_try:
        return ''
    return dict_to_try[key_for_dict]
a7486bd4933301278941fb7ee2001890221fcbd9
699,092
import sys


def utf8_replace(txt):
    """ Replace unsupported characters in unicode string, returns unicode. """
    sse = sys.stdout.encoding
    # decode with the same encoding used to encode; decoding with "utf8"
    # would corrupt text whenever stdout is not UTF-8
    txt = txt.encode(sse, "replace").decode(sse, "ignore")
    return txt
935731ba5d5b5cb42958ff2b79649a9641fcbc13
699,093
def covariance_matrix(X):
    """
    Args:
        X (ndarray) (m, n): data matrix, assumed to be mean-centered
    Return:
        cov_mat (ndarray) (n, n): covariance matrix of X
    """
    m = X.shape[0]
    return (X.T @ X) / m
128d9bfe12169cb344c150bf9f8f05565b5a8831
699,094
def sigmoid_deriv(x):
    """
    Calculates the sigmoid derivative for the given value.

    :param x: Sigmoid outputs whose derivatives should be calculated
        (i.e. x is expected to already be sigmoid(z))
    :return: Derivatives for given values
    """
    return x * (1. - x)
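A small check of the identity sigmoid'(z) = s(z) * (1 - s(z)); the sigmoid helper here is written just for this demonstration:

import math

def sigmoid(z):
    return 1.0 / (1.0 + math.exp(-z))

s = sigmoid(0.5)          # ~0.6225
print(sigmoid_deriv(s))   # ~0.2350, the slope of the sigmoid at z = 0.5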
8a7a1005fafb1b34c17c6ce4a8a04f80334e396a
699,095
from typing import Mapping


def UpdateDict( target, override ):
    """Apply the updates in |override| to the dict |target|.

    This is like dict.update, but recursive. i.e. if the existing element
    is a dict, then override elements of the sub-dict rather than
    wholesale replacing.

    e.g.

    UpdateDict(
      {
        'outer': { 'inner': { 'key': 'oldValue', 'existingKey': True } }
      },
      {
        'outer': { 'inner': { 'key': 'newValue' } },
        'newKey': { 'newDict': True },
      }
    )

    yields:

    {
      'outer': {
        'inner': {
           'key': 'newValue',
           'existingKey': True
        }
      },
      'newKey': { 'newDict': True }
    }
    """
    for key, value in override.items():
        current_value = target.get( key )
        if not isinstance( current_value, Mapping ):
            target[ key ] = value
        elif isinstance( value, Mapping ):
            target[ key ] = UpdateDict( current_value, value )
        else:
            target[ key ] = value
    return target
a07a3d7989e227ff1759271248777e167b7e5467
699,096
from pathlib import Path
import uuid


def get_filename(filename):
    """CKEditor - Filename Generator"""
    extension = Path(filename).suffix
    return '{0}{1}'.format(uuid.uuid4(), extension)
50988191c0145e686b38306a6f1bfd686d4dff25
699,097
def dss_target(request):
    """
    This is a parameterized fixture. Its value will be set with the
    different DSS targets (DSS7, DSS8 ...) that are specified in the
    configuration file. It returns the value of the considered DSS target
    for the test. Here it is only used by other fixtures, but one could
    use it as a test function parameter to access its value inside the
    test function.

    Args:
        request: The object to introspect the “requesting” test function,
            class or module context

    Returns:
        The string corresponding to the considered DSS target for the test
        to be executed
    """
    return request.param
0aa9f85160280032dfdea872b7c9a147a5717523
699,098
import re


def _clean_name(name):
    """ This function sets the name of a recipe to the standard naming convention specified """
    name = re.sub(r'\s', "_", name)
    return name.lower()
940792220aa6eabfcf067c6450f30bca66d1b214
699,099
def label_data(dictionary, images):
    """
    Labels the data depending on patient's diagnosis

    Parameters
    ----------
    dictionary: Dict with patient information
    images: Names of images to label

    Returns
    -------
    Labeled data
    """
    data = []
    last_patient = ''
    aux = []
    for img in images:
        patientid = img[5:15]
        if last_patient == '':
            last_patient = patientid
            aux.append(img)
            continue
        if patientid == last_patient:
            aux.append(img)
        else:
            last_date = aux[-1][16:22]
            if last_patient + last_date in dictionary:
                dx = dictionary[last_patient + last_date]
                for a in aux:
                    data.append((a, dx))
            aux = [img]
            last_patient = patientid
    # flush the final patient's group, which the loop above never reaches
    if aux:
        last_date = aux[-1][16:22]
        if last_patient + last_date in dictionary:
            dx = dictionary[last_patient + last_date]
            for a in aux:
                data.append((a, dx))
    return data
01eb965d4cff7a8d4242eeb57e43fe3d72ee74f9
699,100
def gym_size(gym):
    """Returns the size of the gym."""
    return len(gym)
b31a46a7b56e973da8dfa0cac9950fad14b48622
699,101
import os


def GetFileList(root):
    """Gets a normalized list of files under |root|."""
    assert not os.path.isabs(root)
    assert os.path.normpath(root) == root
    file_list = []
    for base, _, files in os.walk(root):
        paths = [os.path.join(base, f) for f in files]
        file_list.extend(x.lower() for x in paths)
    return sorted(file_list)
2a81659a26683f7118dfa18e93c1ee18e03ee59f
699,102
def is_call_id_in_video_state(log, ad, call_id, video_state):
    """Return True if the call_id is in the expected video_state.

    Args:
        log: logger object
        ad: android_device object
        call_id: call id
        video_state: valid VIDEO_STATE

    Returns:
        True if call_id is in expected video_state; False if not.
    """
    return video_state == ad.droid.telecomCallVideoGetState(call_id)
d0da323a21a23d461fc2aec06ac5d0bebac7cbef
699,103
def _extract_spots_outside_foci(cell_cyt_mask, spots_out_foci):
    """
    Extract spots detected outside foci, in a specific cell.

    Parameters
    ----------
    cell_cyt_mask : np.ndarray, bool
        Binary mask of the cell with shape (y, x).
    spots_out_foci : np.ndarray, np.int64
        Coordinate of the spots detected outside foci, with shape
        (nb_spots, 4). One coordinate per dimension (zyx coordinates) plus
        a default index (-1 for mRNAs spotted outside a foci).

    Returns
    -------
    spots_out_foci_cell : np.ndarray, np.int64
        Coordinate of the spots detected outside foci in the cell, with
        shape (nb_spots, 4). One coordinate per dimension (zyx coordinates)
        plus the index of the foci.
    """
    # get coordinates of rna outside foci
    mask_spots_to_keep = cell_cyt_mask[spots_out_foci[:, 1], spots_out_foci[:, 2]]
    spots_out_foci_cell = spots_out_foci[mask_spots_to_keep]
    return spots_out_foci_cell
d4e06d9ad14ced3b8984b71c32d9d3b1e0cebaae
699,104
import os


def read_markdown(*path):
    """folder_path, ..., file_name"""
    try:
        path_com = os.path.join(*path)
        if os.path.isfile(path_com):
            with open(path_com, 'r', encoding='utf-8') as file:
                content = file.read()
            print(path_com, 'read.')
            return content
        else:
            print("File name error!")
            return None
    except Exception as e:
        print("failed to read:", e)
        return None
6c0fd2e4b09e9a30c00de632e758034f01334b45
699,105
import io


def get_custom_stopwords(stop_words_file):
    """
    Build a list of stop words.

    :param stop_words_file: stop-words file
    :return:
    """
    with io.open(stop_words_file, encoding='utf-8') as f:
        stopwords = f.read()
    stopwords_list = stopwords.split('\n')
    custom_stopwords_list = [i for i in stopwords_list]
    return custom_stopwords_list
d2e25e44815b6769688493a96852cc537d1127bf
699,106
def extract_image_number(filename: str, from_file: bool) -> int:
    """
    Finds the number of the image from the original images of the LITIV 2014 dataset.

    :param filename: name of the image file.
    :param from_file: whether filename was read from a ground-truth file.
    :return: number.
    """
    number = filename[-8:-4]
    if from_file:
        number = filename[-9:-5]
    if not number.isdigit():
        number = number[-3:]
    if not number.isdigit():
        number = number[-2:]
    return int(number)
e55431beffb02959907106a01596310812ba7363
699,107
def subarraySum(nums, k):
    """
    Given an array of integers and an integer k, you need to find the
    total number of continuous subarrays whose sum equals to k.
    """
    count = 0
    s = 0
    # prefix-sum counts: a subarray (i, j] sums to k exactly when
    # prefix[j] - prefix[i] == k; the original code only counted
    # prefix sums divisible by k, which is a different problem
    prefix_counts = {0: 1}
    for num in nums:
        s += num
        count += prefix_counts.get(s - k, 0)
        prefix_counts[s] = prefix_counts.get(s, 0) + 1
    return count
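A quick check against the classic examples for this problem:

print(subarraySum([1, 1, 1], 2))  # 2 -> [1,1] at two positions
print(subarraySum([1, 2, 3], 3))  # 2 -> [1,2] and [3]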
6bbad5ec5511f8563fc951aa916f76a6297946ba
699,108
def fail_on_npm_install():
    """ Used to simulate an error when executing "npm install" """
    return 1
ea0b2b14487acf8b9833e84374de469a1b22b61a
699,109
def calculateNetIncome(gross, state):
    """
    Calculate the net income after federal and state tax

    :param gross: Gross Income
    :param state: State Name
    :return: Net Income
    """
    state_tax = {'LA': 10, 'SA': 0, 'NY': 9}
    # Calculate net income after federal tax
    net = gross - (gross * .10)
    # Calculate net income after state tax
    if state in state_tax:
        net = net - (gross * state_tax[state] / 100)
        print("Your net income after all the heavy taxes is: " + str(net))
        return net
    else:
        print("State not in the list")
        return None
915528d8bfd15c18003eaeb8f6b3f1e8ad5565a0
699,110
def field_size(field_label):
    """
    Helper function to determine the size of a binary table field

    Parameters
    ----------
    field_label : PVLModule
        The field label

    Returns
    -------
    int :
        The size of one entry in bytes
    """
    data_sizes = {
        'Integer': 4,
        'Double': 8,
        'Real': 4,
        'Text': 1,
    }
    return data_sizes[field_label['Type']] * field_label['Size']
afaff70bc18d4d9d023fb7c89d45560f1a691bcf
699,111
def leading_by(agents, player):
    """Determine difference between player's score and closest opponent."""
    scores = [a.score() for a in agents]
    player_score = scores[player]
    scores.sort()
    if player_score == scores[-1]:
        return player_score - scores[-2]
    else:
        return player_score - scores[-1]
e97aa0a1109bb2ba5f276f34030ea1f93022046f
699,113
def get_bid2sid(offsets, stc_lens):
    """
    :param offsets: list of 1 tensor, offsets by doc
    :param stc_lens: list of 1 list, stc length
    :return: bid2sid: dict of int, blank id 2 stc id, both start from 0
    """
    bid2sid, acc_len = dict(), 0
    sid = 0
    for bid, offset in enumerate(offsets[0]):
        while offset >= acc_len + stc_lens[0][sid]:
            acc_len += stc_lens[0][sid]
            sid += 1
        if acc_len <= offset < acc_len + stc_lens[0][sid]:
            bid2sid[bid] = sid
    return bid2sid
a5a96d3ea6c7fcb17a97df706799fe0716ae659b
699,114
def mk_seealso(
    function_name: str,
    role: str = "func",
    prefix: str = "\n\n",
    module_location: str = ".",
) -> str:
    """
    Returns a sphinx `seealso` pointing to a function.

    Intended to be used for building custom fitting model docstrings.

    .. admonition:: Usage example for a custom fitting model
        :class: dropdown, tip

        See the usage example at the end of the :class:`~ResonatorModel`
        source-code:

        .. literalinclude:: ../quantify_core/analysis/fitting_models.py
            :pyobject: ResonatorModel

    Parameters
    ----------
    function_name
        name of the function to point to
    role
        a sphinx role, e.g. :code:`"func"`
    prefix
        string preceding the `seealso`
    module_location
        can be used to indicate a function outside this module, e.g.,
        :code:`my_module.submodule` which contains the function.

    Returns
    -------
    :
        resulting string
    """
    return f"{prefix}.. seealso:: :{role}:`~{module_location}{function_name}`\n"
0165599829e8edd3f75adcda9402069066a0381a
699,115
def mock_xibo_api(mocker):
    """Return a mock XiboApi."""
    return mocker.MagicMock()
8c2ad1566ad716d9fb4dd680a624e59f89950a0a
699,117
def get_corpus_archive_name(cycle: int) -> str:
    """Returns a corpus archive name given a cycle."""
    return 'corpus-archive-%04d.tar.gz' % cycle
3068ce88bd85f2c21819da697a2b1147f547c509
699,118
def array_replace(space, w_arr, args_w):
    """ Replaces elements from passed arrays into the first array """
    # use a distinct loop variable here: the original code reused w_arr,
    # clobbering the array being built
    for i, w_arg in enumerate(args_w):
        if w_arg.tp != space.tp_array:
            space.ec.warn("array_replace_recursive(): Argument #%d "
                          "should be an array" % (i + 1))
            return space.w_Null
    for w_arg in args_w:
        with space.iter(w_arg) as w_iter:
            while not w_iter.done():
                w_key, w_value = w_iter.next_item(space)
                w_arr = space.setitem(w_arr, w_key, w_value)
    return w_arr
bab04d17733a12418b248e6f72e9dc053b263ac7
699,119
import re


def extract_airlines_from_flight_text(flight_text):
    """
    :param flight_text: Raw flight text string, e.g.,
        "3:45 PM – 8:15 PM+1\nUnited\n13h 30m\nSFO–PVG\nNonstop\n$4,823",
        "12:51 PM – 4:50 PM+1\nDelta\nChina Southern\n12h 59m\nSEA–PVG\nNonstop\n$4,197",
        "2:10 AM – 1:25 PM+1\nSeparate tickets booked together\nEVA Air, Spring\n20h 15m\nSEA–PVG\n1 stop\n6h 15m TPE\n$1,194"
    :return: A list of airlines, e.g., ["United"], ["Delta", "China Southern"], ["EVA Air", "Spring"]
    """
    airlines = []
    # We skip flight_text.split("\n")[0], which is the flight time range.
    for airline_candidate in flight_text.split("\n")[1:]:
        if airline_candidate == "Separate tickets booked together":
            continue
        if re.match(r"(\d+h )?\d+m", airline_candidate):
            # The flight time length indicates the end of airline info.
            break
        # strip so that "EVA Air, Spring" yields "Spring", not " Spring"
        airlines.extend(a.strip() for a in airline_candidate.split(","))
    return airlines
188f040c15e62f4eace72cb90ef74825c4b803fc
699,120
def get_inputs(inputs, mask=None):
    """Extract and normalize the arrays from inputs"""
    gmax = inputs[0][:, :, 1].max()
    o = inputs[0][:, :, 0] / gmax
    n = inputs[1][:, :, 0] / gmax
    return o, n
329dfce70d65dec8d5f177239b7f88c977519867
699,121
import torch


def _bbox_pre_decode(points: torch.Tensor, bbox_pred: torch.Tensor,
                     stride: torch.Tensor):
    """compute real bboxes."""
    points = points[..., :2]
    bbox_pos_center = torch.cat([points, points], dim=-1)
    bboxes = bbox_pred * stride + bbox_pos_center
    return bboxes
27aec66f69c31e1147dc544808f4ce10954b1272
699,122
def formatTime(t):
    """Check if an hour, minute, or second variable has format hh, mm or ss;
    if not, change the format.

    :param t: string time hh, mm or ss
    :return: string time in correct format, e.g. hh=02 instead of hh=2
    """
    if len(t) < 2:
        t = "0" + t
    return t
8011fc25a001964af1f1e0b0cb4e54f7d7aec721
699,123
def song_value(songtitle, artist, chart):
    """
    Returns the value of a song in a chart.

    Run through one billboard chart object and return the "value" of the
    song with the specified title and artist in this chart. If the song is
    not on the chart, 0 is returned. Songtitle and artist should be
    normalized with normalize_str!

    Example: song_value('songtitle', 'artistname', chart)
    """
    for song_tuple in chart:
        if len(songtitle) >= 4:
            if (songtitle in song_tuple[1]) and (artist in song_tuple[2]):
                return song_tuple[0]
        else:
            if (songtitle == song_tuple[1]) and (artist in song_tuple[2]):
                return song_tuple[0]
    return 0
637b5a83938729e1ecf131804c12f38f7aba6247
699,124
def _create_connection(network, ip_address, mac_address, ip_allocation_mode,
                       primary_interface=False, nic_order=0):
    """ repack fields to dict """
    return {'network': network,
            'ip_address': ip_address,
            'mac_address': mac_address,
            'ip_allocation_mode': ip_allocation_mode,
            'primary_interface': primary_interface,
            'nic_order': nic_order}
da2c38d853ab9ba458031265e6ca6d33f2a54185
699,125
def remove_signature(message):
    """Remove the 3 letter signature and the '/' found at the beginning of
    a valid message"""
    # 3 signature characters plus the '/' make 4 characters to drop
    return message[4:]
a802dc5abfea09e05fad51936dd76e17719900e7
699,126
def find_adv_chunk(doc):
    """
    Returns a dictionary representing an adverb chunk with a subject,
    a verb and an adverb
    """
    for noun_chunk in doc.noun_chunks:
        # print("noun_chunk is {}".format(noun_chunk))
        if noun_chunk.root.dep_ != 'nsubj':
            continue
        subj = noun_chunk.root
        if subj.head.dep_ == 'ROOT':
            verb = subj.head
            # print("verb is {}".format(verb))
            for child in verb.children:
                # print("child is {}".format(child))
                # print("child dep is {}".format(child.dep_))
                if child.dep_ == "advmod":
                    adverb = child
                    adverb_chunk = {
                        "subject": subj,
                        "verb": verb,
                        "adverb": adverb,
                    }
                    return adverb_chunk
    return None
64775754733a6b5a3ce342b1f0475b188cb00319
699,127
import textwrap


def get_comment_from_location(location):
    """Return comment text from location.

    Args:
        location: descriptor_pb2.SourceCodeInfo.Location instance to get
            comment from.

    Returns:
        Comment as string.
    """
    return textwrap.dedent(location.leading_comments or
                           location.trailing_comments)
d86213ddee50128c8363d2a31a740c5677244612
699,128
def check_scale_xsmode(xsmode):
    """checking if the scale of xsmode assumes ESLOG (log) or ESLIN (linear)

    Args:
        xsmode: xsmode

    Return:
        ESLOG/ESLIN/UNKNOWN
    """
    if xsmode in ("lpf", "LPF", "modit", "MODIT", "redit", "REDIT"):
        print("xsmode assumes ESLOG: mode=", xsmode)
        return "ESLOG"
    elif xsmode in ("dit", "DIT"):
        print("xsmode assumes ESLIN: mode=", xsmode)
        return "ESLIN"
    else:
        return "UNKNOWN"
db6097abc196677063cb6851ce6036fb6e9b61f9
699,130
def get_name(test):
    """Gets the name of a test.

    PARAMETERS:
    test -- dict; test cases for a question. Expected to contain a key
            'name', which either maps to a string or an iterable of
            strings (in which case the first string will be used)

    RETURNS:
    str; the name of the test
    """
    if type(test['name']) == str:
        return test['name']
    return test['name'][0]
e9d5a5013b062077c499e0351786819c9bd6bd90
699,131
def check_if_points_escape_box(u, box_boundaries):
    """
    Determine if points u in the 2D plane have escaped from the
    box_boundaries limits.

    Parameters
    ----------
    u : ndarray, shape(n, 2)
        Points in the plane.
    box_boundaries : list of 2 tuples of floats
        Values are interpreted as [[x_min, x_max], [y_min, y_max]].

    Returns
    -------
    u_indices : ndarray of bools, shape(n,)
        True/False for points inside/outside the box_boundaries
        respectively.
    """
    x, y = u.T
    # Escape condition
    box_x_min, box_x_max = box_boundaries[0]
    box_y_min, box_y_max = box_boundaries[1]
    u_indices = ((x >= box_x_min) & (x <= box_x_max)
                 & (y >= box_y_min) & (y <= box_y_max))
    return u_indices
41b04b72821bc39f387c7557be1b11f14e65c71d
699,132
from typing import Any
import random


def assign_new_ids(items: dict[str, Any]) -> list[str]:
    """
    Assigns new IDs to any item ID that starts with "NEW:" or is an empty
    string. Will modify the dict in place. Will return a list of new item
    IDs assigned.
    """
    new_items = []
    for item_id in items:
        if item_id in ('', 'NEW') or item_id.startswith('NEW:'):
            new_items.append(item_id)
    for i, item_id in enumerate(new_items):
        item_range = (10 ** (max(len(str(len(items))), 5) + 1)) - 1
        item_range_l = len(str(item_range))
        new_id: str = ''
        while not new_id or new_id in items:
            new_id = str(random.randint(0, item_range)).zfill(item_range_l)
        items[new_id] = items.pop(item_id)
        new_items[i] = new_id
    return new_items
4fb3466566f0fd9981e1c99cb429da629c843dab
699,133
def isatty(stream):
    """Check if a stream is attached to a console.

    Args:
        stream (:obj:`io.IOBase`): Stream to check.

    Returns:
        :obj:`bool`
    """
    return stream.isatty() if hasattr(stream, "isatty") else False
4ec38e35413f0f8af76da5e2681f5502acf3d61e
699,134
def remove_whitespace(string: str) -> str:
    """
    This function replaces whitespace (and some punctuation) with the
    empty string ''.

    Input: string with (or without) whitespace
    Output: string without whitespace
    """
    try:
        if len(string) == 0 or (len(string) == 1 and string != ' '):
            return string.lower()
        else:
            new_string = (string.replace(' ', '').replace('(', '')
                          .replace(')', '').replace('.', '').replace(',', ''))
            return new_string.lower()
    except TypeError:
        exit('Invalid entry...')
6c5db5739913a9c31d70797a220e65ba511e7567
699,135
def parameter_indices(*params):
    """ Parameter indices """
    param_inds = dict([
        ("P_kna", 0), ("g_K1", 1), ("g_Kr", 2), ("g_Ks", 3), ("g_Na", 4),
        ("g_bna", 5), ("g_CaL", 6), ("g_bca", 7), ("g_to", 8), ("K_mNa", 9),
        ("K_mk", 10), ("P_NaK", 11), ("K_NaCa", 12), ("K_sat", 13),
        ("Km_Ca", 14), ("Km_Nai", 15), ("alpha", 16), ("gamma", 17),
        ("K_pCa", 18), ("g_pCa", 19), ("g_pK", 20), ("Buf_c", 21),
        ("Buf_sr", 22), ("Ca_o", 23), ("K_buf_c", 24), ("K_buf_sr", 25),
        ("K_up", 26), ("V_leak", 27), ("V_sr", 28), ("Vmax_up", 29),
        ("a_rel", 30), ("b_rel", 31), ("c_rel", 32), ("tau_g", 33),
        ("Na_o", 34), ("Cm", 35), ("F", 36), ("R", 37), ("T", 38),
        ("V_c", 39), ("stim_amplitude", 40), ("stim_duration", 41),
        ("stim_period", 42), ("stim_start", 43), ("K_o", 44),
    ])
    indices = []
    for param in params:
        if param not in param_inds:
            raise ValueError("Unknown param: '{0}'".format(param))
        indices.append(param_inds[param])
    if len(indices) > 1:
        return indices
    else:
        return indices[0]
6012452a48fc770b6da28a6ac09817a3c299282b
699,136
def egcd(a, b):
    """
    Calculate the extended Euclidean algorithm. ax + by = gcd(a,b)

    Args:
        a (int): An integer.
        b (int): Another integer.

    Returns:
        int: Greatest common divisor.
        int: x coefficient of Bezout's identity.
        int: y coefficient of Bezout's identity.
    """
    if a == 0:
        return (b, 0, 1)
    else:
        g, y, x = egcd(b % a, a)
        return (g, x - (b // a) * y, y)
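A common use of egcd is computing a modular inverse; a small sketch (modinv is a helper written here for the demonstration):

def modinv(a, m):
    # a * x ≡ 1 (mod m) requires gcd(a, m) == 1
    g, x, _ = egcd(a, m)
    if g != 1:
        raise ValueError("inverse does not exist")
    return x % m

print(modinv(3, 11))  # 4, since 3 * 4 = 12 ≡ 1 (mod 11)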
87b35ff4e28529d1de2a6fb0063b69cee5dfec74
699,137
import os


def maybe_add_extension(filename):
    """add .tex extension if needed"""
    if os.path.exists(filename):
        return filename
    elif os.path.exists(filename + '.tex'):
        return filename + '.tex'
e72a0a2905b2be5a09c9c3e1c7d8fd17b9fb05d9
699,138
def old(sym):
    """
    Return the "old" version of symbol "sym", that is, the one
    representing "sym" in the pre_state.
    """
    return sym.prefix('old_')
4cc24bd0448195ee1c373106679177ce428e5937
699,139
import os


def validate_file(parser, arg):
    """Check that the files provided exist."""
    if not os.path.exists(arg):
        parser.error("The file %s doesn't exist" % arg)
    else:
        return arg
9bc53d22d0a4a2447be31bee5a9f7ab60f8bc7a7
699,141
import os


def reported_news(file_paths):
    """Check if Misc/NEWS has been changed."""
    return os.path.join('Misc', 'NEWS') in file_paths
95416284bbb0913d8edf4075f4577b56b9dee5ea
699,142
import sys


def cont_from_future(future):
    """Create continuation from `Future` object"""
    def cont(out_done, out_error):
        def done_callback(future):
            try:
                out_done(future.result())
            except Exception:
                out_error(sys.exc_info())
        future.add_done_callback(done_callback)
    return cont
9b12c4c20a6ce4f9027ca49f8d9369fd374fe32b
699,143
def _remove_stopwords(line, nlp):
    """ Helper function for removing stopwords from the given text line. """
    line_nlp = nlp(line)
    line_tokens = [tok.text for tok in line_nlp]
    filtered_line = list()
    # Filter
    for tok in line_tokens:
        lexeme = nlp.vocab[tok]
        if not lexeme.is_stop:
            filtered_line.append(tok)
    return ' '.join(filtered_line)
58932dc254874ea7a9e93e7e8d7fbf9f306f740b
699,144
def dict_fetch_all_by_month(cursor):
    """
    We need the data in this format:
        data = [
            {"month": "jan", "type1": value, "type2": value},
            {"month": "feb", "type1": value, "type2": value},
            ......
        ]
    The code below helps to build the dictionaries in that shape.
    """
    collectors = []
    is_the_data_empty = True
    cursor_fetch = cursor.fetchall()
    types = [rows[0] for rows in cursor_fetch]
    # build a template dictionary for each month column
    for col in cursor.description:
        temp_collector = {}
        if col[0] != 'Type':
            temp_collector['month'] = col[0]
            for type_name in types:
                temp_collector[type_name] = ''
            collectors.append(temp_collector)
    # now populate the rows
    for rows in cursor_fetch:
        type_name = None
        index = 0
        for row in rows:
            if index == 0:
                type_name = row
            else:
                if row > 0:
                    is_the_data_empty = False
                collectors[index - 1][type_name] = row
            index = index + 1
    return [{
        'collectors': collectors,
        'is_the_data_empty': is_the_data_empty
    }]
e1056dd16310915d6acb3ae5570df85920681da3
699,145
from typing import Dict


def dict_from_text_file(fpath: str) -> Dict[str, float]:
    """Parse a two-column CSV file into a dictionary of string keys and
    float values.

    Will fail an assertion if a line does not contain exactly two columns;
    raises ValueError if the second column is not a valid float.

    Args:
        fpath (str): Path to file to parse.

    Returns:
        Dict[str, float]: Dictionary of string keys and float values.
    """
    out_dict: Dict[str, float] = {}
    with open(fpath, "r") as fp:
        for line in fp:
            comps = list(map(lambda elem: elem.strip(), line.strip().split(",")))
            assert len(comps) == 2
            key = comps[0]
            val = comps[1]
            out_dict[key] = float(val)
    return out_dict
5baf29832e0a44c8131a97168cde796757c05bfa
699,146
def get_PSF_model(psf_models, n_psf, current_idx):
    """Get a single PSF model from the model(s) previously instantiated

    Parameters
    ----------
    psf_models : list
        list of PSF model(s)
    n_psf : int
        number of PSF model(s)
    current_idx : int
        index of the current object

    Returns
    -------
    lenstronomy.PSF instance
        a single PSF model
    """
    return psf_models[current_idx % n_psf]
3848ff065fecea79be7e13ed2ace692f855d3a4d
699,147
import posixpath


def get_fuzzer_benchmark_covered_regions_filestore_path(
        fuzzer: str, benchmark: str, exp_filestore_path: str) -> str:
    """Returns the path to the covered regions json file in the
    |filestore| for |fuzzer| and |benchmark|."""
    return posixpath.join(exp_filestore_path, 'coverage', 'data', benchmark,
                          fuzzer, 'covered_regions.json')
f999a81c53d961a2ed38a7941d7dd6912dae9621
699,148
def breakdown(data):
    """Break down each row into context-utterance pairs.

    Each pair is labeled to indicate truth (1.0) vs distraction (0.0).
    Output is a native array with format: [context, utterance, label]
    """
    output = []
    for row in data:
        context = row[0]
        ground_truth_utterance = row[1]
        output.append([list(context), ground_truth_utterance, 1.0])
        for i in range(2, 11):
            output.append([list(context), row[i], 0.0])
    return output
613cc8ca3d65d8eb22631114a88a934f67f37867
699,149
def string_escape(text: str) -> str:
    """Escape values special to javascript in strings.

    With this we should be able to use something like:
        elem.evaluateJavaScript("this.value='{}'".format(string_escape(...)))
    And all values should work.
    """
    # This is a list of tuples because order matters, and using OrderedDict
    # makes no sense because we don't actually need dict-like properties.
    replacements = (
        ('\\', r'\\'),          # First escape all literal \ signs as \\.
        ("'", r"\'"),           # Then escape ' and " as \' and \".
        ('"', r'\"'),           # (note it won't hurt when we escape the wrong one).
        ('\n', r'\n'),          # We also need to escape newlines for some reason.
        ('\r', r'\r'),
        ('\x00', r'\x00'),
        ('\ufeff', r'\ufeff'),
        # https://stackoverflow.com/questions/2965293/
        ('\u2028', r'\u2028'),
        ('\u2029', r'\u2029'),
    )
    for orig, repl in replacements:
        text = text.replace(orig, repl)
    return text
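A quick demonstration of the escaping (each special character comes back as a two-character backslash sequence):

s = string_escape("it's a \"test\"\nline two")
print(s)  # it\'s a \"test\"\nline two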
f85d02527f3a9fdb9373f2359c2fce86b86a5a89
699,150
def check_permutation(str1, str2):
    """
    1.2 Check Permutation: Given two strings, write a method to decide if
    one is a permutation of the other.

    Complexity: O(n) time, O(n) space
    """
    h = {}
    for c in str1:
        if c not in h:
            h[c] = 0
        h[c] += 1
    for c in str2:
        if c not in h:
            return False
        h[c] -= 1
    for (_, count) in h.items():
        if count != 0:
            return False
    return True
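A couple of quick checks of the character-counting approach:

print(check_permutation("listen", "silent"))  # True
print(check_permutation("apple", "papel"))    # True
print(check_permutation("rat", "car"))        # False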
4da48752e963c05115b3255b03749e8579a9f8ff
699,151
def climb_stairs(n: int) -> int:
    """
    Args:
        n: number of steps of staircase

    Returns:
        Distinct ways to climb an n step staircase

    Raises:
        AssertionError: n not positive integer
    """
    fmt = "n needs to be positive integer, your input {}"
    assert isinstance(n, int) and n > 0, fmt.format(n)
    if n == 1:
        return 1
    dp = [0] * (n + 1)
    dp[0], dp[1] = (1, 1)
    for i in range(2, n + 1):
        dp[i] = dp[i - 1] + dp[i - 2]
    return dp[n]
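The recurrence dp[i] = dp[i-1] + dp[i-2] is the Fibonacci sequence, since each step is reached from one or two steps below; a quick check:

print(climb_stairs(2))  # 2: (1+1) or (2)
print(climb_stairs(5))  # 8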
71ac88a1f475e8fe2da8e525f7924e289dbf28e4
699,152
import pandas as pd
from os import listdir


def sipakmed_to_csv(s_path):
    """
    This function creates a single .csv file from individual .dat files

    Inputs: containing folder string
    """
    sipakmed_df = pd.DataFrame()  # initialize empty dataframe
    for file in listdir(s_path):
        if file.endswith(".dat"):
            df2 = pd.read_table(s_path + file, sep=',', header=None)
            # add features
            if 'NUC' in file:
                df2[28] = 'NUC'
            else:
                df2[28] = 'nuc'
            df2[29] = file[0].lower()
            if file[0].lower() in 'sp':
                df2[30] = 1
            else:
                df2[30] = 0
            # concatenate
            sipakmed_df = pd.concat([sipakmed_df, df2])
    # create headers
    sipakmed_df.columns = [
        'cluster_id', 'image_id', 'area', 'major_axis_length',
        'minor_axis_length', 'eccentricity', 'orientation',
        'equivalent_diameter', 'solidity', 'extent', 'meanI_R', 'meanC_R',
        'smooth_R', 'moment-3-R', 'uniformity-R', 'entropy-R', 'meanI_G',
        'meanC_G', 'smooth_G', 'moment-3-G', 'uniformity-G', 'entropy-G',
        'meanI_B', 'meanC_B', 'smooth_B', 'moment-3-B', 'uniformity-B',
        'entropy-B', 'Nucleus/Cytoplasm', 'Class', 'Normal',
    ]
    # save to file
    sipakmed_df.to_csv('../data/processed/Sipakmed_existing_database.csv')
    return sipakmed_df
6b9ef7e7e9602a59ff63e6cdb6d4de1ae4dc2e4d
699,154
import codecs


def raw_buffered_line_counter(path, encoding="utf-8", buffer_size=1024 * 1024):
    """
    Fast way to count the number of lines in a file.

    :param Path path: Path to file.
    :param str encoding: Encoding used in file.
    :param int buffer_size: Size of buffer for loading.
    :return: int
    """
    # Open file
    f = codecs.open(str(path), encoding=encoding, mode="r")

    # Reader generator
    def _reader_generator(reader):
        b = reader(buffer_size)
        while b:
            yield b
            b = reader(buffer_size)

    # Reader used: f.raw is the underlying binary stream, reached through
    # the codecs wrapper's attribute delegation
    file_read = f.raw.read

    # Count lines
    line_count = sum(buf.count(b'\n') for buf in _reader_generator(file_read)) + 1
    return line_count
700135dc1a5b5bf4a807469e34f56213d7b1265a
699,155
def check_aggregate(
    df, variable, components=None, exclude_on_fail=False, multiplier=1, **kwargs
):
    """Check whether the timeseries values match the aggregation of
    sub-categories

    Parameters
    ----------
    df : IamDataFrame
    variable, components, exclude_on_fail, multiplier : passed to
        :meth:`IamDataFrame.check_aggregate`
    kwargs : used for downselecting the IamDataFrame passed to
        :meth:`IamDataFrame.filter`
    """
    fdf = df.filter(**kwargs)
    if len(fdf.data) > 0:
        vdf = fdf.check_aggregate(
            variable=variable,
            components=components,
            exclude_on_fail=exclude_on_fail,
            multiplier=multiplier,
        )
        df.meta["exclude"] |= fdf.meta["exclude"]  # update if any excluded
        return vdf
969dbd8e221c89e172fcdde0d6a91469bfead1d8
699,156
def consider_trip_number(trip_strategy, total_trips, trip_num):
    """Determines if the vehicle should charge given trip strategy and
    current trip.

    :param int trip_strategy: a toggle that determines if should charge on
        any trip or only after last trip (1 - any trip number, 2 - last trip)
    :param int total_trips: total trips that the vehicle makes
    :param int trip_num: the trip number of the current trip
    :return: (*bool*) -- boolean that represents if the vehicle should charge
    """
    if trip_strategy == 1:
        return True
    elif trip_strategy == 2:
        return total_trips == trip_num
b2fe3a43b95fead4ebf65981cfba39857d1bee3c
699,157
import pathlib


def as_pathlib(path):
    """
    Converts the supplied path to a pathlib.Path object

    :param path: The path to convert
    :return: The path converted to pathlib.Path
    """
    return pathlib.Path(path)
88129771be8c3277320c813502efbcfee6abdc84
699,158
import re


def remove_non_alpha_numeric(input_string):
    """ return string by removing non alphanumeric characters from it """
    return re.sub(r'\W+', '', input_string)
fc7b203e4937a7a5a80f0d3dc9a2d7bca5de6b45
699,159
from typing import List
from typing import Tuple


def find_all_comment_segments(src: str) -> List[Tuple[int, int]]:
    """
    Find all comment segments in a database starting from a position.

    Return a list of pairs [ (comment start pos, comment end pos) ]
    where start pos is inclusive and end pos is exclusive.
    """
    comment_segments = []
    pos = 0
    comment_level = 0
    current_start = None
    while pos < len(src):
        next_comment_open = src.find("$(", pos)
        next_comment_close = src.find("$)", pos)
        if next_comment_open != -1:
            if next_comment_open < next_comment_close or next_comment_close == -1:
                pos = next_comment_open + 2
                comment_level += 1
                if comment_level == 1:
                    current_start = next_comment_open
                continue
        if next_comment_close != -1:
            if next_comment_close < next_comment_open or next_comment_open == -1:
                pos = next_comment_close + 2
                assert comment_level != 0, \
                    f"incorrectly nested comment at {next_comment_close}"
                comment_level -= 1
                if comment_level == 0:
                    assert current_start is not None
                    comment_segments.append((current_start, pos))
                    current_start = None
                continue
        break
    assert comment_level == 0, "unclosed comment at EOF"
    return comment_segments
eec33aae79caa029186d3b320f90682b5a9bebd6
699,161
import os


def files_in_subdirs(dir_name):
    """List all files in a dir, including all sub dirs.

    Args:
        dir_name (str): path to a dir

    Returns:
        list: files in a dir.
    """
    ret = []
    for r, _, f in os.walk(dir_name):
        for file in f:
            ret.append(os.path.abspath(os.path.join(r, file)))
    return ret
cb632d839909c991bf0736e9e34c2edcf63f9242
699,162
def extract_text(tweet):
    """Gets the full text from a tweet if it's short or long (extended)."""

    def get_available_text(t):
        if t['truncated'] and 'extended_tweet' in t:
            # if a tweet is retrieved in 'compatible' mode, it may be
            # truncated _without_ the associated extended_tweet
            # eprint('#%s' % t['id_str'])
            return t['extended_tweet']['full_text']
        else:
            return t['full_text'] if 'full_text' in t else t['text']

    if 'retweeted_status' in tweet:
        rt = tweet['retweeted_status']
        return 'RT @%s: %s' % (rt['user']['screen_name'], extract_text(rt))
    if 'quoted_status' in tweet:
        qt = tweet['quoted_status']
        return get_available_text(tweet) + " --> " + extract_text(qt)
    return get_available_text(tweet)
41660b71e3b020bc25309821b46471e88d79aa49
699,163
def search_linearf(a, x):
    """ Returns the index of x in a if present, None otherwise. """
    for i in range(len(a)):
        if a[i] == x:
            return i
    return None
949cb93218381a1a45002be03c6b8ca4d00092bf
699,164
def modules():
    """Entry point for getting the pyxll modules. Returns a list of module names."""
    return [__name__]
2f53cd00387c7533404b8e97a8da34fb3c5811e9
699,165
def gamma_corr(clip, gamma):
    """ Gamma-correction of a video clip """
    def fl(im):
        corrected = 255 * (1.0 * im / 255) ** gamma
        return corrected.astype("uint8")
    return clip.fl_image(fl)
d98d411c63d932b068d80bef63d42289cb0d8468
699,166
def connected_components(graph):
    """
    Given an undirected graph (a 2d array of indices), return a set of
    connected components, each connected component being an (arbitrarily
    ordered) array of indices which are connected either directly or
    indirectly.
    """
    def add_neighbors(el, seen=None):
        """
        Find all elements which are connected to el. Return an array which
        includes these elements and el itself.
        """
        # seen stores already-visited indices; default to None instead of a
        # mutable default argument
        if seen is None:
            seen = [el]
        # iterate through the neighbors (x) of el
        for x in graph[el]:
            if x not in seen:
                seen.append(x)
                # recursively find neighbors of x
                add_neighbors(x, seen)
        return seen

    # create a list of indices to iterate through
    unseen = list(range(len(graph)))
    sets = []
    i = 0
    while unseen != []:
        # x is the index we are finding the connected component of
        x = unseen.pop()
        sets.append([])
        # add neighbors of x to the current connected component
        for y in add_neighbors(x):
            sets[i].append(y)
            # remove indices which have already been found
            if y in unseen:
                unseen.remove(y)
        i += 1
    return sets
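A small adjacency-list example with two components (indices 0-2 and 3-4):

graph = [[1], [0, 2], [1], [4], [3]]
print(connected_components(graph))  # [[4, 3], [2, 1, 0]]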
e8299cc024dd27414a287ee0afa302ea64c18b68
699,167
def generateCards(numberOfSymb):
    """
    Source: https://fr.wikipedia.org/wiki/Dobble
    """
    nbSymByCard = numberOfSymb
    nbCards = (nbSymByCard ** 2) - nbSymByCard + 1
    cards = []
    n = nbSymByCard - 1
    t = []
    t.append([[(i + 1) + (j * n) for i in range(n)] for j in range(n)])
    for ti in range(n - 1):
        t.append(
            [
                [t[0][((ti + 1) * i) % n][(j + i) % n] for i in range(n)]
                for j in range(n)
            ]
        )
    t.append([[t[0][i][j] for i in range(n)] for j in range(n)])
    for i in range(n):
        t[0][i].append(nbCards - n)
        t[n][i].append(nbCards - n + 1)
        for ti in range(n - 1):
            t[ti + 1][i].append(nbCards - n + 1 + ti + 1)
    t.append([[(i + (nbCards - n)) for i in range(nbSymByCard)]])
    for ti in t:
        cards = cards + ti
    return cards
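This projective-plane construction works when numberOfSymb - 1 is prime (in the real Dobble deck numberOfSymb is 8). A small check with 3 symbols per card, verifying the defining Dobble property that every pair of cards shares exactly one symbol:

cards = generateCards(3)
print(len(cards))  # 7 cards using 7 symbols
for i in range(len(cards)):
    for j in range(i + 1, len(cards)):
        assert len(set(cards[i]) & set(cards[j])) == 1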
5cf56864675668213f593395978fa511d0ed3784
699,168
def get_final_sorted_cavs(dict_all_info, filtered_cavities):
    """
    Uses dict_all_info to rank cavities for further exploitation (FP,
    export as PDB...) and exclude a few more cavities
    """
    # scores will contain the scores of filtered cavities
    scores = []
    # ori_order will contain the original indices of the cavities
    # in filtered_cavities
    ori_order = []
    for key in dict_all_info:
        scores.append(dict_all_info[key]["score"])
        ori_order.append(key)
    order = sorted(range(len(scores)), key=lambda k: scores[k], reverse=True)
    final_cavities = filtered_cavities[ori_order][order]
    return final_cavities, [ori_order[x] for x in order]
01513b4e5c55c7f9ec1e48274103044d92ed2fd5
699,169
import math
import torch


def loglinspace(a, b, n=100, **kwargs):
    """Like :meth:`torch.linspace`, but spread the values out in log space,
    instead of linear space. Different from :meth:`torch.logspace`"""
    return math.e ** torch.linspace(math.log(a), math.log(b), n, **kwargs)
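A quick check that the endpoints are preserved and the spacing is multiplicative rather than additive:

vals = loglinspace(1.0, 100.0, 3)
print(vals)  # approximately tensor([  1.,  10., 100.])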
ea456462a0d029f1ada1c4a9e40c9377b86e815d
699,170
def optional_function(func):
    """Only apply function if input value is not None.

    Function must be unary.
    """
    def wrapper(x, *args, **kwargs):
        if x is None:
            return None
        return func(x, *args, **kwargs)
    return wrapper
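Usage as a decorator; None passes through untouched instead of raising inside the wrapped function:

@optional_function
def double(x):
    return 2 * x

print(double(3))     # 6
print(double(None))  # None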
3db42719579fc6160b282af4e637f72f9df4781e
699,171
def purchaseQuantity_perDate_month(data):
    """
    DOCSTRING: Creates a 'Purchase Quantity per Day' column based on
    'Purchase Date' and 'Purchase Quantity' columns, to assist future
    aggregations and plotting computations. Another new column 'Purchase
    Quantity per Month' gets created based on 'Month' and 'Purchase
    Quantity'. These generated columns hold total items sold for each
    date and month respectively.

    INPUT:
        > data : Only accepts Pandas DataFrame or TextParser.

    OUTPUT:
        Pandas DataFrame or TextParser with new columns added for
        assessing total number of items sold per date and month.
    """
    data["Purchase Quantity per Day"] = data.groupby("Purchase Date")["Purchase Quantity"].transform("sum")
    data["Purchase Quantity per Month"] = data.groupby("Month")["Purchase Quantity"].transform("sum")
    return data
9a4605e801ca7921065580059ef0309d312c8543
699,172
import argparse


def parser_selscan_norm_common(parser=argparse.ArgumentParser()):
    """
    Build a parser to which arguments are added which are common to
    several selscan functions.
    """
    input_file_help_string = """A list of files delimited by whitespace for joint normalization.

    Expected format for iHS/nSL files (no header):
    <locus name> <physical pos> <freq> <ihh1/sL1> <ihh2/sL0> <ihs/nsl>

    Expected format for XP-EHH files (one line header):
    <locus name> <physical pos> <genetic pos> <freq1> <ihh1> <freq2> <ihh2> <xpehh>"""

    parser.add_argument("inputFiles", help=input_file_help_string, nargs='+')
    parser.add_argument('--bins', default=100, type=int,
                        help="""The number of frequency bins in [0,1] for score normalization
                        (default: %(default)s)""")
    parser.add_argument('--critPercent', default=-1.00, type=float,
                        help="""Set the critical value such that a SNP with iHS in the most extreme
                        CRIT_PERCENT tails (two-tailed) is marked as an extreme SNP.
                        Not used by default (default: %(default)s)""")
    parser.add_argument('--critValue', default=2.00, type=float,
                        help="""Set the critical value such that a SNP with |iHS| > CRIT_VAL is
                        marked as an extreme SNP. Default as in Voight et al.
                        (default: %(default)s)""")
    parser.add_argument('--minSNPs', default=10, type=int,
                        help="""Only consider a bp window if it has at least this many SNPs
                        (default: %(default)s)""")
    parser.add_argument('--qbins', default=20, type=int,
                        help="""Outlying windows are binned by number of sites within each window.
                        This is the number of quantile bins to use.
                        (default: %(default)s)""")
    parser.add_argument('--winSize', default=100000, type=int,
                        help="""The non-overlapping window size for calculating the percentage
                        of extreme SNPs (default: %(default)s)""")
    parser.add_argument('--bpWin', default=False, action='store_true',
                        help="""If set, will use windows of a constant bp size with varying
                        number of SNPs""")
    return parser
a35fb2ccb4eedacbd480a2fd00d32f2ab5478aab
699,174
def HomePage():
    """List all available api routes."""
    return (
        f"Available Routes are: <br/>"
        f"<a href='/api/v1.0/precipitation'>Precipitation data</a><br/>"
        f"<a href='/api/v1.0/stations'>List of all the stations</a><br/>"
        f"<a href='/api/v1.0/tobs'>Last year's temperature for the most active station</a><br/>"
        f"Temperature statistics since a specified date: <a href='/api/v1.0/2016-01-01'>/api/v1.0/&lt;start_date&gt;</a><br/>"
        f"Temperature statistics between date range: <a href='/api/v1.0/2016-01-01/2016-01-15'>/api/v1.0/&lt;start_date&gt;/&lt;end_date&gt;</a><br/>"
        f"<br/>"
        f"*For dates, please use the format: (eg. 2016-01-01) <br/>"
    )
6edc00ae7b85e1d885a27c99d248f7f0551fdfd7
699,175