content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def response_to_lines(response):
    """Split an HTTP response body into a list of text lines.

    Decodes the payload as UTF-8, strips soft-hyphen characters
    (U+00AD), then splits on newlines.
    """
    text = response.read().decode("utf-8")
    return text.replace("\xad", "").split("\n")
b4e2359e9be09f5bc1838033bf77b0f20c670de8
44,493
def getSplicingData(data):
    """Return only the rows corresponding to splicing mutations.

    Arguments:
        data = dataframe with a "SpliceSite" column

    Returns:
        sp_data = dataframe restricted to rows where SpliceSite == 'yes'
    """
    splice_mask = data["SpliceSite"] == 'yes'
    return data[splice_mask]
c4039f334ad6450b04b28743c7ee26454f33f4aa
44,494
def mi(self):
    """Return the mutual information between events and observations.

    Computed via the identity I(X;Y) = H(X) - H(X|Y).
    """
    marginal = self.entropy()
    conditional = self.entropy(conditional=True)
    return marginal - conditional
18624d4926acf7519dc1b2f9987e11480688be85
44,496
def _build_rule_table(bnf_grammar_ast, terminals, skip): """ Args: bnf_grammar_ast: grammar on bnf ast form produced by _ebnf_grammar_to_bnf terminals (list): list of terminals of the language Returns: A dict that maps every non-terminal to a list of right hand sides of production rules from that non-terminal. """ rule_table = {} for rule in bnf_grammar_ast: lhs = rule[0] rhs = rule[1] if lhs in terminals or lhs in skip: continue if lhs not in rule_table: rule_table[lhs] = [] rule_table[lhs] += [rhs] return rule_table
d95e53a801aeef9a0fc3ba1679ccc7d7b98c2be9
44,497
def get_facts(cat_file="facts.txt"):
    """Return the cat facts from the facts file, one fact per line.

    Note: each returned line keeps its trailing newline character.
    """
    with open(cat_file, "r") as handle:
        return list(handle)
b2e9666bc1a833d25e73e61529c256f43bfb5c3b
44,498
import os


def path_checker(path):
    """Check that a path is valid, normalizing Windows-style separators.

    Args:
        path (string): the path to check.

    Returns:
        string: the path, with backslashes replaced by forward slashes
        when present (such paths are returned without an existence
        check, matching the original control flow).

    Raises:
        Exception: if the path does not exist.
    """
    if "\\" in path:
        # Bug fix: str.replace returns a new string; the original code
        # discarded the result, so the normalization never happened.
        return path.replace("\\", "/")
    if os.path.exists(path):
        return path
    raise Exception("Path does not exist")
2bd57db3e7129c2e15b16b3f3a5b7cc942c54086
44,499
def f1(precision, recall):
    """Return the F1-score for the given precision and recall.

    :param precision: The precision value.
    :type precision: float
    :param recall: The recall value.
    :type recall: float
    :return: The F1-score; 0.0 when both precision and recall are zero
        (the harmonic mean is conventionally 0 in that degenerate case,
        instead of raising ZeroDivisionError).
    :rtype: float
    """
    if precision + recall == 0:
        return 0.0
    return 2 * (precision * recall) / (precision + recall)
562047332005651ea4014904cd60795d9c60e47d
44,500
def _esc(code): """Get an ANSI color code based on a color number.""" return '\033[{}m'.format(code)
086f9c0062dd78bca6772771b65e60132f1a5d08
44,501
def round_float(number, decimals):
    """Round a decimal number to an exact number of decimal places.

    :param number: (float) value convertible to float.
    :param decimals: (int) number of digits after the decimal point.
    :return: the rounded float.
    """
    return round(float(number), decimals)
faa92a2fe2187f111cc028395085edd8b4dda8f1
44,503
def selection_sort(arr):
    """Selection sort in place: move the minimum to the front each pass.

    Returns the counter the original tracked: the number of times the
    running-minimum candidate was replaced during the scans.
    """
    min_updates = 0
    for front in range(len(arr)):
        smallest = front
        for candidate in range(front + 1, len(arr)):
            if arr[smallest] > arr[candidate]:
                smallest = candidate
                min_updates += 1
        arr[front], arr[smallest] = arr[smallest], arr[front]
    return min_updates
1c0458db44a931d5bfa08f80adc583d70eb1fcfd
44,504
def calc_point_squre_dist(point_a, point_b):
    """Return the squared Euclidean distance between two marking points."""
    dx = point_a[0] - point_b[0]
    dy = point_a[1] - point_b[1]
    return dx ** 2 + dy ** 2
855d1678f8ff66c0047c45441942e6e9d978fe2e
44,505
import unittest


def discover_tests(startdir):
    """Discover unittest tests under a directory and return the suite."""
    return unittest.TestLoader().discover(startdir)
af9fafe8ca8223c7d732e0c59b9ef40fc43a1c29
44,511
def find_pivot(input_arr, min_idx, max_idx):
    """Find the pivot index of a rotated sorted array.

    The pivot is the index of the largest element (the rotation point).
    Binary search: O(log2(n)) time, O(1) space.

    Args:
        input_arr (array): rotated array.
        min_idx (int): left bound of the search window.
        max_idx (int): right bound of the search window.

    Returns:
        pivot_idx (int)
    """
    mid = (min_idx + max_idx) // 2
    # Pivot found: the element right after mid is smaller.
    if mid < max_idx and input_arr[mid] > input_arr[mid + 1]:
        return mid
    # Pivot found: mid is smaller than its predecessor, so mid-1 is it.
    if mid > min_idx and input_arr[mid] < input_arr[mid - 1]:
        return mid - 1
    # Otherwise recurse into whichever half contains the rotation point.
    if input_arr[min_idx] >= input_arr[mid]:
        return find_pivot(input_arr, min_idx, mid - 1)
    return find_pivot(input_arr, mid + 1, max_idx)
888170f099f78fda36b4832757f11fe9d0d66d83
44,512
def opv(d, func, *args):
    """Apply `func` to every value of a dictionary.

    :param d: A dictionary.
    :param func: Callable accepting a value of `d` as first parameter.
    :param args: Additional positional arguments passed to `func`.
    :return: `dict` mapping the keys of `d` to the function results.
    """
    result = {}
    for key, value in d.items():
        result[key] = func(value, *args)
    return result
418abaf1e7a843a1c280d2c55fa7ac22f9496d2d
44,513
from typing import List from typing import Any def _list_split( lst: List[Any], split_point: Any ) -> List[List[Any]]: """ Splits a given lists into multiple lists based on the provided split points. :param lst: Given list that needs to be split. :param split_point: Element in the list that is used as a delimiter to split the list into different lists. :return: A list of lists containing the separated lists. """ temp = [] final = [] for element in lst: if split_point in element: final.append(temp) temp = [element] else: temp.append(element) final.append(temp) final.remove([]) return final
e3eb6613e56757f11a755bd38e3d5acbba8eec75
44,514
def pk_encode_public_key(key):
    """Create an ASN1 representation of a public key for external storage."""
    # encode_key(1) selects the public-key encoding variant.
    return key.encode_key(1)
c3b769d96a990346f2445b0e373363244202838f
44,515
def _value_properties_are_referenced(val): """ val is a dictionary :param val: :return: True/False """ if ((u'properties' in val.keys()) and (u'$ref' in val['properties'].keys())): return True return False
1004edc48113a421302b1111c6c1f244bfb8c02b
44,516
def path2handle(path):
    """Translate a full path to a handle (its final '/'-separated part)."""
    _, _, handle = path.rpartition('/')
    return handle
644328243d494707a02f1746d8d8e2987342761b
44,517
def coronavirus_misspellings_and_typos_regex():
    """Return a regex matching common misspellings of "coronavirus".

    Use with re.sub to normalize text to the correct spelling::

        >>> import re
        >>> re.sub(coronavirus_misspellings_and_typos_regex(), 'coronavirus', 'caronavirus')
        'coronavirus'
        >>> re.sub(coronavirus_misspellings_and_typos_regex(), 'coronavirus', 'covid')
        'coronavirus'
    """
    # Alternation order matters: the regex engine takes the first
    # alternative that matches, so longer variants precede their
    # prefixes (e.g. 'corona' before 'coron').
    pattern = (
        "coronavirus|covid19|covid.19|caronavirus|coranavirus|cornavirus|cornovirus|coronaviris|cornoavirus|"
        "coronavirius|coronavirous|coronaviru|coronaviurs|coronavius|coronoavirus|coronovirus|coronvirus|"
        "corona iris|corona virus|covid|corona|coron"
    )
    return pattern
c741896183208251b4262cb0fa59a40230b6a762
44,518
import csv def _read_csv( filepath: str, delimiter: str = ",", quotechar: str = '"', escapechar: str = "" ) -> dict: """Process a given csv into a python dictionary Arguments: filepath: string pointing to csv file delimiter: string that denotes what char seperates values in csv, default is ',' quotechar: string that denotes what char surrounds fields containing delimeter char, default is '"' escapechar: string that denotes what char escaptes the delimeter char, default is no escape char. Returns: csv_dict: dictionary whose keys are column numbers and values are column lists """ csv_dict = {} with open(filepath, "r") as csv_file: csv_reader = csv.reader( csv_file, delimiter=delimiter, quotechar=quotechar, escapechar=escapechar ) header = next(csv_reader) num_cols = len(header) for num in range(num_cols): csv_dict[num] = [header[num]] for row in csv_reader: for num in range(num_cols): csv_dict[num].append(row[num]) return csv_dict
4a57d54bb569f85ba6721682c9edb501a70b95cb
44,520
def sort_dict_by_value(m_dict):
    """Return the dict's (key, value) pairs sorted by value, descending."""
    return sorted(m_dict.items(), key=lambda pair: pair[1], reverse=True)
f4e84eed9e4353ddc67822ecccef5ed0aa2a7dc2
44,521
def timTrangSinh(cucSo):
    """Find the position of the Trang Sinh star.

    By cuc number, Trang Sinh sits at Dan, Ty, Than or Hoi.
    NOTE (per master Thien Luong): male -> forward, female -> backward.

    Args:
        cucSo (int): cuc number (2, 3, 4, 5, 6).

    Returns:
        int: position of the Trang Sinh star.

    Raises:
        Exception: when cucSo is not a valid cuc number.
    """
    positions = {
        6: 3,   # Hoa luc cuc  -> Trang Sinh at Dan
        4: 6,   # Kim tu cuc   -> Trang Sinh at Ty
        2: 9,   # Thuy nhi cuc -> Trang Sinh at Than
        5: 9,   # Tho ngu cuc  -> Trang Sinh at Than
        3: 12,  # Moc tam cuc  -> Trang Sinh at Hoi
    }
    position = positions.get(cucSo)
    if position is None:
        raise Exception("Không tìm được cung an sao Trường sinh")
    return position
fe187369531f864fe5bc8a9a76dc5e5641e9d891
44,522
import math


def calculate_angles(start, end):
    """Return the angle between the segment start->end and the horizontal.

    Args:
        start: start point (indexable and subtractable, e.g. numpy array).
        end: end point.

    Returns:
        The angle in radians (math.atan of dy/dx).
    """
    delta = end - start
    dy = delta[1]
    dx = delta[0]
    return math.atan(dy / dx)
18ee4f5f2de0468dbe77fc603d4026615aa71b4e
44,523
import click


def check_whether_to_tag(tags, metadata):
    """Return True when the tag count matches the metadata track count.

    Otherwise print a red warning via click and return False, skipping
    the retagging procedure.
    """
    expected = sum(len(disc) for disc in metadata["tracks"].values())
    if len(tags) == expected:
        return True
    click.secho(
        "Number of tracks differed from number of tracks in metadata, "
        "skipping retagging procedure...",
        fg="red",
    )
    return False
fd19927988151e7d9022a884c133b394481773d3
44,524
def check_for_tree(x, y, trees, max_x):
    """Return True if a tree occupies (x, y), with the pattern repeating
    infinitely every `max_x` columns.

    >>> check_for_tree(0, 0, {(0, 0)}, 1)
    True
    >>> check_for_tree(0, 0, {(0, 1)}, 1)
    False
    >>> check_for_tree(15, 0, {(0, 0)}, 5)
    True

    :param x:
    :param y:
    :param trees:
    :param max_x:
    :return:
    """
    if x >= max_x:
        # Wrap into the base pattern; equivalent to the original's
        # repeated subtraction for positive max_x.
        x %= max_x
    return (x, y) in trees
6ef440937634fedd3d9cd352f6d0cbd0805147b0
44,525
def var2str(var_name):
    """Convert a computation variable name into its plot label.

    (Specific to the tropopause problem and the examples already
    implemented.)

    :param var_name: name of the variable during computation
    :type var_name: str
    :return: the LaTeX label for plots, or `var_name` itself when no
        mapping exists
    :rtype: str
    """
    plot_labels = {
        'ut': r'$u_t \ (m.s^{-1})$',
        'vt': r'$v_t \ (m.s^{-1})$',
        'theta_t': r"$\theta^{'}_{tp} \ (K)$",
        'us': r'$u_s \ (m.s^{-1})$',
        'vs': r'$v_s \ (m.s^{-1})$',
        'w': r'$w \ (m.s^{-1})$',
        'Delta_z': r'$\Delta z \ (m)$',
        'Delta_T_bb': r'$\Delta T_{bb} \ (K)$',
        'Delta_T_hist': r'$\Delta T_{bb}^{hist} \ (K)$',
    }
    return plot_labels.get(var_name, var_name)
0bab37a58e155316abba3fb7bdc1b3754adf53af
44,526
def ct_neq_u32(val_a, val_b):
    """Constant-time inequality test for 32-bit unsigned values.

    Return 1 if val_a != val_b, 0 otherwise, with no data-dependent
    branches.

    :type val_a: int
    :type val_b: int
    :param val_a: an unsigned integer representable as a 32 bit value
    :param val_b: an unsigned integer representable as a 32 bit value
    :rtype: int
    """
    val_a &= 0xffffffff
    val_b &= 0xffffffff
    # If the values differ, one of the two subtractions underflows
    # mod 2**32 and sets the top bit; OR-ing and shifting extracts it.
    diff_ab = (val_a - val_b) & 0xffffffff
    diff_ba = (val_b - val_a) & 0xffffffff
    return (diff_ab | diff_ba) >> 31
57f9b86232c45d2d271f9f7800519494f3802c2f
44,527
def file_system_arn(arn):
    """Convert an ARN to a file-system friendly string so it can be
    used for directory and file names.
    """
    replacements = ((":", "#"), ("/", "_"), (" ", "_"))
    for old, new in replacements:
        arn = arn.replace(old, new)
    return arn
2c355a91e48a5ad87682e945d37f3b9c61311e46
44,528
from concurrent.futures import ThreadPoolExecutor
import functools
import asyncio


def force_async(fn):
    """Turn a sync function into an async one using threads.

    Arguments:
        fn {function}

    Returns:
        function - returns an awaitable when called
    """
    executor = ThreadPoolExecutor()

    @functools.wraps(fn)
    def async_wrapper(*args, **kwargs):
        # Run in the pool and expose the result as an asyncio future.
        return asyncio.wrap_future(executor.submit(fn, *args, **kwargs))

    return async_wrapper
e1ccfd6f48efda171b8f5d9d7c863788f2b03a62
44,529
import torch


def dice_loss_with_sigmoid(sigmoid, targets, smooth=1.0):
    """Soft Dice loss from sigmoid probabilities.

    sigmoid: (torch.float32) shape (N, 1, H, W)
    targets: (torch.float32) shape (N, H, W), value {0,1}
    smooth: additive smoothing term in numerator and denominator.

    Returns the batch mean of 1 - Dice coefficient.
    """
    probs = torch.squeeze(sigmoid, dim=1)
    intersection = (probs * targets).sum(dim=(1, 2))
    denominator = probs.sum(dim=(1, 2)) + targets.sum(dim=(1, 2))
    per_sample = 1 - (2 * intersection + smooth) / (denominator + smooth)
    return per_sample.mean()
ed01b4f531565f260bde78fb53aad67e01961012
44,530
def getIfromRGB(rgb):
    """Convert an rgb tuple (0-255 per channel) to a packed integer.

    :param rgb: the rgb tuple on the 255 scale
    :return: the integer
    """
    red, green, blue = rgb[0], rgb[1], rgb[2]
    return (red << 16) + (green << 8) + blue
b5135d62f9c602997bed5c8feabf4224d41e85ee
44,532
def getListOfTasks(workload):
    """_getListOfTasks_

    Get a complete, flattened list of tasks in a workload.

    Returns a list of task helpers.
    """
    return [
        task
        for prime_task in workload.taskIterator()
        for task in prime_task.taskIterator()
    ]
c2f521c7ad21913de0c366d722da493d91a61043
44,535
import copy


def subtract(cut_plane_a_in, cut_plane_b_in):
    """Subtract the u, v, w terms of cut_plane_b from cut_plane_a.

    Args:
        cut_plane_a_in: plane of data to subtract from.
        cut_plane_b_in: plane of data to subtract.

    Returns:
        A new cut plane holding cut_plane_a_in minus cut_plane_b_in.
    """
    # Deep-copy both inputs so the originals are left untouched.
    result = copy.deepcopy(cut_plane_a_in)
    other = copy.deepcopy(cut_plane_b_in)

    # Align both frames on the (x1, x2) grid before subtracting.
    result.df = result.df.set_index(["x1", "x2"])
    other.df = other.df.set_index(["x1", "x2"])
    result.df = result.df.subtract(other.df).reset_index()

    return result
b4b74c32d7008465a5c928a7b67a5cfe6973bc29
44,536
def fix_count(count):
    """Format a count with comma thousands separators."""
    return format(int(count), ',')
c0a85f118447a0643952ae0ebc02a0fe117de102
44,538
def set_name_h(name_x, name_q):
    """Set the full instrument names in a regression.

    Parameters
    ----------
    name_x : list of strings
        User provided exogenous variable names.
    name_q : list of strings
        User provided instrument variable names.

    Returns
    -------
    name_h : list of strings
        Concatenation of the exogenous and instrument names.
    """
    name_h = list(name_x)
    name_h.extend(name_q)
    return name_h
1ffadb8e798a96f0661e7b94918adcbd044d0959
44,540
import importlib


def packageHasMethod(package, method):
    """Check whether `package` can be imported and has attribute `method`.

    Returns None if the package could not be imported, otherwise a bool
    telling whether the attribute exists.

    example: packageHasMethod('PySide6.QtWidgets', 'QApplication')
    """
    try:
        imported = importlib.import_module(package)
    except ImportError:
        # Narrowed from a bare `except:`; only a failed import means
        # "package not found". Other errors now propagate instead of
        # being silently swallowed.
        return None
    return hasattr(imported, method)
613f1a1f79ccedc887441465956f9d1561bdadc6
44,544
import configparser def _config_ini(path): """ Parse an ini file :param path: The path to a file to parse :type file: str :returns: Configuration contained in path :rtype: Dict """ # When strict is enabled, duplicate options are not allowed in the # parsed INI; however, Oslo allows duplicate values. This change # causes us to ignore the duplicate values which is acceptable as # long as we don't validate any multi-value options conf = configparser.ConfigParser(strict=False) conf.read(path) return dict(conf)
f8827dc558de4d8945e2a2789f7ce99788161cdf
44,545
import unicodedata


def remove_control_chars(utt):
    """Replace control characters in an utterance with spaces.

    The text is stripped and lower-cased first. Accepts either a single
    string or a list of strings (each element is processed).

    Args:
        utt (str or list): The utterance(s).

    Returns:
        str or list: The utterance(s) after processing; any other type
        is returned unchanged.
    """

    def _clean(text):
        # Unicode category "C*" covers control/format characters.
        text = text.strip().lower()
        return "".join(
            c if unicodedata.category(c)[0] != "C" else " " for c in text
        )

    if isinstance(utt, str):
        return _clean(utt)
    if isinstance(utt, list):
        # Bug fix: the original stripped/lowered before the type check
        # (crashing on lists) and its list branch iterated the wrong
        # variable; clean each element instead.
        return [_clean(u) for u in utt]
    return utt
35c857f605d3c958cae4fe253f8924b13ee4a798
44,546
def divisible_cheksum(string):
    """Take rows of numbers and return the sum of their divisible pairs.

    Each line of `string` is a whitespace-separated row; for every pair
    (a, b) in a row with a > b and a evenly divisible by b, a // b is
    added to the total (Advent of Code 2017, day 2 part 2).
    """
    total = 0
    for line in string.split('\n'):
        values = [int(token) for token in line.split()]
        for i, a in enumerate(values):
            for j, b in enumerate(values):
                # Bug fix: the original only compared against row[1:],
                # so a divisor sitting in the first column was never
                # found; scan every other element instead.
                if i != j and a > b and a % b == 0:
                    total += a // b
    return total
0ffcb6d6d908d203f4b1203dab3be16e923b1fab
44,547
def call(func):
    """Call the input function with no arguments and return its result.

    Equivalent to `functioncaller()`.

    >>> from functools import partial
    >>> call(partial(int, "42"))
    42
    """
    result = func()
    return result
b1043aeac4b32ea2d1a3b5233f7230e9225e5e91
44,548
import math


def rotate(x, y, degree):
    """Rotate a coordinate around point (0,0).

    - x and y specify the coordinate.
    - degree is a number from 0 to 360.

    Returns a new coordinate as a (newx, newy) tuple.
    """
    # Keep the original degree->radian formula for bit-identical results.
    radian = float(degree) * 2 * math.pi / 360.0
    cos_t = math.cos(radian)
    sin_t = math.sin(radian)
    return (cos_t * x - sin_t * y, sin_t * x + cos_t * y)
6dd39e71d5fece2de6a829c5f62975e4f271cc30
44,549
def _raise_exception_if_invalid_action_values( action_values=None, container_type=list, empty_allowed=False, different_item_types_allowed=False, preferred_exception=ValueError, ): """Raise an exception if supplied action_values are deemed invalid Args: action_values (list): aciton_values to check validity for container_type (list): container type of action_Values empty_allowed(bool): Whether action_values is allowed to be emoty preferred_exception(Exception): preferred exception to call to raise an error """ # Raise exception if action_values is still None if action_values is None: raise preferred_exception( "Please supply required attribute: action_values" ) # Non-empty action_values else: # Raise exception if action_values is not of container type if not isinstance(action_values, container_type): raise preferred_exception( "action_values has to be of type: {}".format(container_type) ) # Raise exception if action_values list is empty # and empty not allowed elif len(action_values) == 0: if not empty_allowed: raise preferred_exception( "Required attribute action_values cannot be empty list" ) # Raise exception if action_values list has different types # and different item types not allowed else: def has_different_types_of_items(some_list): return len(set([v.__class__ for v in some_list])) != 1 if has_different_types_of_items(action_values): if not different_item_types_allowed: raise preferred_exception( "Items in this action's action_values should all " "have the same type" )
0aeb233c341eb1698c09742ded01c4cd133ad77a
44,551
def additionner(a, b):
    """Return the sum of two integers, raising TypeError otherwise."""
    for value in (a, b):
        if not isinstance(value, int):
            raise TypeError
    return a + b
9682da47831987be4637c0aa7d0c95c805763e5c
44,552
def _find_types(api, project): """ Returns dict containing mapping from dtype to type. """ loc_types = api.get_localization_type_list(project) state_types = api.get_state_type_list(project) loc_types = {loc_type.dtype:loc_type for loc_type in loc_types} state_types = {state_type.association:state_type for state_type in state_types} return loc_types, state_types
4c99582b5c69cec45925e94564c955e1ad532752
44,553
def get_account_ticket_count() -> int:
    """get_account_ticket_count() -> int

    (internal) Returns the number of tickets for the current account.

    NOTE: stub binding — the real value comes from the engine; this
    placeholder always returns int() == 0.
    """
    return int()
f57a279d7415382ae3a52f26a9a1dc83a72f0b4b
44,554
import subprocess


def sway_set_split(con_id, split):
    """Send the split-layout message for a container to sway via swaymsg.

    Always returns True; the swaymsg process outcome is not checked.
    """
    message = '[con_id="{}"] {}'.format(con_id, split)
    subprocess.Popen(
        ['swaymsg', message],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    return True
83a5887b97a8f5449a669d826f707cdf4500c8ec
44,555
def filter_antibiotics(prescriptions):
    """Filter a prescriptions DataFrame down to the antibiotics usable
    for Sepsis-3 computation.

    Based on the following sql script by Alistair Johnson and Tom Pollard:
    https://github.com/alistairewj/sepsis3-mimic/blob/master/query/tbls/abx-poe-list.sql

    Args:
        prescriptions: pandas DataFrame with 'route' and 'drug' columns
            # assumes the MIMIC PRESCRIPTIONS table schema — TODO confirm

    Returns:
        A filtered copy of the input DataFrame.
    """
    # Work on a copy and normalize route/drug casing for matching.
    prescriptions = prescriptions.copy()
    prescriptions['route'] = prescriptions.route.str.lower()
    prescriptions['drug'] = prescriptions.drug.str.lower()
    prescriptions.dropna(subset=['drug'], inplace=True)
    # we exclude routes via the eye, ears, or topically
    # (astype(bool) guards against non-bool results from str.contains,
    # e.g. when the route is NaN)
    mask = ~prescriptions.route.str.contains("ear").astype(bool)
    mask &= ~prescriptions.route.str.contains("eye").astype(bool)
    # Abbreviated routes: ou/os/od = eyes, au/as/ad = ears, tp = topical.
    mask &= ~prescriptions.route.isin(('ou','os','od','au','as','ad', 'tp'))
    prescriptions = prescriptions.loc[mask]
    # Keep only drugs on the curated antibiotic list (lowercase names
    # as they appear in the source data; some entries deliberately
    # carry trailing spaces).
    mask = prescriptions.drug.isin([
        'cefazolin', 'piperacillin-tazobactam', 'vancomycin',
        'sulfameth/trimethoprim ds', 'levofloxacin',
        'sulfameth/trimethoprim ss', 'amoxicillin-clavulanic acid',
        'aztreonam', 'azithromycin ', 'metronidazole (flagyl)',
        'piperacillin-tazobactam na', 'ampicillin-sulbactam',
        'doxycycline hyclate', 'nitrofurantoin monohyd (macrobid)',
        'cefepime', 'ceftazidime', 'amoxicillin', 'clarithromycin',
        'azithromycin', 'ciprofloxacin hcl', 'tobramycin sulfate',
        'clindamycin', 'cephalexin', 'metronidazole', 'ampicillin sodium',
        'ciprofloxacin iv', 'vancomycin intraventricular',
        'vancomycin oral liquid', 'cefpodoxime proxetil', 'gentamicin',
        'nitrofurantoin (macrodantin)', 'vancomycin enema',
        'amoxicillin oral susp.', 'clindamycin solution', 'minocycline',
        'ceftolozane-tazobactam', 'erythromycin',
        'amoxicillin-clavulanate susp.',
        'sulfameth/trimethoprim suspension', 'dicloxacillin',
        'vancomycin antibiotic lock', 'sulfameth/trimethoprim',
        'amikacin', 'ampicillin', 'gentamicin sulfate', 'trimethoprim',
        'tetracycline hcl', 'moxifloxacin',
        'sulfamethoxazole-trimethoprim', 'sulfadiazine',
        'ceftazidime antibiotic lock', 'penicillin v potassium',
        'penicillin g benzathine', 'penicillin g potassium',
        'avelox', 'rifampin', 'tetracycline', 'ery-tab',
        'erythromycin ethylsuccinate suspension', 'ciprofloxacin',
        'doxycycline', 'bactrim', 'vancomycin ', 'amikacin inhalation',
        'penicillin g k graded challenge', 'cefadroxil',
        'tobramycin inhalation soln', 'vancocin',
        'cefepime graded challenge', 'ceftolozane-tazobactam *nf*',
        'ceftazidime graded challenge',
        'piperacillin-tazo graded challenge', 'augmentin suspension',
        'nitrofurantoin macrocrystal',
        'ampicillin-sulbact graded challenge', 'clindamycin suspension',
        'ceftazidime-avibactam *nf*', 'augmentin',
        'ampicillin graded challenge', 'doxycycline hyclate 20mg',
        'clindamycin phosphate', 'cefdinir', 'gentamicin (bulk)',
        'streptomycin sulfate', 'vancomycin intrathecal',
        'ceftazidime-avibactam (avycaz)', 'nitrofurantoin ',
        'cefpodoxime', 'oxacillin', 'cipro', '*nf* moxifloxacin',
        'flagyl', 'nitrofurantoin', 'levofloxacin graded challenge',
        'tobramycin with nebulizer', 'keflex', 'chloramphenicol na succ',
        'tobramycin in 0.225 % nacl', 'ciprofloxacin ',
        'doxycycline monohydrate', 'vancomycin 125mg cap',
        'vancomycin ora', 'gentamicin antibiotic lock', 'cefotaxime',
        'ciproflox', 'amoxicillin-clavulanate susp',
        'amoxicillin-pot clavulanate', 'gentamicin intraventricular',
        'gentamicin 2.5 mg/ml in sodium citrate 4%',
        'sulfameth/trimethoprim ', 'trimethoprim-sulfamethoxazole',
        'cefuroxime axetil', 'vancomycin 250 mg', 'tobramycin',
        'levofloxacin 100mg/4ml solution', 'macrodantin',
        'rifampin 150mg capsules', 'cefoxitin', '*nf* cefoxitin sodium',
        'ampicillin-sulbactam sodium', 'doxycycline ', 'bactrim ',
        'bactrim ds', 'neo*iv*gentamicin', 'neo*iv*oxacillin',
        'neo*iv*vancomycin', 'neo*iv*penicillin g potassium',
        'neo*iv*cefotaxime', 'trimethoprim oral soln',
        'cephalexin suspension', 'penicillin ', 'neo*iv*cefazolin',
        'levofloxacin ', 'neo*iv*ceftazidime', 'neo*po*azithromycin',
        'erythromycin ethylsuccinate', 'zithromax z-pak',
        'vancomycin for inhalation', 'vancomycin for nasal inhalation',
        'penicillin v potassium suspension',
        'vancocin (vancomycin)', 'minocycline 100mg tablets',
        'clindamycin cap', 'cefpodoxime 200mg tab',
        'clindamycin hcl caps', 'clindamycin hcl',
        'nitrofurantoin monohyd/m-cryst', 'nitrofurantoin macrocrystals',
        'nitrofurantoin macrocystals', 'vancomycin capsule',
        '*nf* cefuroxime', 'vancomycin oral capsule', 'vancomycin caps',
        'erythromycin ', 'azithromycin po susp', 'cayston',
        'vancomycin 250mg', 'cefotaxime ', 'vancomycin-heparin lock',
        'amoxicillin-clavulanate po susp 400 mg-57 mg/5 ml',
        'penicillin v potassium solution', 'inv-tivantinib',
        'cefazolin 2 g'])
    prescriptions = prescriptions.loc[mask]
    return prescriptions
91c6bdc5a52da77ed8e5111f900d3d8c3cc5133f
44,556
def deci_to_time(ra=None, dec=None):
    """Convert decimal RA and dec values into arc-time coordinates.

    INPUTS
        ra: The float value of right ascension (or None).
        dec: The float value of declination (or None).

    OUTPUTS
        new_ra: 'HH MM SS.ss' string; -1 when no ra was supplied.
        new_dec: '+/-DD MM SS.ss' string; -1 when no dec was supplied.
    """
    new_ra = -1
    new_dec = -1

    if ra is not None:
        if type(ra) != float:
            raise ValueError('DECI_TO_TIME: RA is not a float. Cannot convert.')
        # 15 degrees of right ascension per hour of arc time.
        total_hours = ra / 15.0
        hours_int = int(total_hours)
        minutes = (total_hours - hours_int) * 60.0
        minutes_int = int(minutes)
        seconds = (minutes - minutes_int) * 60.0
        new_ra = '{0:02d} {1:02d} {2:.2f}'.format(hours_int, minutes_int, seconds)

    if dec is not None:
        if type(dec) != float:
            raise ValueError('DECI_TO_TIME: Dec is not a float. Cannot convert.')
        # Record and strip the sign before decomposing the magnitude.
        sign = '-' if dec < 0.0 else '+'
        magnitude = abs(dec)
        degrees_int = int(magnitude)
        minutes = (magnitude - degrees_int) * 60.0
        minutes_int = int(minutes)
        seconds = (minutes - minutes_int) * 60.0
        new_dec = '{0:s}{1:02d} {2:02d} {3:.2f}'.format(
            sign, degrees_int, minutes_int, seconds)

    return new_ra, new_dec
0680795f73973aadfd585f0ec61dd79a2838ea58
44,558
import sys
import yaml
import subprocess
from unittest.mock import call


def main():
    """Read transform info from a YAML file (argv[1]) and publish it to
    tf2 via static_transform_publisher.
    """
    if len(sys.argv) < 2:
        print('Usage: %s extrinsic_example.yaml' % sys.argv[0])
        sys.exit(0)
    with open(sys.argv[1]) as fp:
        transform_stamped = yaml.safe_load(fp)
    command = 'rosrun tf2_ros static_transform_publisher ' \
              '%f %f %f %f %f %f %f %s %s' % \
              (transform_stamped['transform']['translation']['x'],
               transform_stamped['transform']['translation']['y'],
               transform_stamped['transform']['translation']['z'],
               transform_stamped['transform']['rotation']['x'],
               transform_stamped['transform']['rotation']['y'],
               transform_stamped['transform']['rotation']['z'],
               transform_stamped['transform']['rotation']['w'],
               transform_stamped['header']['frame_id'],
               transform_stamped['child_frame_id'])
    print(command)
    try:
        # Bug fix: the original invoked unittest.mock.call, which only
        # records a call object and never runs the command; use
        # subprocess.call to actually execute it.
        return subprocess.call(command, shell=True)
    except OSError as e:
        print(e)
a848a8a282fbddc343e29b24fa3369899fdde6ad
44,559
import random
import string


def random_string(strlen=10):
    """Generate a random string of ASCII letters of the given length."""
    letters = [random.choice(string.ascii_letters) for _ in range(strlen)]
    return "".join(letters)
87b9ed7dae2a1ae341b64a764b1e467287e0e194
44,560
import argparse


def parse_args():
    """Parse commandline arguments for the address standardizer."""
    parser = argparse.ArgumentParser(
        description='Standardize street addresses.')
    required_options = (
        ('--input_file', 'file containing data to standardize'),
        ('--output_file', 'file to which standardized data is to be written'),
        ('--housenum_column', 'column containing house number data to standardize'),
        ('--street_column', 'column containing street data to standardize'),
    )
    for flag, help_text in required_options:
        parser.add_argument(flag, required=True, help=help_text)
    parser.add_argument('--sep', default=',', help='column separator')
    parser.add_argument(
        '--chunksize', type=int, default=100000,
        help='number of records to process at a time')
    return parser.parse_args()
eaa50a4197d49a05d3d870b1522f56e1d529b0c6
44,561
def counting_valleys(n, s):
    """Count the valleys traversed during a hike.

    A valley is a maximal stretch of steps below sea level; it is
    counted when the hiker steps back up to altitude 0.

    Args:
        n: number of steps (kept for signature compatibility; the
            length of `s` is used directly).
        s: step string of 'U' (up) and 'D' (down) characters.

    Returns:
        The number of valleys walked through.

    Note: the original docstring contained three abandoned draft
    implementations; they have been removed in favor of documentation.
    """
    altitude = 0
    valley_count = 0
    for step in s:
        if step == 'D':
            altitude -= 1
        elif step == 'U':
            altitude += 1
            # Climbing back to sea level closes exactly one valley.
            if altitude == 0:
                valley_count += 1
    return valley_count
cc993ad4acdc68d1dc222020769962c1ef4f25a0
44,562
import subprocess


def to_text(path):
    """Return the raw contents of a TXT electronic invoice.

    The original shelled out to `cat`; reading the file directly is
    equivalent, avoids spawning a process, and surfaces I/O errors
    (e.g. FileNotFoundError) instead of silently returning empty
    output.

    Parameters
    ----------
    path : str
        path of electronic invoice in txt

    Returns
    -------
    out : bytes
        returns extracted text from txt
    """
    with open(path, "rb") as handle:
        return handle.read()
92b260b32538e27bc1d1312a1629201052e507e8
44,563
def weight_counts(term_counts, patterns_to_coefficients):
    """Weight site-pattern counts by their null-maintaining coefficients.

    Inputs:
    term_counts --- a mapping of terms to their counts
    patterns_to_coefficients --- a mapping of site patterns to coefficients
        needed to maintain the null (missing patterns default to 1)

    Output:
    weighted_total --- the total of the weighted counts
    weighted_counts --- mapping of each pattern to its weighted count
    """
    weighted_counts = {
        pattern: count * patterns_to_coefficients.get(pattern, 1)
        for pattern, count in term_counts.items()
    }
    return sum(weighted_counts.values()), weighted_counts
bc0640b0b5213d3e093d4963075646ae5f6dc330
44,564
def trace_stem(bp_compatiable_matrix, i, j):
    """Collect one stem of uninterrupted base pairs anchored at (i, j).

    Pairs radiate diagonally from (i, j) in ``bp_compatiable_matrix``
    (i increasing, j decreasing) until a zero entry is hit.

    Returns a tuple ``(stem_s1, stem_s2)`` of the paired index lists.
    """
    stem_s1 = []
    stem_s2 = []
    # Walk the diagonal while the matrix marks the pair as compatible.
    while bp_compatiable_matrix[i, j] != 0:
        stem_s1.append(i)
        stem_s2.append(j)
        i += 1
        j -= 1
    return (stem_s1, stem_s2)
0ff0ef9fdd51d143b931bad8afb19c2ccd893314
44,565
def clean_api_url_response(url_response):
    """Clean an API-provided string down to a single URL.

    Some API records contain several URLs joined by ';', ' or ', ' and ',
    or plain whitespace; only the first URL is kept.
    """
    # Separators are applied in the same order as the original chain of
    # conditional splits; str.split is a no-op when the separator is absent.
    separators = (
        ";",
        " or http://",
        " and http://",
        " http://",
        " and https://",
        " or https://",
        " https://",
    )
    cleaned = url_response.strip()
    if url_response != "":
        for separator in separators:
            cleaned = cleaned.split(separator)[0]
    return cleaned
1c9acd4446378bb9b5c12558c7d86d9fd6f50a90
44,566
def check_neg_persp(input_words, vader_neg, vader_compound, include_nt=True):
    """Score the degree of negative perspective of a text.

    Counts known negation words present in ``input_words``, optionally adds
    one per extra "n't" token, and folds in the VADER negative score and any
    negative compound score. Higher is more negative.

    Returns a float score.
    """
    negations = {
        "aint", "arent", "cannot", "cant", "couldnt", "darent", "didnt", "doesnt",
        "ain't", "aren't", "can't", "couldn't", "daren't", "didn't", "doesn't",
        "dont", "hadnt", "hasnt", "havent", "isnt", "mightnt", "mustnt", "neither",
        "don't", "hadn't", "hasn't", "haven't", "isn't", "mightn't", "mustn't",
        "neednt", "needn't", "never", "none", "nope", "nor", "not", "nothing",
        "nowhere", "oughtnt", "shant", "shouldnt", "uhuh", "wasnt", "werent",
        "oughtn't", "shan't", "shouldn't", "uh-uh", "wasn't", "weren't", "without",
        "wont", "wouldnt", "won't", "wouldn't", "rarely", "seldom", "despite",
    }
    score = 0.0
    # One point per known negation word that appears in the input.
    score += sum(1 for token in negations if token in input_words)
    if include_nt:
        # One point per contracted-negation token not already counted above.
        score += sum(1 for token in input_words
                     if "n't" in token and token not in negations)
    if vader_neg > 0.0:
        score += vader_neg
    if vader_compound < 0.0:
        score += abs(vader_compound)
    return score
df37d790739eba5663df2740d1da33fe63ae1e6f
44,571
def extract_sentences(original_text):
    """Split paragraph text into a list of sentences on '. ' boundaries.

    Note: if the text contains xml tags they are NOT removed, and the final
    sentence keeps its trailing period (no separator follows it).

    :param original_text: Document text to split into a list of sentences.
    :return: List of sentences from the document text passed in.
    """
    sentences = original_text.split(". ")
    return sentences
653a9b4914fee446d785ba7cd7aa1e0460d109aa
44,573
def repeat_analysis(genes_dictionary, repeats_list):
    """Find repeat-gene matches.

    For every gene, each repeat is tested for containment; contained
    repeats are position-analysed (exon/intron) when the gene has
    transcripts, and the gene is collected once in the result list.

    Parameters
    ----------
    genes_dictionary : dict
        Dictionary containing gene instances keyed by gene ID.
    repeats_list : list
        Repeats sorted by position, each exposing ``get_seq_range()``.

    Returns
    -------
    list
        Gene instances that contain at least one repeat.
    """
    matched_genes = []
    for gene in genes_dictionary.values():
        gene_end = gene.get_seq_range()[1]
        for repeat in repeats_list:
            repeat_start = repeat.get_seq_range()[0]
            if gene.is_subsequence(repeat):
                # Locate where the repeat falls within the gene model.
                if gene.get_transcripts() != {}:
                    gene.position_analysis(repeat)
                    gene.position_analysis_mRNA(repeat)
                if gene not in matched_genes:
                    matched_genes.append(gene)
            elif gene_end < repeat_start:
                # Repeats are ordered: everything after starts past this gene.
                break
    return matched_genes
d87c35295e0dfe8858320f1ddb60b75f5b982c75
44,574
from typing import Union
from typing import SupportsInt


def int_or_default(value: Union[str, bytes, SupportsInt], default: int) -> int:
    """
    Transforms the value given in parameter into an int, if possible.
    Otherwise, use the default value.

    :param value: the value to transform into an int
    :type value: object
    :param default: the default value to use, if the conversion fails.
    :type default: int
    :return: the converted value, or the default one.
    :rtype: int
    """
    try:
        value = int(value)
    except (ValueError, TypeError):
        # TypeError covers non-convertible objects such as None -- the
        # docstring promises "object", but int(None) raises TypeError,
        # which the original let escape.
        value = default
    return value
a9628080194b62e5cfbecca717f2763ef8c6254b
44,577
def cli(ctx, history_id, jeha_id, outf, chunk_size=4096):
    """Download a history export archive.

    Use :meth:`export_history` to create an export first.

    Output:

        None
    """
    histories = ctx.gi.histories
    return histories.download_history(history_id, jeha_id, outf,
                                      chunk_size=chunk_size)
6c193d34933d77c5007b9ab094d934a99dcc3f20
44,578
def remove_index(data, value):
    """
    Remove one column from tabular data, for every row.

    ``data[0]`` is treated as the header; if ``value`` appears there, that
    column is deleted in place from every row (header included). The data
    is returned unchanged when the value is absent.
    """
    if value in data[0]:
        column = data[0].index(value)
        for row in data:
            del row[column]
    return data
2c78946d4c4f202fc4781909de5fefed297f9932
44,579
def remove_selective_dynamics(basis):
    """
    If the selective dynamics tag is set, allow all atoms to move by setting
    selective dynamics to True for every coordinate of every atom.

    Args:
        basis (pyiron_atomistics.structure.atoms.Atoms): Atomistic structure object

    Returns:
        Atoms: Atomistic structure object with selective dynamics set to True
    """
    # NOTE(review): assumes basis._tag_list is a dict-like registry of per-atom
    # tags and basis.selective_dynamics is an indexable per-atom list -- confirm
    # against the pyiron Atoms implementation.
    if "selective_dynamics" in basis._tag_list.keys():
        for ind in range(len(basis)):
            # [x, y, z] flags: True means the atom may move along that axis.
            basis.selective_dynamics[ind] = [True, True, True]
    # Returned for chaining; mutation happens in place on `basis`.
    return basis
0dc6158621e19e26246a5c81a2d89f10aa3af5fa
44,580
import numpy


def dens2lip(
    dens_gcm3, dens_lipid=0.9007, dens_prot=1.34, dens_water=0.994, dens_ash=2.3
):
    """Estimate percent lipid composition from body density.

    The lipid-percent equation is from Biuw et al. (2003); the default
    component densities are human values collected in Moore et al. (1963).

    Args
    ----
    dens_gcm3: float or ndarray
        Animal densities (g/cm^3). Percentages are only physically valid
        for densities between roughly 0.888 and 1.123 with the default
        component densities.
    dens_lipid: float
        Density of lipid content in the animal (g/cm^3)
    dens_prot: float
        Density of protein content in the animal (g/cm^3)
    dens_water: float
        Density of water content in the animal (g/cm^3)
    dens_ash: float
        Density of ash content in the animal (g/cm^3)

    Returns
    -------
    perc_lipid: float or ndarray
        Percent lipid of body composition

    References
    ----------
    Biuw, M., 2003. J. Exp. Biol. 206, 3405-3423. doi:10.1242/jeb.00583
    Moore FD et al., 1963. The Body Cell Mass and Its Supporting Environment.
    W.B. Saunders Company. ISBN:0-7216-6480-6
    """
    # Vectorize iterable inputs so the arithmetic broadcasts.
    if numpy.iterable(dens_gcm3):
        dens_gcm3 = numpy.asarray(dens_gcm3)

    # Same regression coefficients as the original, term for term.
    numerator = (
        (100 * dens_gcm3)
        + (-3.2248 * dens_ash)
        + (-25.2786 * dens_prot)
        + (-71.4966 * dens_water)
    )
    denominator = (
        dens_lipid
        + (-0.034 * dens_ash)
        + (-0.2857 * dens_prot)
        + (-0.6803 * dens_water)
    )
    return numerator / denominator
220887efad7ae74d692a5b2f494d96bf559fb9ad
44,581
def get_node_instance(node_instance_id, rest_client):
    """
    Fetch a node-instance object from the Cloudify REST API.

    :param node_instance_id: The ID of the node instance.
    :type node_instance_id: str
    :param rest_client: A Cloudify REST client.
    :type rest_client: cloudify_rest_client.client.CloudifyClient
    :return: request's JSON response
    :rtype: dict
    """
    api = rest_client.node_instance
    return api.get(node_instance_id=node_instance_id)
1c9554307d5a4552d7233d3a48025e624efa3c29
44,582
import binascii def encode_domain(domain): """Given a domain with possible Unicode chars, encode it to hex.""" try: return binascii.hexlify(domain.encode('idna')) except UnicodeError: # Some strange invalid Unicode domains return None
ae2d761adcf5956b9657ea8d60d3ea202f19f241
44,583
from datetime import date


def votar(ano=2000):
    """
    -> Determine a person's voting status from their year of birth.

    :param ano: the person's year of birth (defaults to 2000)
    :return: the voting status -- 'NEGADO!' (denied) under age 16,
        'OPCIONAL' (optional) for ages 16-17 or over 65, and
        'OBRIGATÓRIO!' (mandatory) otherwise.
    """
    # Approximate age: calendar-year difference only (birthday ignored).
    idade = date.today().year - ano
    print(f'Com {idade} anos, sua situação se voto é ', end='')
    if idade < 16:
        return 'NEGADO!'
    elif 18 > idade or idade > 65:
        return 'OPCIONAL'
    else:
        return 'OBRIGATÓRIO!'
5de81e2473a1c8037a9cfe62f54db6ce1e1a14c7
44,584
def join_population_data(daily_data, population_data):
    """
    Left-join population figures onto daily observations by country.

    Parameters
    ----------
    daily_data : df
        dataframe of daily observations
    population_data : df
        dataframe of populations

    Returns
    -------
    merged df
        daily_data with population columns attached where available
    """
    joined = daily_data.merge(population_data, how='left', on='Country/Region')
    return joined
56086e59a60342b1c994bba09ccf66d6fa02f379
44,585
import re


def is_allowed_anonymous_path(path, method):
    """Check whether a path/method pair may be accessed unauthenticated."""
    # Each rule pairs a path regex with the HTTP methods it permits.
    anonymous_rules = [['/assignments/.*', ['POST']]]
    for regex_path, allowed_methods in anonymous_rules:
        if re.compile(regex_path).match(path) and method in allowed_methods:
            return True
    return False
8a8566c321b8657d345ada8910793b2f515f81a3
44,588
def join(*parts):
    """
    Join path name components, inserting ``/`` as needed.

    If any component looks like an absolute path (i.e., it starts with
    ``hdfs:`` or ``file:``), all previous components are discarded.

    Note that this is *not* the reverse of :func:`split`, but rather a
    specialized version of os.path.join. No check is made to determine
    whether the returned string is a valid HDFS path.
    """
    if not parts:
        raise TypeError("need at least one argument")
    # First component keeps any leading slash; only trailing ones go.
    segments = [parts[0].rstrip("/")]
    for part in parts[1:]:
        part = part.strip("/")
        if part.startswith(("hdfs:", "file:")):
            # Absolute component: restart the path from here.
            segments = [part]
        else:
            segments.append(part)
    return "/".join(segments)
e1d478740417df0b30dcda33e8893f7cb37a0159
44,590
from typing import Dict


def _create_ensg_pkg_map() -> Dict:
    """Read the text file generated when installing ensg R packages and
    return a map from chip name to the corresponding BrainArray ensg
    package name.
    """
    ensg_pkg_filename = "/home/user/r_ensg_probe_pkgs.txt"
    chip_to_pkg = dict()
    with open(ensg_pkg_filename) as file_handler:
        for line in file_handler:
            fields = line.strip("\n").split("\t")
            # fields[0] is the (normalized) chip name; fields[1] is the
            # package URL shaped like
            # http://mbni.org/customcdf/<version>/ensg.download/<pkg>_22.0.0.tar.gz
            package = fields[1].split("/")[-1].split("_")[0]
            chip_to_pkg[fields[0]] = package
    return chip_to_pkg
01c542e025e558cf319228a84cbfeb89d2786ac1
44,591
def attr(name):
    """Produces a function that accesses an attribute

    :param name: Name of an attribute
    :returns: A function that when applied to an instance returns the
        value for the attribute 'name', or '' when the attribute is absent
    :rtype: function
    """
    def _attr(obj):
        """Wrapped function for accessing an attribute

        The attribute 'name' is defined in the enclosing closure.

        :param obj: an object
        :returns: Value of the 'name' attribute or ''
        """
        # Single lookup with a default: the original hasattr+getattr pair
        # evaluated the attribute twice (visible for properties with side
        # effects); getattr's default covers the same AttributeError case.
        return getattr(obj, name, '')
    return _attr
5f6a1e1eb3a789a4828aaf54b25c246b8bfee4f8
44,593
def is_record_package(data):
    """
    Returns whether the data is a record package.

    A record package has a required ``records`` field. Its other required
    fields are shared with release packages.
    """
    has_records = 'records' in data
    return has_records
c55734ef447f74c9f36d8c18d0cfe642aaf37178
44,595
def roman2digits(d):
    """Convert a single Unicode roman-numeral character to an int.

    >>> roman2digits('Ⅱ')
    2
    >>> roman2digits('Ⅻ')
    12

    Raises:
        ValueError: if ``d`` is not one of the twelve numeral characters.
    """
    numerals = 'ⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ'
    try:
        # index is 0-based, numerals start at 1. The original padded the
        # string with a '~' placeholder, which incorrectly mapped '~' to 0.
        return numerals.index(d) + 1
    except ValueError:
        # The original executed `raise NotImplemented`, which fails with a
        # TypeError because NotImplemented is not an exception; raise a
        # proper, descriptive error instead.
        raise ValueError('not a roman numeral character: %r' % (d,))
9bc6b11e2e29a5c108d508f385622fa7470c6640
44,596
import requests
import json


def AuthenticateToKolide(base_url, username, passowrd):
    """
    Input: Take in Kolide base URL, username, and password
    Output: Return Kolide JWT
    https://github.com/CptOfEvilMinions/BlogProjects/blob/master/kolide-api-ansible/kolide_websocket_client.py#L127
    """
    # NOTE(review): the parameter name "passowrd" is a typo, but renaming it
    # would break keyword callers, so it is left unchanged.
    data = {
        "Username": username,
        "Password": passowrd
    }
    url = f"{base_url}/api/v1/kolide/login"
    # NOTE(review): verify=False disables TLS certificate verification --
    # confirm this is intentional (e.g. a lab with self-signed certificates).
    r = requests.post(url=url, data=json.dumps(data), verify=False)
    # The login response body carries the JWT under the 'token' key.
    return r.json()['token']
20295ab29bc0b2e725d757e455ba47731bc411f2
44,598
from typing import Tuple
from typing import Union
import requests


def check_url(url: str) -> Tuple[bool, Union[str, requests.Response]]:
    """Returns information on the availability of the url

    Parameters
    ----------
    url : str
        The url to test

    Returns
    -------
    Tuple[bool, Union[str, Response]]
        Whether the url is available and either the Response (on success)
        or a short string describing the failure
    """
    try:
        # HEAD keeps the probe cheap; redirects are reported, not followed.
        response = requests.head(url, allow_redirects=False)
        return True, response
    except requests.exceptions.SSLError:
        return False, "SSL error"
    except requests.exceptions.ConnectionError:
        return False, "Connection error"
    except requests.exceptions.InvalidSchema:
        return False, "Invalid schema"
    except requests.exceptions.MissingSchema:
        # Bare host like "example.com": retry once with an explicit scheme.
        # The retried URL always has a scheme, so this recursion terminates.
        return check_url("https://" + url)
09ed074bd8f71288788a4265e98f23aa953a6969
44,599
def invalid_ride_id():
    """Build the error payload returned when a ride id is invalid."""
    message = "invalid id"
    return {"msg": message}
66d376fe73cbdfab67a2355b96cce052c4b472aa
44,600
def getAtList(l, idx, default=None):
    """Safe indexed access for lists: l[idx], or `default` when idx is
    out of range (negative indices keep their usual Python meaning)."""
    try:
        return l[idx]
    except IndexError:
        pass
    return default
06a168f4fec0573355a93500f6591491438e6452
44,601
def super_roar():
    """Return the super roar string."""
    roar = "Super Roarr!!"
    return roar
ace42afddb5a8ea1d05e8b9bf5de5f17151d9c79
44,602
import json


def extract_msmt_pts_from_config(config_filename):
    """Read a JSON config file and return its 'measurement_points' entry.

    Parameters
    ----------
    config_filename : str
        Path to a JSON configuration file containing a top-level
        ``measurement_points`` key.

    Returns
    -------
    The value stored under ``measurement_points``.
    """
    # Context manager guarantees the handle is closed even if json.load
    # raises (the original leaked the handle in that case).
    with open(config_filename, 'r') as config_file:
        config_dict = json.load(config_file)
    return config_dict['measurement_points']
9c7126ed00d8663431b1f353bb3fe667ef8868a3
44,603
def _annuity_pv_factor(r,n): """Present value factor for an annuity. Formula equivalent to C/r + r + C/(1+r)**2 + ... + C/(1+r)**n Parameters ----------. r: float Interest rate n: int Number of payments """ return (1 - (1/(1+r)**n)) / r
4216ba927975ef2313a41ef135957119e1930e8d
44,604
from typing import List


def read_list_from_file(fpath: str, skip_header: bool = False) -> List[str]:
    """Read a file into a list of strings, one entry per line.

    Args:
        fpath (str): File to read.
        skip_header (bool, optional): If True, drop the first line as a
            header. Defaults to False.

    Returns:
        List[str]: The file's lines without trailing newlines.
    """
    with open(fpath, 'r') as handle:
        lines = handle.read().splitlines()
    return lines[1:] if skip_header else lines
1283fe1c8ea6847516153271c4dd0695ab5b60da
44,605
import re


def camel_to_snake(text_string):
    """Transform a CamelCase string into snake_case."""
    # Pass 1: split a capital that begins a capitalized word ("Case").
    step1 = re.sub(r'(.)([A-Z][a-z]+)', r'\1_\2', text_string)
    # Pass 2: split any remaining lower/digit-to-upper boundary, then lower.
    return re.sub(r'([a-z0-9])([A-Z])', r'\1_\2', step1).lower()
d3c06d6a380546e0fcea606690338dae9212e168
44,606
def select_daily_mean(daily_mean, gw_data):
    """
    Keep only the daily-mean rows whose lake appears in the growth-window data.

    Input:
        daily_mean: dataframe with all compiled daily mean water quality data
        gw_data: growth window dataframe (output from the growth_window_means
            function)

    Output:
        selected_daily_mean: daily mean data restricted to lakes retained in
            the growth window dataset
    """
    retained_lakes = gw_data.lake.unique()
    mask = daily_mean.lake.isin(retained_lakes)
    return daily_mean[mask]
331bead7dcbe17086f52b247f807be87d5fe0e43
44,607
def compute_flesch_reading_ease(total_syllables, total_words, total_sentences):
    """
    Computes the Flesch reading-ease score from summary statistics.

    :param total_syllables: number of syllables in input text
    :param total_words: number of words in input text
    :param total_sentences: number of sentences in input text
    :return: A readability score: the lower the score, the more complex the
        text is deemed to be
    """
    # Published Flesch formula: 206.835 - 1.015*(words/sentence)
    # - 84.6*(syllables/word). The previous constant 206.85 was a typo.
    return (
        206.835
        - 1.015 * (total_words / total_sentences)
        - 84.6 * (total_syllables / total_words)
    )
8b0bc43274766dd0f2e3f7b585f79bf1ccd427dc
44,609
import yaml


def load_from_yml(filename):
    """Load an experiment configuration from a yaml file.

    For examples, take a look at the `data` folder.

    Args:
        filename (str): The name of the data file (use full or relative path)

    Returns:
        tuple: The first value is an iterable of cars in a sequence; the
        second is a mapping with ensemble and number of black cars as keys
        and values, respectively.
    """
    with open(filename, 'r') as file_handle:
        # Only the first YAML document in the stream is used; the generator
        # must be consumed while the file is still open.
        first_doc = next(iter(yaml.safe_load_all(file_handle)))
    return first_doc['sequence'], first_doc['counts']
281f2440e1d9b2e513aa38faf732ca3ec1c478ea
44,610
def solicitar_entero_valido(mensaje):
    """
    Prompt for an integer and keep prompting until the input is a valid
    integer, then return it.

    :param mensaje: the prompt shown to the user on each attempt
    :return: the first value entered that parses as an int
    """
    while True:
        try:
            posible_entero = int(input(mensaje))
            return posible_entero
        except ValueError:
            # Non-integer input: silently re-prompt.
            continue
c576e418e559d7c6f50a03d767e001b8613ea89c
44,612
def find_anchor(bound_start_anchor, start_anchor_annots):
    """
    Finds the first available anchor gene and returns its annotation.

    arguments:
    bound_start_anchor: a tuple containing tuples of:
        (feature name, "start"/"end"), in priority order
    start_anchor_annots: a list containing all the possible start anchors
        (even ones that are low-priority)

    return:
    the annotation of the anchor and the anchor's specification, or
    (None, None) when no specification matches any annotation.
    """
    for s_anch in bound_start_anchor:
        for annot in start_anchor_annots:
            if annot.qualifiers["product"][0] == s_anch[0]:
                # Returning here exits both loops at once; the original
                # also kept a dead local (`anch_pos`) that was never used.
                return annot, s_anch
    return None, None
55492198560008c3c4940d185be425f0a01d29d8
44,613
def count_marquage(marquage):
    """Return the highest number of identical marks on any row or column.

    ``marquage`` maps 'ligne' and 'colonne' to lists of marks; the result
    is the maximum multiplicity of any single mark within either list.
    """
    best = 0
    for marks in (marquage['ligne'], marquage['colonne']):
        for mark in marks:
            occurrences = marks.count(mark)
            if occurrences > best:
                best = occurrences
    return best
03632cc1133c2fe4aa2db2a8e3ec19d806524708
44,615
import argparse
import sys


def get_parsed_in():
    """
    Parse command line inputs for the ResNet normalization experiments.

    @return: arguments dictionary containing parsed command line inputs
    """
    parser = argparse.ArgumentParser(
        description='Configurations to run normalization experiments with ResNet')
    parser.add_argument('--replace', action='store_true',
                        help='overwrite previous run if it exists')
    parser.add_argument('--continue', action='store_true',
                        help='continue training: load the latest checkpoint and continue training')
    parser.add_argument('--cont_epoch', type=int, default=-1,
                        help='Used together with continue, overwrites the saved epoch of the checkpoint and sets the initial epoch for continue training')
    parser.add_argument('--experimental_data_aug', action='store_true',
                        help='Use experimental data augmentation inside the network instead of the one before the network (Only used for epochs = 100)')
    parser.add_argument('--cpu', action='store_true',
                        help='train on only cpu')
    parser.add_argument('--ResNet', type=int, default=3, choices=[3, 5],
                        help='Defines what Resnet model to use, 3-> ResNet 20, 5-> ResNet 32')
    parser.add_argument('--batch_size', type=int, default=32,
                        choices=[32, 16, 8, 4, 2],
                        help='batch size per worker')
    parser.add_argument('--epochs', type=int, default=30, choices=[30, 100],
                        help='training epochs')
    parser.add_argument('--norm', type=str, default='GN', choices=['BN', 'GN'],
                        help='decide if BN (batch normalization) or GN (group normalization is applied)')
    parser.add_argument('--run', type=int, default=1,
                        help='Differentiate multiple runs for statistical comparison')
    parser.add_argument('--weight_decay', action='store_true',
                        help='Set to use SGDW (stochastic gradient with weight decay) as optimizer and use weight decay (unstable) otherwise adam is used.')

    arguments = vars(parser.parse_args())
    # --continue and --replace are mutually exclusive.
    if arguments['continue'] and arguments['replace']:
        print("Incompatible options to continue training and remove it to replace choose one or the other")
        sys.exit()
    return arguments
5b2c97cce02cfe03c58a2061131e9703cbe109c0
44,616
def pearson_correlation_2(x, y):
    """Compute Pearson's correlation coefficient without pandas.

    Uses the raw-sum formulation
    r = (n*Sxy - Sx*Sy) / sqrt((n*Sx2 - Sx^2) * (n*Sy2 - Sy^2)).
    """
    count = len(x)
    sum_xy = sum(a * b for a, b in zip(x, y))
    sum_x = sum(x)
    sum_y = sum(y)
    sum_x2 = sum(a * a for a in x)
    sum_y2 = sum(b * b for b in y)
    numerator = (count * sum_xy) - (sum_x * sum_y)
    denominator = (((count * sum_x2) - sum_x ** 2)
                   * ((count * sum_y2) - sum_y ** 2)) ** 0.5
    return 1.0 * numerator / denominator
44c00db5e4faa8e6809eac1e7f2e9dd98fdbfa48
44,617
def potential_parents_in_chain(chain):
    """
    Map each superbubble key in ``chain`` to its (chain, bubble) pair.

    Only superbubbles can act as parents, so ordinary bubbles are skipped.
    (The previous version carried a dead ``counter`` local and a
    commented-out outer loop; both are removed.)
    """
    poten_parents = dict()
    for b in chain.sorted:
        if b.is_super():
            poten_parents[b.key] = (chain, b)
    return poten_parents
3a8a4bd2f0c23deb1e2417fdb3976a77f36b0dd5
44,618
def make_param_name_multiple_index(param_parts):
    """
    Make the key name from param parts.

    For example, ("param", "tag", "2", "1") -> ("param2", "1").
    """
    base = param_parts[0]
    tag_index = param_parts[-2]
    item_index = param_parts[-1]
    return (base + tag_index, item_index)
cc3cbad59bc89273bc35ba8811f1f5d202bc8c77
44,619
def unique_species(ini0):
    """
    Return the list of different chemical elements there are in the
    current structure, in order of first appearance.
    """
    natom = ini0["natom"]
    # dict.fromkeys preserves insertion order, giving an O(n) ordered
    # de-duplication; the original list-membership loop was O(n^2).
    return list(dict.fromkeys(ini0["symbol"][:natom]))
3c4da68118cf057ec33e46cbe204a8706e75938d
44,620
from typing import Any


def get_cls_name(obj: Any, package_name: bool = True) -> str:
    """
    Get name of class from object

    Args:
        obj (Any): any object
        package_name (bool): prepend the package of origin

    Returns:
        str: name of class
    """
    # str(cls) looks like "<class 'pkg.mod.Name'>"; keep the quoted part.
    qualified = str(obj.__class__).split('\'')[1]
    modules = qualified.split('.')
    if len(modules) == 1:
        return modules[0]
    return modules[0] + '.' + modules[-1] if package_name else modules[-1]
6eb9a5b8b2ac4b33b988a90ba5f1988633179295
44,624
def spacydoc2tokens(doc):
    """
    Extract the token texts from a spaCy doc as a plain list.

    :param doc: spaCy doc
    :return: list of token texts
    """
    return list(map(lambda token: token.text, doc))
23ca1cdf9395cac883719dedcf34748701484f3c
44,625
def break_camel(s):
    """Insert a space before every uppercase letter in ``s``.

    Note a leading capital also gets a space prepended.

    Time complexity: O(n). Space complexity: O(n).
    """
    pieces = []
    for ch in s:
        pieces.append(' ' + ch if ch.isupper() else ch)
    return ''.join(pieces)
395e8af42718bc89cae097dffc330cf53d278c23
44,626
def in_inner_list(item, item_list):
    """
    Check whether ``item`` appears in any of the inner lists of ``item_list``.

    :param item: the object to look for
    :param item_list: <list of list of item>
    :return: the first inner list that contains ``item``, or False when no
        inner list contains it
    """
    for item_ in item_list:
        # If item is inside this inner list, return that inner list
        # itself (not a boolean).
        if item in item_:
            return item_
    # Not found in any inner list.
    return False
bc4ad9ea415f76c22630b20ca8b5eed0428f3a18
44,627
import tempfile
import shutil


def temporaryIPCPath(request, monkeypatch):
    """
    Pytest fixture helper: create a new temporary directory and point
    c4.messaging.zeromqMessaging.DEFAULT_IPC_PATH at it for the duration
    of the test, removing the directory when the test finishes.
    """
    # /dev/shm is a RAM-backed filesystem, keeping IPC socket files fast.
    newpath = tempfile.mkdtemp(dir="/dev/shm")
    # newpath = tempfile.mkdtemp(dir="/tmp")
    monkeypatch.setattr("c4.messaging.zeromqMessaging.DEFAULT_IPC_PATH", newpath)

    def removeTemporaryDirectory():
        # Finalizer: clean up the directory after the requesting test ends.
        shutil.rmtree(newpath)
    request.addfinalizer(removeTemporaryDirectory)
    return newpath
fd903837670cbee708b30a89985966c9e2c461ef
44,628
import optparse


def setup_parser():
    """Build and return the command-line option parser."""
    parser = optparse.OptionParser()
    parser.add_option('-v', '--verbose', help='Be more verbose',
                      dest="verbose", action="store_true", default=False)
    parser.add_option('-s', '--search',
                      help='Search the geolocation database directly',
                      dest="search", default=None)
    parser.add_option('-w', '--width', help='Output width, in characters',
                      dest="width", default=None)
    parser.add_option('-l', '--list', help='List all recognized timezones',
                      dest="list", action="store_true", default=False)
    parser.add_option('-t', '--twelve', help='Use 12-hour clock times',
                      dest="twelve", action="store_true", default=False)
    parser.add_option('-d', '--date',
                      help='Calculate the grid based on this Date/Time (default=now)',
                      dest="date")
    parser.add_option('-u', '--utc', help='Use list of utc offsets',
                      dest="utc", action="store_true", default=False)
    return parser
0b504dd3dbeb83503bd29137303485a38be4e046
44,629