content: string (lengths 35 to 416k) · sha1: string (length 40) · id: int64 (0 to 710k)
def _escape(input_string: str) -> str:
    """
    Adopted from https://github.com/titan550/bcpy/blob/master/bcpy/format_file_builder.py#L25
    """
    return (
        input_string.replace('"', '\\"')
        .replace("'", "\\'")
        .replace("\r", "\\r")
        .replace("\n", "\\n")
    )
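A quick usage sketch for _escape (the input value is illustrative, not from the dataset):

    >>> _escape('say "hi"\n')
    'say \\"hi\\"\\n'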
65cfa65d2dc7acdfe80d5a7555b48f19bb267955
17,172
def make_word_class_dict(word_info):
    """
    Build a word-to-class dictionary from database query results.

    :param word_info: records from the database
    :return: word-to-class dictionary built from the query results
    """
    word_class_dict = {}
    for entry in word_info:
        word = entry[1]
        word_class = entry[5]  # column index 5 holds the word's class field
        word_class_dict[word] = word_class
    return word_class_dict
5bd29c1145d02eea18e750261e724c78b0e31f33
17,175
import re

def baselineNumber(Title):
    """Extract the processing baseline number from the given product title."""
    return re.sub(r".+_N(\d{4})_.+", r"\1", Title)
c69d099c6173cb771d14d35f520f12f32079229f
17,178
import html

def html_unescape(text: str) -> str:
    """
    Convert HTML content to readable text:
    1. unescape HTML entities
    2. remove some empty codepoints

    Args:
        text: raw HTML text

    Returns:
        The cleaned, readable text.
    """
    text = html.unescape(text).strip()
    text = text.replace("\xa0", "")
    return text
ed6fe87ab678ef4094248d2047d046d992a1db59
17,179
def _charlson():
    """Wrapper function for the Charlson comorbidity dictionary."""
    return {
        'Myocardial infarction': {
            10: ['I21.x', 'I22.x', 'I25.2'],
            9: ['410.x', '412.x'],
            'Original': 1, 'Quan11': 0,
        },
        'Congestive heart failure': {
            10: ['I09.9', 'I11.0', 'I13.0', 'I13.2', 'I25.5', 'I42.0', 'I42.5-I42.9', 'I43.x', 'I50.x', 'P29.0'],
            9: ['398.91', '402.01', '402.11', '402.91', '404.01', '404.03', '404.11', '404.13', '404.91', '404.93', '425.4-425.9', '428.x'],
            'Original': 1, 'Quan11': 2,
        },
        'Peripheral vascular disease': {
            10: ['I70.x', 'I71.x', 'I73.1', 'I73.8', 'I73.9', 'I77.1', 'I79.0', 'I79.2', 'K55.1', 'K55.8', 'K55.9', 'Z95.8', 'Z95.9'],
            9: ['093.0', '437.3', '440.x', '441.x', '443.1-443.9', '47.1', '557.1', '557.9', 'V43.4'],
            'Original': 1, 'Quan11': 0,
        },
        'Cerebrovascular disease': {
            10: ['G45.x', 'G46.x', 'H34.0', 'I60.x-I69.x'],
            9: ['362.34', '430.x-438.x'],
            'Original': 1, 'Quan11': 0,
        },
        'Dementia': {
            10: ['F00.x-F03.x', 'F05.1', 'G30.x', 'G31.1'],
            9: ['290.x', '294.1', '331.2'],
            'Original': 1, 'Quan11': 2,
        },
        'Chronic pulmonary disease': {
            10: ['I27.8', 'I27.9', 'J40.x-J47.x', 'J60.x-J67.x', 'J68.4', 'J70.1', 'J70.3'],
            9: ['416.8', '416.9', '490.x-505.x', '506.4', '508.1', '508.8'],
            'Original': 1, 'Quan11': 1,
        },
        'Rheumatic disease': {
            10: ['M05.x', 'M06.x', 'M31.5', 'M32.x-M34.x', 'M35.1', 'M35.3', 'M36.0'],
            9: ['446.5', '710.0-710.4', '714.0-714.2', '714.8', '725.x'],
            'Original': 1, 'Quan11': 1,
        },
        'Peptic ulcer disease': {
            10: ['K25.x-K28.x'],
            9: ['531.x-534.x'],
            'Original': 1, 'Quan11': 0,
        },
        'Mild liver disease': {
            10: ['B18.x', 'K70.0-K70.3', 'K70.9', 'K71.3-K71.5', 'K71.7', 'K73.x', 'K74.x', 'K76.0', 'K76.2-K76.4', 'K76.8', 'K76.9', 'Z94.4'],
            9: ['070.22', '070.23', '070.32', '070.33', '070.44', '070.54', '070.6', '070.9', '570.x', '571.x', '573.3', '573.4', '573.8', '573.9', 'V42.7'],
            'Original': 1, 'Quan11': 2,
        },
        'Diabetes without chronic complication': {
            10: ['E10.0', 'E10.1', 'E10.6', 'E10.8', 'E10.9', 'E11.0', 'E11.1', 'E11.6', 'E11.8', 'E11.9', 'E12.0', 'E12.1', 'E12.6', 'E12.8', 'E12.9', 'E13.0', 'E13.1', 'E13.6', 'E13.8', 'E13.9', 'E14.0', 'E14.1', 'E14.6', 'E14.8', 'E14.9'],
            9: ['250.0-250.3', '250.8', '250.9'],
            'Original': 1, 'Quan11': 0,
        },
        'Diabetes with chronic complication': {
            10: ['E10.2-E10.5', 'E10.7', 'E11.2-E11.5', 'E11.7', 'E12.2-E12.5', 'E12.7', 'E13.2-E13.5', 'E13.7', 'E14.2-E14.5', 'E14.7'],
            9: ['250.4-250.7'],
            'Original': 2, 'Quan11': 1,
        },
        'Hemiplegia or paraplegia': {
            10: ['G04.1', 'G11.4', 'G80.1', 'G80.2', 'G81.x', 'G82.x', 'G83.0-G83.4', 'G83.9'],
            9: ['334.1', '342.x', '343.x', '344.0-344.6', '344.9'],
            'Original': 2, 'Quan11': 2,
        },
        'Renal disease': {
            10: ['I12.0', 'I13.1', 'N03.2-N03.7', 'N05.2-N05.7', 'N18.x', 'N19.x', 'N25.0', 'Z49.0-Z49.2', 'Z94.0', 'Z99.2'],
            9: ['403.01', '403.11', '403.91', '404.02', '404.03', '404.12', '404.13', '404.92', '404.93', '582.x', '583.0-583.7', '585.x', '586.x', '588.0', 'V42.0', 'V45.1', 'V56.x'],
            'Original': 2, 'Quan11': 1,
        },
        'Any malignancy, including lymphoma and leukemia, except malignant neoplasm of skin': {
            10: ['C00.x-C26.x', 'C30.x-C34.x', 'C37.x-C41.x', 'C43.x', 'C45.x-C58.x', 'C60.x-C76.x', 'C81.x-C85.x', 'C88.x', 'C90.x-C97.x'],
            9: ['140.x-172.x', '174.x-195.8', '200.x-208.x', '238.6'],
            'Original': 2, 'Quan11': 2,
        },
        'Moderate or severe liver disease': {
            10: ['I85.0', 'I85.9', 'I86.4', 'I98.2', 'K70.4', 'K71.1', 'K72.1', 'K72.9', 'K76.5', 'K76.6', 'K76.7'],
            9: ['456.0-456.2', '572.2-572.8'],
            'Original': 3, 'Quan11': 4,
        },
        'Metastatic solid tumor': {
            10: ['C77.x-C80.x'],
            9: ['196.x-199.x'],
            'Original': 6, 'Quan11': 6,
        },
        'AIDS/HIV': {
            10: ['B20.x-B22.x', 'B24.x'],
            9: ['042.x-044.x'],
            'Original': 6, 'Quan11': 4,
        },
    }
3248380ef5e717b3761ffded11db17631311c5c6
17,180
def _is_expecting_event(event_recv_list):
    """Check whether more events are expected in the event list.

    Args:
        event_recv_list: list of events

    Returns:
        result: True if more events are expected, False if not.
    """
    for state in event_recv_list:
        if state is False:
            return True
    return False
befbe6a614ede6a7e5b59d3bda9c148c04ddadde
17,182
def get_orders_dict(orders):
    """Form a dictionary of current order buys and sells."""
    list_orders = list(orders)
    orders_dict = {"sells": [], "buys": []}
    for order in list_orders:
        temp_price = round(float(order["price"]), 2)
        if order["side"] == "sell":
            orders_dict["sells"].append(temp_price)
        if order["side"] == "buy":
            orders_dict["buys"].append(temp_price)
    return orders_dict
9d126d759dd0b3da7c584f6d4163243d8b2cee43
17,183
def data_gen(list_length):
    """
    >>> data_gen(3)
    {'numbers_list': [0, 1, 2]}
    >>> data_gen(7)
    {'numbers_list': [0, 1, 2, 3, 4, 5, 6]}
    """
    numbers_list = [number for number in range(list_length)]
    return {"numbers_list": numbers_list}
265c5925c086afc04816ac3e965f0487d31ec9d9
17,184
import re

def get_offer_total_floors(html_parser, default_value=''):
    """
    This method returns the maximal number of floors in the building.

    :param html_parser: a BeautifulSoup object
    :rtype: string
    :return: The maximal floor number
    """
    # searching dom for floor data
    floor_raw_data = html_parser.find(class_="param_floor_no")
    if hasattr(floor_raw_data, 'span'):
        floor_data = floor_raw_data.span.text
    else:
        return default_value
    # extracting information about floor
    match = re.search(r"\w+\s(?P<total>\d+)", floor_data)
    total_floors = default_value
    if match:
        total_floors = match.groupdict().get("total")
    return total_floors
170e75a04104f6fa1c544788c3d25324edd6b2e8
17,185
import functools

def request(req=None, method=None, requires_response=True):
    """Call function req and then emit its results to the LSP server."""
    if req is None:
        return functools.partial(request, method=method,
                                 requires_response=requires_response)

    @functools.wraps(req)
    def wrapper(self, *args, **kwargs):
        if self.lsp_ready:
            params = req(self, *args, **kwargs)
            if params is not None:
                self.emit_request(method, params, requires_response)
    return wrapper
925ab1289389a8a2402dde4c0a96002f64833010
17,186
from collections import Counter

def word_lookup(words: list, rand_string: str):
    """Find the words that can be spelled from a random string of letters.

    Takes the list of words that can be used for the game and the random
    string of consonants and vowels. For each word, a list is created from
    the random string; every letter of the word found in that list is
    appended to ``letters_found`` (and removed from the pool, so a letter
    cannot be reused). If subtracting the character counts of
    ``letters_found`` from those of the word leaves nothing, the word is a
    possible answer; the longest answer length seen so far is updated. A
    second pass then collects every possible answer of that longest length.

    :param words: The list of words parsed by the dictionary_reader function
    :type words: list
    :param rand_string: The string of random characters generated by the user
    :type rand_string: str
    :return: The list of longest_answers, and the list of possible_answers
    :rtype: tuple
    """
    longest_word_length: int = 0
    possible_answers: list = []
    longest_answers: list = []
    for word in words:
        letters_found: str = ""
        rand: list = list(rand_string)
        for letter in word:
            if letter in rand:
                letters_found += rand.pop(rand.index(letter))
        if len(Counter(word) - Counter(letters_found)) == 0:
            possible_answers.append(word)
            if len(word) > longest_word_length:
                longest_word_length = len(word)
    for word in possible_answers:
        if len(word) == longest_word_length:
            longest_answers.append(word)
    return longest_answers, possible_answers
4c5ab00cc318a3c9a1ec357750495ee08ce99499
17,187
def rreplace(s, old, new, occurrence):
    """This function performs a search-and-replace on a string for a given
    number of occurrences, but works from the back to the front.

    :param s: string to manipulate
    :param old: substring to search for
    :param new: substring to replace with
    :param occurrence: number of occurrences
    :return: Modified string.
    """
    li = s.rsplit(old, occurrence)
    return new.join(li)
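A usage sketch for rreplace (illustrative values): only the last two occurrences are replaced, counting from the right.

    >>> rreplace("a-b-c-d", "-", "+", 2)
    'a-b+c+d'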
eac6d5ffb8adb7940e6d3374eec130cafcc311e7
17,188
import requests

def get_all_episodes(cfg, series_id):
    """
    Request all episodes within a series.

    :param series_id: Unique identifier for series
    :param cfg: Opencast configuration
    :return: List of pairs of eventIds and titles for episodes
    """
    url = cfg['uri'] + "/api/events"
    params = {"filter": "is_part_of:" + series_id, "sort": "start_date:ASC"}
    result = []
    r = requests.get(url=url, params=params, auth=(cfg['user'], cfg['password']))
    json_data = r.json()
    for elem in json_data:
        result.append([elem['identifier'], elem['title']])
    return result
05ea8c7c36641ec5ed3dacebe9579a903ef01fe7
17,189
def default_sid_function(id, rsid):
    """
    The default function for turning a Bgen (SNP) id and rsid into a
    :attr:`pysnptools.distreader.DistReader.sid`.

    If the Bgen rsid is '' or '0', the sid will be the (SNP) id.
    Otherwise, the sid will be 'ID,RSID'.

    >>> default_sid_function('SNP1','rs102343')
    'SNP1,rs102343'
    >>> default_sid_function('SNP1','0')
    'SNP1'
    """
    if rsid == "0" or rsid == "":
        return id
    else:
        return id + "," + rsid
f13b8adab14eb4476a151059938eddf9763d35ef
17,190
def filter_BF_matches(matches: list, threshold=45) -> list:
    """
    Filter the matches list and keep the best matches according to the threshold.

    :param matches: a list of matches
    :param threshold: filtering threshold (percentage of matches to keep)
    :return: matches_tmp: list of the best matches
    """
    matches_tmp = []
    sorted_matches = sorted(matches, key=lambda x: x.distance)
    threshold_percent = int(len(sorted_matches) * threshold / 100)
    for match_index in range(threshold_percent):
        matches_tmp.append([sorted_matches[match_index].queryIdx,
                            sorted_matches[match_index].trainIdx])
    return matches_tmp
cce90d95f72b00148355552d0284b87cb16a40c1
17,192
def get_school_total_funding(school_id, aug_school_info):
    """
    Gets (total) funding associated with a school.

    Args:
        school_id (str): NCES ID of target school (e.g. '010000500889').
        aug_school_info (pandas.DataFrame): Target augmented school information
            (as formatted by `auxiliary.data_handler.DataHandler`).

    Returns:
        float: Single number comprising school-level data.
    """
    row = aug_school_info.loc[school_id]
    return float(row["adjusted_total_revenue_per_student"] * row["total_students"])
1827c5499d5af0c06e213b7a1517647d63e9c7ad
17,193
def _convert_key_and_value(key, value):
    """Helper function to convert the provided key and value pair (from a
    dictionary) to a string.

    Args:
        key (str): The key in the dictionary.
        value: The value for this key.

    Returns:
        str: The provided key value pair as a string.
    """
    updated_key = f'"{key}"' if isinstance(key, str) else key
    updated_value = f'"{value}"' if isinstance(value, str) else value
    return f"{updated_key}: {updated_value}, "
075c0a9a7fe54c35c19296e3da827484b579d4c8
17,194
def max_value_bits(b):
    """
    Get the maximum (unsigned) value of a given integer bit size variable.

    Parameters
    ----------
    b : int
        Number of bits (binary values) that are used to describe a putative variable.

    Returns
    -------
    max_value : int
        Maximum value that the putative variable can hold (unsigned integer).
    """
    return (2 ** b) - 1
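A usage sketch for max_value_bits (illustrative values): an 8-bit unsigned variable tops out at 255, a 16-bit one at 65535.

    >>> max_value_bits(8)
    255
    >>> max_value_bits(16)
    65535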
24041ed8833da09c1ecc8dea1c12f63ca7b29ed0
17,195
def aligned_output(cols, indent, tab_size=4):
    """
    Pretty printing function to output tabular data containing multiple
    columns of text, left-aligned.

    The first column is aligned at an indentation of "indent". Each
    successive column is aligned on a suitable multiple of the "tab_size"
    with spaces for all indentation. "cols" is assumed to be a list of
    columns, with each column holding an equal length list of string values.
    """
    # Check the input data
    ncols = len(cols)
    if ncols == 0:
        return ""
    nrows = len(cols[0])
    if nrows == 0:
        return ""
    # Work out the indentations and widths of each column
    indents = [indent]
    widths = []
    for col in range(1, ncols):
        width = max(len(x) for x in cols[col - 1])
        # Integer division keeps the indent an exact multiple of tab_size
        # (plain "/" would produce a float under Python 3 and break ljust).
        indents.append(((indents[col - 1] + width + tab_size) // tab_size) * tab_size)
        widths.append(indents[col] - indents[col - 1])
    # Now output the actual tabular values
    result = ""
    for row in range(0, nrows):
        if row > 0:
            result += ",\n" + (" " * indent)
        if len(cols) > 1:
            for col in range(0, ncols - 1):
                result += cols[col][row].ljust(widths[col])
        result += cols[-1][row]
    return result
856391043607c4570f75b804917c10b4c4b42dc1
17,197
def max_path_sum_in_triangle(triangle):
    """
    Finds the maximum sum path in a triangle (tree) and returns it.

    :param triangle:
    :return: maximum sum path in the given tree
    :rtype: int
    """
    length = len(triangle)
    for _ in range(length - 1):
        a = triangle[-1]
        b = triangle[-2]
        for y in range(len(b)):
            b[y] += max(a[y], a[y + 1])
        triangle.pop(-1)
        triangle[-1] = b
    return triangle[0][0]
bbac81a717e3e8ceeedf1a3d61e0ae3915e3efce
17,198
import re

def remove_blank(text):
    """
    Args:
        text (str): input text; contains blanks between zh and en, zh and zh, en and en

    Returns:
        str: text without blanks between zh and en or zh and zh, but keeping en and en

    Examples:
        >>> text = "比如 Convolutional Neural Network,CNN 对应中 文是卷 积神 经网络。"
        >>> remove_blank(text)
        "比如Convolutional Neural Network,CNN对应中文是卷积神经网络。"
    """
    # remove blank space after any non-ASCII-letter character
    # (covers Chinese-Chinese and Chinese-English boundaries)
    text = re.sub(r'([^a-zA-Z])([\u0020]*)', r'\1', text)
    # remove blank space before any non-ASCII-letter character
    # (covers English-Chinese boundaries)
    text = re.sub(r'([\u0020]*)([^a-zA-Z])', r'\2', text)
    return text
8b2093254aeefc26e72c507f0ec5f9e7400a41ea
17,199
def last_of_list(the_list):
    """Return the last item of the provided list."""
    if len(the_list) == 0:
        return None
    return the_list[len(the_list) - 1]
b88bf4c2f55497093888cebe703a14c1eb45199d
17,202
import torch

def get_mean_std(loader):
    """Calculate the mean and standard deviation of the dataset.

    Args:
        loader (instance): torch instance for data loader

    Returns:
        tensor: mean and std of data
    """
    channel_sum, channel_squared_sum, num_batches = 0, 0, 0
    for img, _ in loader:
        channel_sum += torch.mean(img / 255., dim=[0, 1, 2])
        channel_squared_sum += torch.mean((img / 255.) ** 2, dim=[0, 1, 2])
        num_batches += 1
    mean = channel_sum / num_batches
    std = (channel_squared_sum / num_batches - mean ** 2) ** 0.5
    print("The mean of dataset : ", mean)
    print("The std of dataset : ", std)
    return mean, std
f5ee2a66edc5925aec3f78811c8ec6b8b943a1d3
17,203
from typing import Tuple

def get_sweep_time_ascii(
    data: str, sweep_b: Tuple[int, int], time_b: Tuple[int, int]
) -> Tuple[int, int]:
    """Get sweep and time from a given ASCII string.

    :param data: ASCII string
    :param sweep_b: Boundaries of sweep
    :param time_b: Boundaries of time
    :return: sweep, time
    """
    sweep_val = int(data[sweep_b[0]:sweep_b[1]], 2)
    time_val = int(data[time_b[0]:time_b[1]], 2)
    return sweep_val, time_val
f1848b70439314dff5d4a5e50ae0706f64315378
17,204
def getFrameLength(socketFile, boundary, skipLines):
    """Get frame length from stream."""
    line = socketFile.readline()
    # Find boundary
    while len(line) > 0 and line.count(boundary) == 0:
        line = socketFile.readline()
    length = 0
    # Read in chunk headers (the stream is binary, so compare against b"";
    # the original compared bytes against "" which never matches)
    while len(line) > 0 and line.strip() != b"" and length == 0:
        parts = line.split(b":")
        if len(parts) > 1 and parts[0].lower().count(b"content-length") > 0:
            # Grab chunk length
            length = int(parts[1].strip())
            # Skip lines before image data
            i = skipLines
            while i > 0:
                line = socketFile.readline()
                i -= 1
        else:
            line = socketFile.readline()
    return length
c21d3baa8994d86aae94dd9bafdbb65d29db14ec
17,205
def bubbles_from_fixed_threshold(data, threshold=0, upper_lim=True):
    """
    Giri et al. (2018a)

    It is a method to identify regions of interest in noisy images.
    The method uses a fixed threshold.

    Parameters
    ----------
    data : ndarray
        The brightness temperature or ionization fraction cube.
    threshold : float
        The fixed threshold value (Default: 0).
    upper_lim : bool
        This decides which mode in the PDF is to be identified.
        'True' identifies ionized regions in brightness temperature,
        while 'False' identifies in the xfrac data (Default: True).

    Returns
    -------
    Binary cube where pixels identified as region of interest are True.
    """
    if upper_lim:
        return (data <= threshold)
    else:
        return (data >= threshold)
63977ae51eaa80a99b8325124e2d78b98f61b549
17,206
def coords_deplacees(c, direction):
    """
    Compute the guard's new coordinates.

    :param c: list with the guard's coordinates
    :param direction: direction in which to move the guard
    :return: list with the guard's new coordinates
    """
    if direction == 'haut':
        c[0] -= 1
    elif direction == 'bas':
        c[0] += 1
    elif direction == 'gauche':
        c[1] -= 1
    elif direction == 'droite':
        c[1] += 1
    return c
169eb358cea335fb18e91376c487767ba0468150
17,207
import re

def lower_case_all_tags(s):
    """Change all the tags to lower-case."""
    return re.sub(r'(<.*?>)', lambda pat: pat.group(1).lower(), s,
                  flags=re.IGNORECASE)
54b8dfdeb81e7cc21c930fd97e1787616a2a8939
17,209
import binascii

def unpack(packet):
    """Convert a hex-encoded packet string into its raw bytes."""
    return binascii.unhexlify(packet)
8ab24367d583cea1e44e283a9d10298c0f8ca0ee
17,210
import argparse
import pathlib

def get_parser():
    """Returns an argument parser."""
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        '--out', required=True, type=pathlib.Path,
        help='file to output diff result')
    parser.add_argument(
        '--cpp', required=True, type=pathlib.Path,
        help='header file generated by generate-chromeos-dbus-bindings')
    parser.add_argument(
        '--go', required=True, type=pathlib.Path,
        help='header file generated by go-generate-chromeos-dbus-bindings')
    return parser
793bd88219906f88154b5d8176b1c2c6e7373bc6
17,211
def check(file1, file2):
    """Compare file1 and file2, ignoring leading and trailing whitespace
    of the files."""
    with open(file1) as f1:
        test_output = f1.read()
    with open(file2) as f2:
        ref_output = f2.read()
    return test_output.strip() == ref_output.strip()
3a10065ea681188fd4453bade2a04207aadf954a
17,213
def trim_lcs(lcs: dict, cut_requirement: int = 0) -> dict:
    """
    Remove epochs from a lightcurve that occur before object discovery,
    which is defined as the first MJD with SNR >= 3.0.

    Args:
        lcs (dict): dictionary from a lightcurve file
        cut_requirement (int, default=0): cut number to require

    Returns:
        copy of lcs with pre-discovery epochs removed from each lightcurve
    """
    out_lcs = {}
    for snid, info in lcs.items():
        # Skip if the lightcurve will get cut during feature extraction
        if not (info['cut'] == -1 or info['cut'] > cut_requirement):
            continue
        # Get discovery MJD, skip if light curve is never discovered
        flux = info['lightcurve']['FLUXCAL'].values.astype(float)
        fluxerr = info['lightcurve']['FLUXCALERR'].values.astype(float)
        detection_mask = ((flux / fluxerr) >= 3.0)
        if sum(detection_mask) == 0:
            continue
        mjds = info['lightcurve']['MJD'].values.astype(float)
        mjd0 = mjds[detection_mask].min()
        # Trim lightcurve
        lc = info['lightcurve'][mjds >= mjd0].copy().reset_index(drop=True)
        # Store results
        out_lcs[snid] = {'lightcurve': lc, 'cut': info['cut']}
    return out_lcs
0071682cebb6cca56d93eb808d3d662d8e908787
17,214
def format_data_types(s):
    """
    Apply the correct data type to each value in the list created from a
    comma-separated string "s":

    x1: PDB ID (string)
    x2: Macro molecule overlaps (int)
    x3: Symmetry overlaps (int)
    x4: All overlaps (int)
    x5: Macro molecule overlaps per 1000 atoms (float)
    x6: Symmetry overlaps per 1000 atoms (float)
    x7: All overlaps per 1000 atoms (float)
    x8: year model deposited in PDB (int)
    x9: experiment type (string)
    """
    d = [x.strip() for x in s.split(',')]
    if len(d) == 9:
        # integer values
        for i in [1, 2, 3, 7]:
            d[i] = int(d[i])
        # float values
        for i in [4, 5, 6]:
            d[i] = round(float(d[i]), 1)
        return d
    else:
        return None
b027e98e3fcba4439c9073936cc9bcfc6df93b9d
17,215
def prep_first_file(ds, chan):
    """
    Take the dataset for chan, drop spurious variables, and rename the
    main variables.
    """
    drop_vars = (
        "nominal_satellite_subpoint_lat",
        "nominal_satellite_subpoint_lon",
        "nominal_satellite_height",
        "geospatial_lat_lon_extent",
        "algorithm_dynamic_input_data_container",
        "earth_sun_distance_anomaly_in_AU",
        "processing_parm_version_container",
        "algorithm_product_version_container",
        "band_id",
        "band_wavelength",
        "esun",
        "kappa0",
        "max_reflectance_factor",
        "mean_reflectance_factor",
        "min_reflectance_factor",
        "outlier_pixel_count",
        "percent_uncorrectable_GRB_errors",
        "percent_uncorrectable_L0_errors",
        "planck_bc1",
        "planck_bc2",
        "planck_fk1",
        "planck_fk2",
        "std_dev_reflectance_factor",
        "total_number_of_points",
        "valid_pixel_count",
        "focal_plane_temperature_threshold_exceeded_count",
        "maximum_focal_plane_temperature",
        "focal_plane_temperature_threshold_increasing",
        "focal_plane_temperature_threshold_decreasing",
    )
    drop_attr = (
        "id",
        "production_data_source",
        "dataset_name",
        "title",
        "summary",
        "processing_level",
        "date_created",
    )
    out = ds.rename({"CMI": f"CMI_C{chan:02d}", "DQF": f"DQF_CMI_C{chan:02d}"})
    out.attrs["timezone"] = "UTC"
    out.attrs["datasets"] = (ds.dataset_name,)
    for attr in drop_attr:
        del out.attrs[attr]
    out = out.drop(drop_vars, errors="ignore").set_coords("goes_imager_projection")
    return out.load()
8e192cddfcef81d482fb076bcf68a01fa0afd181
17,216
import math

def choose(n, k):
    """Standard Choose function.

    :param n: The total sample size.
    :type n: int
    :param k: The number of elements you're choosing.
    :type k: int
    :return: n choose k
    :rtype: int
    """
    # Integer division is exact here and matches the documented int return
    # type (plain "/" would return a float).
    return math.factorial(n) // (math.factorial(k) * math.factorial(n - k))
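A usage sketch for choose (illustrative values): with the integer division above, the result is an int as documented.

    >>> choose(5, 2)
    10
    >>> choose(52, 5)
    2598960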
faf862e502971ec55a34eb8ee2909b9790252a32
17,217
import re

def sanitize_target(target: str = "") -> str:
    """
    Format target, allowing only numeric characters, e.g.:

    - "012345678901234" => "012345678901234"
    - "080-123-4567" => "0801234567"
    - "1-1111-11111-11-1" => "1111111111111"
    - "+66-89-123-4567" => "66891234567"

    :param target: the raw string to sanitize
    :return: the string with every non-digit character removed
    """
    result = re.sub(r"\D", "", target)
    return result
ee490997a96a409967f7c8eebcb0e8daa28180b6
17,218
import numpy
import math

def analytic(x):
    """Analytic solution."""
    return numpy.cos(math.pi * x / 2) + 2.0 * numpy.sin(math.pi * x / 2) - 1.0
953e995dfe91a7ed76ff04394f7094b894d177b1
17,219
def create_label_dict(signal_labels, backgr_labels, discard_labels):
    """Create a label dictionary, following the convention:

        * signal_labels are mapped to 1,2,3,...
        * backgr_labels are mapped to 0
        * discard_labels are mapped to -1

    Args:
        signal_labels: list, or list of lists
            Labels of interest. Will be mapped to 1,2,3,...
            Several labels can be mapped to the same integer by using nested
            lists. For example, signal_labels=[A,[B,C]] would result in A
            being mapped to 1 and B and C both being mapped to 2.
        backgr_labels: list
            Labels will be grouped into a common "background" class (0).
        discard_labels: list
            Labels will be grouped into a common "discard" class (-1).

    Returns:
        label_dict: dict
            Dict that maps old labels to new labels.
    """
    label_dict = dict()
    for l in discard_labels:
        label_dict[l] = -1
    for l in backgr_labels:
        label_dict[l] = 0
    num = 1
    for l in signal_labels:
        if isinstance(l, list):
            for ll in l:
                label_dict[ll] = num
        else:
            label_dict[l] = num
        num += 1
    return label_dict
75ef46153cb1cd1c5bc2b56ea540e89f8c5fa4b5
17,220
import os
import re

def list_image_dir(directory, ext='jpg|jpeg|bmp|png|ppm'):
    """
    List the paths of all images under a directory.

    :param directory: root directory to walk
    :param ext: pipe-separated image file extensions to match
    :return: list of image file paths
    """
    return [os.path.join(root, f)
            for root, _, files in os.walk(directory)
            for f in files
            if re.match(r'([\w]+\.(?:' + ext + '))', f)]
d1f881d71a4eb33481030624ec7ef406af657c7a
17,221
def search_event_using_event_name(df):
    """
    Search events by event name.

    :param df: dataframe
    :return: dataframe
    """
    event_name = input("Please enter the event name to be searched : ")
    matches = df[df["event_type"] == f"{event_name}"]
    print(matches["name"])
    return df
c996ac2f19530b6d1d9554a85597d0b1dae46476
17,222
import json

def fix_mgame_str(mgame):
    """Create info for handling a data game."""
    return json.dumps(mgame.to_json())
7663861fc38e63e1ff5f5ee34c43ed52ff25b940
17,226
def checkread(stream, n):
    """Read exactly *n* bytes from *stream*.

    A ValueError is raised if fewer than *n* bytes are available.
    """
    data = stream.read(n)
    if len(data) < n:
        raise ValueError("unexpected end of file")
    return data
4b6dcaf1b1b553146f269f431846cbebd960ab9e
17,229
from typing import Optional

import torch

def check_nans_(x, warn: Optional[str] = None, value: float = 0):
    """Mask out all non-finite values + warn if `warn is not None`"""
    msk = torch.isfinite(x)
    if warn is not None:
        if not msk.all():
            print(f'WARNING: NaNs in {warn}')
    x.masked_fill_(msk.bitwise_not(), value)
    return x
5817f1488e7f187af6f68f97f3e943115a08e066
17,230
def get_element_text(element):
    """Builds the element text by iterating through child elements.

    Parameters
    ----------
    element: lxml.Element
        The element for which to build text.

    Returns
    -------
    text: str
        The inner text of the element.
    """
    text = ''.join(element.itertext())
    return text
41409e83a23927a5af0818c5f3faace0ca117751
17,234
def _sum_counters(*counters_list):
    """Combine many maps from group to counter to amount."""
    result = {}
    for counters in counters_list:
        for group, counter_to_amount in counters.items():
            for counter, amount in counter_to_amount.items():
                result.setdefault(group, {})
                result[group].setdefault(counter, 0)
                result[group][counter] += amount
    return result
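A usage sketch for _sum_counters (hypothetical group/counter names): amounts for the same group and counter are summed across the input maps.

    >>> a = {'io': {'reads': 2}}
    >>> b = {'io': {'reads': 3, 'writes': 1}}
    >>> _sum_counters(a, b)
    {'io': {'reads': 5, 'writes': 1}}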
0806ec754397d00a72ed985ef44081c9deb025c7
17,235
def solution(A, target):  # O(N)
    """
    Similar to src.arrays.two_sum, find all the combinations that can be
    added up to reach a given target, given that all values are unique.

    >>> solution([1, 2, 3, 4, 5], 5)
    2
    >>> solution([3, 4, 5, 6], 9)
    2
    """
    remaining = {}     # O(1)
    combinations = []  # O(1)
    for index, value in enumerate(A):                       # O(N)
        if value in remaining:                              # O(1)
            combinations.append([remaining[value], index])  # O(1)
        else:                                               # O(1)
            remaining[target - value] = value               # O(1)
    return len(combinations)                                # O(1)
adcdaa6825569a0d589b6abc01a514ce9d5f38f5
17,237
def exponential_ease_in_out(p):
    """Modeled after the piecewise exponential

    y = (1/2)2^(10(2x - 1))         ; [0, 0.5)
    y = -(1/2)*2^(-10(2x - 1)) + 1  ; [0.5, 1]
    """
    if p == 0.0 or p == 1.0:
        return p
    if p < 0.5:
        return 0.5 * pow(2, (20 * p) - 10)
    else:
        return -0.5 * pow(2, (-20 * p) + 10) + 1
8d528b7628b735e1dd2e3e8b89d0ef398c696ed6
17,239
import re

def extract_urls(tweets, exclude_pattern):
    """Extract urls from twitter data."""
    extracted_urls = []
    twitter_pattern = r'(http[s]*://)(twitter.com)[\D\d]+'
    for tweet in tweets:
        temp = set()
        if tweet.get("retweeted_status"):
            for url in tweet["retweeted_status"]['entities']['urls']:
                expanded_url = url['expanded_url']
                if re.match(twitter_pattern, expanded_url):
                    continue
                if re.match(exclude_pattern, expanded_url):
                    continue
                temp.add(expanded_url)
        if tweet.get('entities'):
            for url in tweet['entities']['urls']:
                expanded_url = url['expanded_url']
                if re.match(twitter_pattern, expanded_url):
                    continue
                if re.match(exclude_pattern, expanded_url):
                    continue
                temp.add(expanded_url)
        for temp_url in temp:
            extracted_urls.append(temp_url)
    return extracted_urls
c6da6d3870d21f5698cb16c41283f7a3d685425e
17,240
import re

import requests
from bs4 import BeautifulSoup

def get_new_vacancies(tag='machinelearning'):
    """Return a list of new vacancies which are up to date."""

    def extract_text(obj):
        """Extract text from html code."""
        ans = []
        for i in obj:
            for ii in i:
                try:
                    ans.append(ii.get_text())
                except AttributeError:
                    ans.append(ii)
        return ' '.join(str(i) for i in ans)

    url_ds = 'https://yandex.ru/jobs/vacancies/dev/?cities=213&tags=' + tag
    try:
        page = requests.get(url_ds, timeout=30.0)  # URL HERE ###
    except Exception:
        # There may be an unknown error, e.g. a server block
        return ['While server parsing error occured. There are no respond from server.']
    data = page.text
    structured_links = BeautifulSoup(data, 'lxml')
    try:
        vacancies = structured_links.find_all(class_='serp__item')
    except Exception:
        # Error because extraction failed
        return ['Extraction from server Error']
    list_of_matched_vacancies = [vacancy for vacancy in vacancies
                                 if re.findall(tag, vacancy.get('data-bem'))]
    matched_vacancies = [extract_text(vacancy) for vacancy in list_of_matched_vacancies]
    return matched_vacancies
48a7fec3ce745d3f84f81c645d1b89d71d036777
17,242
import inspect

def issetdescriptor(object):
    """Return true if the object is a method descriptor with setters.

    But not if ismethod() or isclass() or isfunction() are true.
    """
    if inspect.isclass(object) or inspect.ismethod(object) or inspect.isfunction(object):
        # mutual exclusion
        return False
    tp = type(object)
    return hasattr(tp, "__set__")
9d292444eebd8fe4ff8141fe64de2b319572727d
17,243
def _trim_data(data, max_rows, max_columns=None):
    """Prints a warning and returns trimmed data if necessary."""
    # If the number of columns per row exceeds the max, we need to trim each row.
    if max_columns is not None and len(data) and len(data[0]) > max_columns:
        for i, _ in enumerate(data):
            data[i] = data[i][:max_columns]
    if len(data) <= max_rows:
        return data
    print(('Warning: total number of rows (%d) exceeds max_rows (%d). '
           'Limiting to first max_rows.') % (len(data), max_rows))
    return data[:max_rows]
1e6426a4aac1163251c6b1f876dad776bc054a12
17,244
def unique_2d_list(list_: list):
    """
    Only keep those lists inside the big list which are unique.

    Args:
        list_: list of lists where the second dimension can contain duplicates

    Returns:
        list of lists with duplicate inner lists removed
    """
    return [list(x) for x in set(map(tuple, list_))]
15d3512c4fb00d6c7d624af230c019e9438c0cd5
17,245
import sys

def ask_confirm(question, default_yes):
    """Ask the user a yes/no question.

    This only looks at the first character of the response, and is
    case-insensitive.

    Takes:
        question (str): The question to ask the user. Include a question mark.
        default_yes (bool): If the user just hits enter, should we assume 'yes'?

    Returns a bool of True for a 'yes' response, False for 'no'.
    """
    while True:
        sys.stdout.write("{} [{}] ".format(question, "Y/n" if default_yes else "y/N"))
        sys.stdout.flush()
        response = sys.stdin.readline()
        # Check for a bare Enter (or EOF) first so response[0] can't raise.
        if len(response) <= 1:
            return default_yes
        elif response[0].lower() == 'y':
            return True
        elif response[0].lower() == 'n':
            return False
caae040c2a7013bdeec6c41e08ce5c3204d76995
17,246
def calculate_total_clones(haplotypes):
    """
    # ========================================================================

    CALCULATE TOTAL CLONES

    PURPOSE
    -------

    Calculates the total number of clones across multiple haplotypes. Note
    that there may be multiple clones associated with each haplotype.

    INPUT
    -----

    [HAPLOTYPE LIST] [haplotypes]
        The list of Haplotypes from which to build the distance matrix.

    RETURN
    ------

    [INT]
        The total number of clones across all the haplotypes in the list.

    # ========================================================================
    """
    total = 0
    for haplotype in haplotypes:
        total += haplotype.count
    return total
43ac5e77f0411cedd2b74a3107e8d101fb4e32cd
17,247
def xor_bytes(a, b):
    """Returns a byte array with the values from XOR'ing each byte of the
    input arrays."""
    if len(a) != len(b):
        raise ValueError("Both byte arrays must be the same length.")
    return bytes([a[i] ^ b[i] for i in range(len(a))])
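A usage sketch for xor_bytes (illustrative byte strings): equal-length inputs are XOR'ed byte by byte; mismatched lengths raise ValueError.

    >>> xor_bytes(b'\x0f\xf0', b'\xff\xff')
    b'\xf0\x0f'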
7cf107be20d916eeef6414118b8da35926997814
17,248
def _is_numeric(obj):
    """Return True if obj is a number, otherwise False.

    >>> _is_numeric(2.5)
    True
    >>> _is_numeric('spam')
    False
    """
    try:
        obj + 0
    except TypeError:
        return False
    else:
        return True
8122eea635fd5ed9b2d0e42bda284631cc6cd07b
17,249
def run_migration(connection, queries):
    """Apply a migration to the SQL server."""
    # Execute query
    with connection.cursor() as cursorMig:
        cursorMig.execute(queries)
        connection.commit()
    return True
b7364b51bbefecd7222dfe0be972fd697f4c10e8
17,251
def _get_arg_parser_delete(parser):
    """
    Adds the delete arguments to the given parser.

    :return: the parser with the added arguments.
    """
    parser.add_argument(
        "-i", "--index",
        type=str,
        required=True,
        help="index to delete",
    )
    return parser
2244ce50ac29c0a6c67aff3b6abbb63eaa8ce8d0
17,252
def identity(x):
    """
    Identity function.

    Args:
        x (np.array): input values

    Returns:
        The same array as the input.
    """
    return x
76a5d06675e9244b49acf74cf955a1dd9c6462c4
17,253
def has_divisor(n):
    """Given an integer n >= 0, return None if it is prime, or the smallest
    divisor found if it is not."""
    factor = 2  # iterates over the candidate divisors we will check
    max_factor = int(round(n ** 0.5))
    salto = 1
    while factor <= max_factor:
        if n % factor == 0:
            return factor  # no need to keep going
        factor = factor + salto
        salto = 2  # after 2, no other even factor needs checking!
    return None
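A usage sketch for has_divisor (illustrative values): 91 = 7 x 13, so its smallest divisor is returned; 97 is prime, so the result is None.

    >>> has_divisor(91)
    7
    >>> has_divisor(97) is None
    True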
1279c3becb4367f200fbf4ea6da6477f8b7bba2d
17,256
def get_books_by_year(args, books):
    """
    Get books published between two year arguments.

    :param args: args object containing all arguments
    :param books: A list of book objects read from csv file
    :return: A list of book objects published between two year arguments
    """
    if not args.year:
        return None
    # If an odd number of year arguments are entered, pop out the last one.
    if (len(args.year) % 2) == 1:
        args.year.pop()
    i = 0
    year_book_list = []
    sorted_years = sorted(args.year)
    while i < (len(args.year) - 1):
        for book in books:
            if sorted_years[i] <= int(book.year) <= sorted_years[i + 1]:
                if book not in year_book_list:
                    year_book_list.append(book)
        i += 2
    return year_book_list
b9009c9d6a08eec9db263d82e43da04255f6fe67
17,262
def rivers_by_station_number(stations, N):
    """Builds and returns a list of the N rivers with the greatest number of
    stations (as a tuple: river name, number of stations).

    In the case where there are multiple rivers with the same number of
    stations as the Nth river, these are also included.
    """
    rivers = []
    for station in stations:
        rivers.append(station.river)
    counts = set()
    for river in rivers:
        counts.add((river, rivers.count(river)))
    counts = sorted(counts, reverse=True, key=lambda x: x[1])
    top_n = counts[:N]
    for i in range(N, len(counts)):
        if top_n[N - 1][1] == counts[i][1]:
            top_n.append(counts[i])
    return top_n
e892743c6c953c926eada8f648d929bfcd368dbb
17,263
import os

def get_tldr_root():
    """
    Get the path of the local tldr repository from the environment variable
    TLDR_ROOT.
    """
    # If this script is running from tldr/scripts, the parent's parent is the root
    f = os.path.normpath(__file__)
    if f.endswith("tldr/scripts/set-more-info-link.py"):
        return os.path.dirname(os.path.dirname(f))
    if "TLDR_ROOT" in os.environ:
        return os.environ["TLDR_ROOT"]
    else:
        raise SystemExit(
            "\x1b[31mPlease set TLDR_ROOT to the location of a clone of "
            "https://github.com/tldr-pages/tldr.\x1b[0m"
        )
2a6aa6bc225ebae2f839afb01cd1a76468e30f1d
17,265
def GetGLGetTypeConversion(result_type, value_type, value):
    """Makes a gl compatible type conversion string for accessing state
    variables.

    Useful when accessing state variables through glGetXXX calls.
    glGet documentation (for example, the manual pages):
    [...] If glGetIntegerv is called, [...] most floating-point values are
    rounded to the nearest integer value. [...]

    Args:
        result_type: the gl type to be obtained
        value_type: the GL type of the state variable
        value: the name of the state variable

    Returns:
        String that converts the state variable to the desired GL type
        according to GL rules.
    """
    if result_type == 'GLint':
        if value_type == 'GLfloat':
            return 'static_cast<GLint>(round(%s))' % value
    return 'static_cast<%s>(%s)' % (result_type, value)
aa2c283985fb824c603efe76b69479667c4fdd96
17,266
def list_to_csv(row):
    """Takes a list and converts it to a comma separated string."""
    format_string = ",".join(["{}"] * len(row))
    return format_string.format(*row)
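A usage sketch for list_to_csv (illustrative values): each element is formatted with str.format, so mixed types are fine.

    >>> list_to_csv([1, 'a', 2.5])
    '1,a,2.5'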
104cc3e75d9c5d39fbdc7b7decd274a50b6e1b08
17,267
import torch

def collate_fn(batch):
    """Collate function needed for data loaders."""
    feature_list, protein_seq_list, label_list = [], [], []
    for _features, _protein_seq, _label in batch:
        feature_list.append(_features)
        protein_seq_list.append(_protein_seq)
        label_list.append(_label)
    return (torch.Tensor(feature_list), torch.Tensor(protein_seq_list),
            torch.Tensor(label_list))
59925b5636cb26c9652208ba3ae4dad418713329
17,269
def get_deformation_field_scales(reg_params):
    """
    Calculates the scaling of the deformation field from real space to voxels.

    :param reg_params: registration parameters holding the atlas pixel sizes
    :return: x, y and z scale factors
    """
    x_scale = 1000 / float(reg_params.atlas_x_pix_size)
    y_scale = 1000 / float(reg_params.atlas_y_pix_size)
    z_scale = 1000 / float(reg_params.atlas_z_pix_size)
    return x_scale, y_scale, z_scale
ae3f8c2cd97ed719d045dfa866fc585927c1a87f
17,270
def add_link(s):
    """If `s` is a url, adds anchor tags for html representation in ipynb."""
    if s.startswith('http'):
        a = '<a href="{0}" target="_blank">'.format(s)
        a += s
        a += '</a>'
        return a
    # Return non-url strings unchanged instead of implicitly returning None.
    return s
5a85592b9c976e2f20874849287e9eded552c98c
17,271
def GenerateCompareBuildsLink(build_ids, siblings):
    """Return the URL to compare siblings for this build.

    Args:
        build_ids: list of CIDB id for the builds.
        siblings: boolean indicating whether sibling builds should be included.

    Returns:
        The fully formed URL.
    """
    params = ['buildIds=%s' % ','.join([str(b) for b in build_ids])]
    if siblings:
        params.append('includeSiblings=true')
    return 'http://go/buildCompare?%s' % '&'.join(params)
c309d71cff85becaa0d9cac26dd2a0481475a6ff
17,272
from functools import reduce

def compute_fitness(tree, features, data):
    """Computes a normalized MAE on the predictions made by one tree."""
    predicted = [tree.predict(feat) for feat in features]
    difference = [abs(predicted[i] - data[i]) for i in range(len(data))]
    mae = reduce(lambda a, b: a + b, difference) / len(data)
    fitness = 1 / mae if mae != 0 else 1.0
    fitness /= len(tree.nodes)
    return fitness
57f158cbb9e9cc6137e328fd006a97d3abc431e7
17,274
import os
import time

def _cache_load_base(save_fn, load_fn, filepath, preprocess_fn, *args, **kwargs):
    """
    Args:
        save_fn (callable): save dataset function
        load_fn (callable): load dataset function
        filepath (str): filepath to cache dataset
        preprocess_fn (callable): dataset preparation function
        *args: args for `preprocess_fn`
        **kwargs: kwargs for `preprocess_fn`

    Returns:
        dataset
    """
    SLEEP_TIME = 3  # 3 sec
    if not os.path.exists(filepath):
        if preprocess_fn is None:
            raise ValueError('filepath {} does not exist, '
                             'preprocess_fn must not be None'.format(filepath))
        # Preprocess and cache (save) datasets
        print('[INFO] _cache_load_base: Preprocessing dataset...')
        datasets = preprocess_fn(*args, **kwargs)
        if not isinstance(datasets, tuple):
            datasets = (datasets,)
        save_fn(filepath, datasets)
    # Now the datasets should be ready.
    retry_count = 0
    while not os.path.exists(filepath):
        # This case may happen when `save_fn` was an async method.
        print('[WARNING] {} not found, retry in {} sec.'
              .format(filepath, SLEEP_TIME))
        time.sleep(SLEEP_TIME)
        retry_count += 1
        # The original assert message was missing its .format call.
        assert retry_count < 100, '[ERROR] {} not found after cache.'.format(filepath)
    return load_fn(filepath)
24c63ac5506d427e735d4e0100c0fb0869e5e781
17,275
import requests

def get_displayable_tracks_metadata(authorization_header, track_ids):
    """
    Get the relevant metadata for the tracks that will be displayed on the page.

    Args:
        authorization_header (dict): valid authorization header
        track_ids (list): list of track_ids to get data for

    Returns:
        Dictionary: relevant tracks and their metadata in JSON format
    """
    track_ids_string = ','.join(track_ids)
    query_params = f'?ids={track_ids_string}'
    retrieved_metadata = requests.get(
        f'https://api.spotify.com/v1/tracks{query_params}',
        headers=authorization_header).json()
    total_track_metadata = []
    for track_data in retrieved_metadata['tracks']:
        # just need the name, the album cover and a url to play the track
        track_name = track_data['name']
        artists = ' / '.join([artist['name'] for artist in track_data['artists']])
        playable_url = track_data['external_urls']['spotify']
        album_cover_url = track_data['album']['images'][0]['url']
        track_metadata = {'name': track_name,
                          'playable_url': playable_url,
                          'album_cover_url': album_cover_url,
                          'artists': artists}
        total_track_metadata.append(track_metadata)
    return dict(zip(track_ids, total_track_metadata))
83254cc7e15055fafb8a3720d48bba1c718cd25d
17,277
def int_to_bool(value):
    """Turn an integer into a boolean."""
    if value is None or value == 0:
        return False
    else:
        return True
aa8f0f15be18f0c682ad1df4ed0f710880d5ecd5
17,278
def apply(func, bound_args):
    """Function to properly apply a function with the arguments we have."""
    return func(*bound_args.args, **bound_args.kwargs)
e524b229b63fd2742d5775e5d0fedc14a50b347c
17,279
def deep_merge(base, changes):
    """
    Create a copy of ``base`` dict and recursively merge the ``changes``
    dict into it. Returns the merged dict.

    :type base: dict
    :param base: The base dictionary for the merge
    :type changes: dict
    :param changes: The dictionary to merge into the base one
    :return: The merged ``result`` dict
    """
    def merge(result, changes):
        for k, v in changes.items():
            if not isinstance(v, dict):
                result[k] = v
            else:
                if k not in result or not isinstance(result[k], dict):
                    result[k] = v.copy()
                else:
                    result[k] = result[k].copy()
                    merge(result[k], changes[k])

    result = base.copy()
    merge(result, changes)
    return result
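A usage sketch for deep_merge (hypothetical keys): nested dicts are merged rather than overwritten, and the base dict is left untouched.

    >>> base = {'a': 1, 'b': {'x': 1, 'y': 2}}
    >>> deep_merge(base, {'b': {'y': 3}, 'c': 4})
    {'a': 1, 'b': {'x': 1, 'y': 3}, 'c': 4}
    >>> base
    {'a': 1, 'b': {'x': 1, 'y': 2}}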
b74ac0e4213e8bfb0792f9e84053a96af3bb29f0
17,280
def arctanh(x):
    """
    [Definition] the inverse hyperbolic tangent of x
    [Category] inverse hyperbolic functions

    domain of definition: (-1, 1)
    """
    return 'arctanh(%s)' % x
0d2ba74f154580b7429cffd6b124263f0f0a49ad
17,282
from typing import Optional

from django.urls import reverse  # needed for the reverse() call below

def admin_obj_url(obj: Optional[object], route: str = "", base_url: str = "") -> str:
    """
    Returns admin URL to object. If object is standard model with default
    route name, the function can deduct the route name as in
    "admin:<app>_<class-lowercase>_change".

    :param obj: Object
    :param route: Empty for default route
    :param base_url: Base URL if you want absolute URLs, e.g. https://example.com
    :return: URL to admin object change view
    """
    if obj is None:
        return ""
    if not route:
        route = "admin:{}_{}_change".format(obj._meta.app_label, obj._meta.model_name)  # type: ignore
    path = reverse(route, args=[obj.id])  # type: ignore
    return base_url + path
19601794a2455cf6f76231fd3a1c932fdbe09eae
17,283
def contains_subsets(iter_of_sets):
    """
    Checks whether a collection of sets contains any sets which are subsets
    of another set in the collection.
    """
    for si in iter_of_sets:
        for sj in iter_of_sets:
            if si != sj and set(sj).issubset(si):
                return True
    return False
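A usage sketch for contains_subsets (illustrative sets): {1, 2} is a subset of {1, 2, 3}, so the first call returns True.

    >>> contains_subsets([{1, 2, 3}, {1, 2}, {4}])
    True
    >>> contains_subsets([{1, 2}, {3, 4}])
    False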
2b5055f0a31f5f00d975b49b08a4976c3c251fc5
17,285
def parse_owner(this_sample_id, owner):
    """
    Extract name and contact from owner.

    :param this_sample_id:
    :param owner:
    :return:
    """
    name = ""
    email = ""
    for child in owner:
        if "Name" == child.tag and child.text:
            name = child.text
        elif "Contact" == child.tag and "email" in child.attrib:
            email = child.attrib['email']
    return name, email
99d0d7ece452f7479b821ee1e62168bccd64fe5a
17,286
import re

def href_html2tex(link):
    """Converts html links to LaTeX links."""
    link = link.replace("\n", "^%")
    matcher = r'<a href=[\'"]?([^\'" >]+)[\'"]?.*?>(<\w+>)*([^<>"]+).*?</a>'
    texfmt = r"\\href{\1}{\\textcolor{heading}{\\textbf{\3}}}"
    output = re.sub(matcher, texfmt, link)
    return output.replace("^%", "\n")
0f24da36da2bc8db62f82de42bf333142a9abf46
17,291
def evaluate(data):
    """
    Cost function evaluation.

    Parameters:
    - average_total_time
    - average_total_queue_time
    - average_utility_rate (inverse relationship)
    """
    average_total_time = sum(v[10] - v[0] for v in data.values()) / len(data.values())
    average_total_queue_time = sum(
        sum(v[1::2]) - sum(v[:-1:2]) for v in data.values()) / len(data.values())
    return average_total_time + average_total_queue_time
10011e192eb344d51b4f82bdac310c37e6dc9d45
17,292
import numpy

def generate_regular_ngon_vertices(n: int, radius: float) -> numpy.ndarray:
    """Generate the vertices of a regular N-gon centered at zero."""
    angles = (numpy.arange(n, dtype=float) * 2 * numpy.pi / n).reshape(-1, 1)
    return radius * numpy.hstack([numpy.cos(angles), numpy.sin(angles)])
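A usage sketch for generate_regular_ngon_vertices (illustrative arguments): a square of radius 1 gives the four unit-axis points, up to floating-point error.

    >>> v = generate_regular_ngon_vertices(4, 1.0)
    >>> v.shape
    (4, 2)
    >>> numpy.allclose(v, [[1, 0], [0, 1], [-1, 0], [0, -1]])
    True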
8fcca53c55a99d4e2af9dcaad9bae0b52f6afa0d
17,293
def list_formatter(values):
    """
    Return a string with comma separated values.

    :param values: the values to join
    """
    return u', '.join(values)
3163d1303dac17b5c995386967759b4475790d47
17,294
import random
import string

def random_numeric_token(length):
    """Generates a random string of a given length, suitable for typing on
    a numeric keypad.
    """
    return ''.join(random.choice(string.digits) for _ in range(length))
b63ac76ff32b86d01fb3b74772340cf1ebfcc321
17,295
def tensor_to_string_list(tensor):
    """Convert a tensor to a list of strings representing its value."""
    scalar_list = tensor.squeeze().numpy().tolist()
    return ["%.5f" % scalar for scalar in scalar_list]
4c8844c5401850e6fb3364b4efbe745d7e5f0dad
17,296
def eval_if_exist_else_none(name, global_symbol_table):
    """
    Args:
        name (str): Expression passed into `eval`.
        global_symbol_table (dict): Specified from `globals()`. DO NOT use
            `locals()`, because all STATIC_CONVERT_VAR_SHAPE_SUFFIX vars are
            declared with the keyword `global`.

    Returns:
        The variable if found in global_symbol_table, else None.
    """
    try:
        return eval(name, global_symbol_table)
    except Exception:
        return None
146167724424da54f73cfbb05d6209a6f75127b7
17,297
def to_brackets(field_name, format_spec):
    """Return a PEP 3101 format string with field name and format
    specification.
    """
    if format_spec:
        format_spec = ':' + format_spec
        return '{' + field_name + format_spec + '}'
    return '{' + field_name + '}'
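A usage sketch for to_brackets (illustrative field and spec): the output can be fed straight to str.format.

    >>> to_brackets('price', '.2f')
    '{price:.2f}'
    >>> to_brackets('price', '.2f').format(price=3.14159)
    '3.14'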
b699b664d1d6bee8c5009bc04513e67c3c15755b
17,299
import glob
import os

def find_osx_sdks():
    """
    Finds installed OSX SDKs; returns a dict mapping version to file system path.
    """
    res = {}
    sdks = glob.glob("/Applications/Xcode.app/Contents/Developer/Platforms/"
                     "MacOSX.platform/Developer/SDKs/MacOSX*.sdk")
    for sdk in sdks:
        sdk_base = os.path.split(sdk)[1]
        ver = sdk_base[len("MacOSX"):sdk_base.rfind(".")]
        res[ver] = sdk
    return res
fe6c285a1025d55e589c8b076a3455f2fa7c0cc5
17,301
def line_2d(p1=(), p2=()):
    """
    :param p1: Coordinates of first point
    :param p2: Coordinates of second point
    :return: Slope, intercept
    """
    try:
        m = (p2[1] - p1[1]) / (p2[0] - p1[0])
        b = p1[1] - m * p1[0]
        return [m, b]
    except ZeroDivisionError:
        if p2[1] == p1[1]:
            return ['']
        else:
            # vertical line: slope and intercept are not defined
            m = 'N.D.'
            b = 'N.D.'
            return [m, b]
e04b26b07d472086b0d81956b6955bd76ce7a184
17,302
import os
import gc

def software_details():
    """Returns software details."""
    try:
        lists = []
        for value in os.uname():
            lists.append(value)
        sw_data = {'sysname': lists[0], 'nodename': lists[1],
                   'release': lists[2], 'version': lists[3],
                   'machine': lists[4]}
        return sw_data
    except Exception as e:
        print(e)
    finally:
        gc.collect()
220d8ed760001024da9166f917d20c1c60007f83
17,304
def loweRatio(matches, kp1, kp2, LOWE_RATIO=0.7):
    """Reduce a list of keypoints to the likeliest matches using Lowe's
    ratio test.

    [Reference] David G. Lowe. Distinctive Image Features from
    Scale-Invariant Keypoints. IJCV 2004.
    """
    # Store all the good matches as per Lowe's ratio test
    good = []
    pts1 = []
    pts2 = []
    for m, n in matches:
        if m.distance < LOWE_RATIO * n.distance:
            good.append(m)
            pts1.append(kp1[m.queryIdx].pt)
            pts2.append(kp2[m.trainIdx].pt)
    return good, pts1, pts2
e284bfb2f45bc5c77bd4660885651f222c41bbba
17,305
import random

def direction():
    """Return a random direction as a tuple in the form:
    (x-direction, y-direction)"""
    x = random.randint(-1, 1)
    y = random.randint(-1, 1)
    if (x, y) != (0, 0):
        return x, y
    return direction()
4c403581cba25569eb61a89887694592ccd57ea5
17,306
def clear_exceptions(estimates_output, param_dict):
    """Clears the exception columns of the estimates_output df for all models
    that are going to be run (this is needed because the strings defining
    exceptions get concatenated).
    """
    estimates_output = estimates_output.copy()
    for code in param_dict.keys():
        estimates_output.loc[code, 'exception_count'] = 0
        estimates_output.loc[code, 'exception_comment'] = '.'
    return estimates_output
2532c2a61666c231897afbfbc4f243b1c81217a8
17,307
def bud_function(input_branch, output_branch, callable, args=[], kwargs={}):
    """callable(input_branch, *args, **kwargs) -> output_branch"""
    def bud(manager):
        return {output_branch: callable(manager[input_branch], *args, **kwargs)}
    return bud
442432402adbb36023507aadf072cd57ead1282e
17,308
def dtype_is_supported(dtype):
    """Check if the data type is supported by the BNNS backend."""
    return dtype in ("", "float32")
aaf0460dba171f532b914af38eec12858295b5a7
17,309
def concat_fields(one, two, sep):
    """
    (str, str, str) -> str

    Concatenate two multiline strings line by line.
    """
    line = ""
    s_one, s_two = one.split("\n"), two.split("\n")
    for i in range(len(s_one)):
        line += (s_one[i] + sep + s_two[i] + "\n")
    return line
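A usage sketch for concat_fields (illustrative strings): lines are zipped pairwise with the separator; both inputs are assumed to have the same number of lines.

    >>> concat_fields("a\nb", "1\n2", "-")
    'a-1\nb-2\n'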
5c7847b24a797d7e07fde4731f6eec91ca2a86bf
17,311
import torch

def decode_arr_to_seq(arr, idx2vocab):
    """Taken from the OGB repo."""
    eos_idx_list = torch.nonzero(arr == len(idx2vocab) - 1)
    if len(eos_idx_list) > 0:
        clipped_arr = arr[:torch.min(eos_idx_list)]  # clip at the first __EOS__
    else:
        clipped_arr = arr
    return list(map(lambda x: idx2vocab[x], clipped_arr.cpu()))
8fdf960dd9d8c958dd66ff990bbeb31a2de34a34
17,312
import re

def _get_valid_filter_terms(filter_terms, colnames):
    """Removes any filter terms referencing non-existent columns.

    Parameters
    ----------
    filter_terms
        A list of terms formatted so as to be used in the `where` argument
        of :func:`pd.read_hdf`.
    colnames :
        A list of column names present in the data that will be filtered.

    Returns
    -------
    The list of valid filter terms (terms that do not reference any column
    not existing in the data). Returns None if the list is empty because
    the `where` argument doesn't like empty lists.
    """
    if not filter_terms:
        return None
    valid_terms = filter_terms.copy()
    for term in filter_terms:
        # first strip out all the parentheses - the where in read_hdf
        # requires all references to be valid
        t = re.sub('[()]', '', term)
        # then split each condition out
        t = re.split('[&|]', t)
        # get the unique columns referenced by this term
        term_columns = set([re.split(r'[<=>\s]', i.strip())[0] for i in t])
        if not term_columns.issubset(colnames):
            valid_terms.remove(term)
    return valid_terms if valid_terms else None
a47dff6d9c34e6fc75a77ecc2f9828bb1667f7bb
17,313
def chunk_size(value):
    """Calculate a nice value for the chunk size."""
    return round(value / 1000 + 0.5) * 1000
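A usage sketch for chunk_size (illustrative values): adding 0.5 before rounding pushes values up to the next thousand.

    >>> chunk_size(2500)
    3000
    >>> chunk_size(1)
    1000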
06c075f964c8baa0fc32b047375e8cec8c9a560b
17,314
def dekker(
    t: float,
    x1: float,
    y1: float,
    x2: float,
    y2: float,
    x3: float,
    y3: float,
    x4: float,
    y4: float,
) -> float:
    """
    Estimates the root using Dekker's method.

    Uses a secant line from (x2, y2) to either (x1, y1) or (x3, y3),
    depending on which point is closest.

    Note
    ----
    If x3 is closer to x2 but using it does not result in a value between
    x1 and x2, then it is rejected and bisection is used. Division by 0 is
    checked here, and the solver checks if 0 < t < 1 before defaulting to
    bisection.
    """
    # If x2 is closer to x1, then use (x1, y1).
    if abs(x2 - x1) <= abs(x2 - x3):
        return y2 / (y2 - y1)
    # If division by 0, then use bisection.
    elif y2 == y3:
        return 0.5
    # If x2 is closer to x3 and using (x3, y3) does
    # not result in division by 0, then use (x3, y3).
    else:
        return y2 * (x3 - x2) / ((y2 - y3) * (x1 - x3))
a50d57f6961dc11293975f03eb047a659598abcb
17,316