content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
from io import StringIO
import sys


def mock_print_stderr() -> StringIO:
    """Redirect ``sys.stderr`` to an in-memory buffer and return that buffer.

    Note: the caller is responsible for restoring ``sys.stderr`` afterwards
    (e.g. ``sys.stderr = sys.__stderr__``).
    """
    buffer = StringIO()
    sys.stderr = buffer
    return buffer
177908b7536d70f1774283e8b7d031cffc2b0dd1
19,506
import os


def mkdir(dirpath: str) -> None:
    """
    Create directory. Check if directory exists and handles error.
    """
    if os.path.exists(dirpath):
        return None
    # The existence check can race with other processes (multiprocessing),
    # so tolerate the directory appearing between the check and the call.
    try:
        os.mkdir(dirpath)
    except FileExistsError:
        pass
    return None
4a47a8868e128270cacabb9a77bb5c59d8f51381
19,508
import os
import tempfile


def _get_cache_dir():
    """
    Obtain location to store downloaded files for temporary caching.

    Uses `tempfile.gettempdir` by default. The environment variable
    ``PDS4TOOLSCACHEDIR`` may be used to specify an alternate directory.

    Returns
    -------
    str or unicode
        Path to directory used to store a temporary cache of downloaded files.
    """
    # A non-empty env var wins; any falsy value falls back to the tmp dir.
    return os.environ.get('PDS4TOOLSCACHEDIR') or tempfile.gettempdir()
247fc5a6ea7f9c6166a2f7aa7a4609a9283cf7a2
19,509
def prompt_to_continue():
    """Prompt the user for whether they would like to continue with the
    commit and tag operation.

    :return: True iff the user enters 'y' (case-insensitive), False otherwise.
    """
    print("Check the updated version files, then press 'y' to commit and tag. "
          + "Press any other key to abort.")
    choice = input().strip().lower()
    # Idiom fix: `True if x else False` around a boolean expression is
    # redundant — the comparison already yields a bool.
    return choice == 'y'
e0990fcd9b72a5277a5b0509db4bcd53d9d7017d
19,510
def bernoulli():
    """
    Return model code for a Beta-Bernoulli model to be compiled by PyStan.

    Compares two arrays of binary data, where each entry in an array
    indicates success on an associated trial. Must provide hyperparameters
    `alpha_` and `beta_` as input data fields.

    Input Data
    ----------
    {
        n_control : int            # the number of control samples
        n_variation : int          # the number of variation samples
        control : sequence[int]    # the control samples (binary)
        variation : sequence[int]  # the variation samples (binary)
        alpha_ : float             # hyperparameter
        beta_ : float              # hyperparameter
    }
    """
    # Stan program: beta priors on each arm's success rate, bernoulli
    # likelihoods, and derived absolute/relative deltas plus a pooled
    # effect size in `transformed parameters`.
    return """
data {
    int<lower=0> n_control;
    int<lower=0,upper=1> control[n_control];
    int<lower=0> n_variation;
    int<lower=0,upper=1> variation[n_variation];
    real<lower=0> alpha_;  // beta prior alpha hyperparameter
    real<lower=0> beta_;   // beta prior beta hyperparameter
}
parameters {
    real<lower=0, upper=1> theta_control;
    real<lower=0, upper=1> theta_variation;
}
transformed parameters {
    real delta;
    real delta_relative;
    real effect_size;
    delta = theta_variation - theta_control;
    delta_relative = theta_variation / theta_control - 1.0;
    effect_size = delta / sqrt((theta_control * (1 - theta_control) + theta_variation * (1 - theta_variation)) / 2);
}
model {
    // Beta prior
    theta_control ~ beta(alpha_, beta_);
    theta_variation ~ beta(alpha_, beta_);

    // Bernoulli Likelihoods
    control ~ bernoulli(theta_control);
    variation ~ bernoulli(theta_variation);
}
"""
78acb4d329988e4393f090e80d72c13333199bf4
19,511
def lerp(a, b, i):
    """Linearly interpolates from a to b

    Args:
        a: (float) The value to interpolate from
        b: (float) The value to interpolate to
        i: (float) Interpolation factor

    Returns:
        float: Interpolation result
    """
    span = b - a
    return a + span * i
8422222416668a70feb2a75790a25d7ab1f0af14
19,512
def strip_datetime_for_db_operations(dt):
    """
    Return a naive copy of *dt* (timezone info dropped).

    Some of our queries get very slow unless we remove the timezone info
    before we put them into sql.
    """
    naive = dt.replace(tzinfo=None)
    return naive
4cf19578afdb213f2f57a845f711ebbb96b869f6
19,513
from typing import Union
from pathlib import Path
import os


def pathify(path: Union[str, Path, os.PathLike]) -> Path:
    """Convert a path-like argument to ``pathlib.Path``.

    An argument that is already a ``Path`` is returned unchanged.
    """
    return path if isinstance(path, Path) else Path(path)
a608922b27d0bd465fddde26ebf761cfcf088e89
19,514
def format_number(s):
    """Format a number in a user-friendly commercial style
    (pt-BR: '.' groups thousands, ',' marks decimals)."""

    def _group_thousands(digits, sep='.'):
        # Insert `sep` every three digits, right to left.
        if len(digits) <= 3:
            return digits
        return _group_thousands(digits[:-3], sep) + sep + digits[-3:]

    text = str(s).replace('.', ',')
    integer, _, decimals = text.partition(',')

    sign = ''
    # Strip a leading '-' only for multi-digit integers (matches legacy
    # behavior for short strings).
    if len(integer) > 2 and integer.startswith('-'):
        sign = '-'
        integer = integer[1:]

    formatted = sign + _group_thousands(integer)
    if decimals:
        formatted += ',' + decimals
    return formatted
debf97566ba7f4bf73957ea6b45aa6a61968dabe
19,515
from shutil import which


def is_nmap_installed():
    """Return True when an executable ``nmap`` is found on PATH."""
    return bool(which("nmap"))
719f4da32355293e4a12b532a916e1a76eeb5ddc
19,516
import os
import json


def get_all_map_metadata(cup, season=None):
    """
    Get metadata for all maps for the specified cup and season.
    """
    data_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), "data")
    map_data_file = os.path.join(data_dir, f"{cup}.json")
    if not os.path.exists(map_data_file):
        raise Exception(f"Could not find map metadata for specified cup: {cup}")

    with open(map_data_file, "r") as f:
        mapdat = json.load(f)

    # Without a season filter, every map's metadata is returned.
    if season is None:
        return mapdat

    def _is_active(entry):
        keep = False
        if "mapStartSeason" in entry and entry["mapStartSeason"] <= season:
            keep = True
        # NOTE(review): the `<=` here also drops maps whose end season equals
        # the requested season — confirm that end seasons are exclusive.
        if "mapEndSeason" in entry and entry["mapEndSeason"] <= season:
            keep = False
        return keep

    return [m for m in mapdat if _is_active(m)]
0a09332a4a1b02e96a671ae3c7ba610cf2d0dc58
19,517
def get_jcts_ms_from_job_id(finished_simulator, job_id): """Extracts the ideal and actual Job completion time for the specified Job. Returns: The ideal and actual JCTs (in ms) for the Job with the specified ID, as executed by the provided Simulator, which is assumed to have finished executing. """ for job, jcts_ms in finished_simulator.job_to_jcts.iteritems(): if job.job_id == job_id: return jcts_ms raise KeyError("Cannot find Job with id %s" % job_id)
d5cf0adec503d48a9ddeeb7e8837cc162ffc35d3
19,518
def response(update=None):
    """Emulates JSON response from ulogin serivce for test purposes.

    Optional *update* mapping overrides any of the default fields.
    """
    data = dict(
        network='vkontakte',
        identity='http://vk.com/id12345',
        uid='12345',
        email='demo@demo.de',
        first_name='John',
        last_name='Doe',
        bdate='01.01.1970',
        sex='2',
        photo='http://www.google.ru/images/srpr/logo3w.png',
        photo_big='http://www.google.ru/images/srpr/logo3w.png',
        city='Washington',
        country='United States',
    )
    if update:
        data.update(update)
    return data
8a29ba65c38d5833e1474486135a9648f860a9a6
19,519
import shlex


def make_input_json_cmd(bam_fn, json_fn, sample):
    """CMD which creates a json file with content [['path_to_bam', 'sample']],
    which will later be used as pbsv call input.

    :param bam_fn: path to the BAM file
    :param json_fn: path of the json file to write
    :param sample: sample name
    :return: shell command string
    """
    # Fix: the `pipes` module was removed in Python 3.13; shlex.quote is the
    # documented drop-in replacement for pipes.quote.
    c0 = 'echo [[\\\"{}\\\", \\\"{}\\\"]] > {}'.format(
        shlex.quote(bam_fn), shlex.quote(sample), shlex.quote(json_fn))
    return c0
5d50ea49027f6b7110193fed22a2990cc978c545
19,523
def iter_first_value(iterable, default=None):
    """
    Get first 'random' value of an iterable or default value.

    Mappings yield their first value; other iterables yield their first
    item; empty iterables yield *default*.
    """
    for item in iterable:
        # Mappings iterate over keys, so dereference to get the value.
        return iterable[item] if hasattr(iterable, "values") else item
    return default
7fac2cbbe1a6923a1f737b0a86b208a62cfe6d50
19,524
import requests


def download_tigrfam_info(tigrfam, api_endpoint):
    """
    Download TIGRFAM from API endpoint.

    Returns the response body as text.
    """
    response = requests.get(api_endpoint + tigrfam)
    return response.text
4573fed797c3f3e6b16330fb7dd41aa57b156db1
19,525
def url_basename(url, content_type):
    """Return best-guess basename from URL and content-type.

    >>> from django_downloadview.utils import url_basename

    If URL contains extension, it is kept as-is.

    >>> print(url_basename(u'/path/to/somefile.rst', 'text/plain'))
    somefile.rst
    """
    # Everything after the last '/' (the whole url when there is none).
    return url.rpartition("/")[-1]
2ac158fe50199c367c8acac10c5c0f7a7a9a2607
19,526
import re


def parse_line(line):
    """Reads the line from the dictionary, and gets the word and syllable count"""
    # Letter runs become syllable groups; everything else is a separator.
    groups = [g for g in re.split('[^A-Za-z]+', line) if g]
    return [''.join(groups), len(groups)]
7ac8029add3148b22f11840c915f166604e8b84b
19,527
from collections import Counter


def is_palindrome_permutation(input_string):
    """Checks to see if input_string is a permutation of a palindrome.

    A string can be permuted into a palindrome iff at most one character
    occurs an odd number of times.

    Parameters
    ----------
    input_string : str
        String to check

    Returns
    -------
    bool
        True if input_string is a palindrome permutation, False otherwise
    """
    # Idiom: Counter replaces the hand-rolled odd/even bookkeeping table.
    counts = Counter(input_string)
    odd_counts = sum(count % 2 for count in counts.values())
    return odd_counts <= 1
4867a94bd9051f07b04e60288739b3e98333fbda
19,529
def rec_map(callable, dict_seq_nest):
    """Recursive map that goes into dicts, lists, and tuples.

    This function tries to preserve named tuples and custom dicts.
    It won't work with non-materialized iterators.
    """
    if isinstance(dict_seq_nest, list):
        return type(dict_seq_nest)(rec_map(callable, x) for x in dict_seq_nest)
    if isinstance(dict_seq_nest, tuple):
        mapped = [rec_map(callable, x) for x in dict_seq_nest]
        # Bug fix: plain tuples reject star-expansion — tuple(*items) is a
        # TypeError whenever len(items) != 1. Only namedtuples (detected via
        # their _fields attribute) take one positional argument per field.
        if hasattr(dict_seq_nest, '_fields'):
            return type(dict_seq_nest)(*mapped)
        return type(dict_seq_nest)(mapped)
    if isinstance(dict_seq_nest, dict):
        return type(dict_seq_nest)((k, rec_map(callable, v))
                                   for k, v in dict_seq_nest.items())
    return callable(dict_seq_nest)
7d0f91a6af4c3e7dc6abdee9bcf059b421f9f784
19,530
def convert_box_xy(x1, y1, width, height):
    """
    Convert from (x1, y1), the center of the box, to the top-left
    coordinate (corner), clamped to be non-negative.

    :param x1: the x coordinate for the center of the bounding box
    :param y1: the y coordinate for the center of the bounding box
    :param width: width of the bounding box
    :param height: height of the bounding box
    :return: the top left coordinate (corner) of the bounding box
    """
    # max() expresses the clamp-to-zero directly (also drops the stray
    # semicolon of the original); docstring no longer lists the unused
    # img_width/img_height parameters.
    left = max(x1 - width // 2, 0)
    top = max(y1 - height // 2, 0)
    return left, top
7524dd86f8c8ebff84cd9da0e75abd0b580547a0
19,531
from typing import List


def longest_str(obj: List[str]) -> int:
    """Determine the length of the longest string in a list of strings,
    padded by 5 and rounded to the nearest multiple of ten.

    Args:
        obj: list of strings to calculate length of
    """
    padded_lengths = (len(item) + 5 for item in obj)
    return round(max(padded_lengths), -1)
c9c33da184ff745fa4cfa8b8db38080b1330a642
19,534
def _get_tuple_state_names(num_states, base_name):
    """Returns state names for use with LSTM tuple state."""
    names = []
    for index in range(num_states):
        # One (cell, hidden) name pair per LSTM layer.
        names.append((f"{index}_{base_name}_c", f"{index}_{base_name}_h"))
    return names
8d96508174ec4893e9a9812d75471303ff972248
19,535
import functools


def tp_cache(func):
    """Internal wrapper caching __getitem__ of generic types with a fallback
    to original function for non-hashable arguments.
    """
    memoized = functools.lru_cache(maxsize=None, typed=True)(func)

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return memoized(*args, **kwargs)
        except TypeError:  # pragma: no cover
            # Unhashable arguments cannot be cached; fall through to a plain
            # call. Real errors raised by func itself surface below.
            pass
        return func(*args, **kwargs)  # pragma: no cover

    return wrapper
22b8f866be0c0edb0839bf9d80d62ca7d5e2e0cc
19,538
def _position_is_valid(position):
    """
    Checks if given position is a valid. To consider a position as valid, it
    must be a two-elements tuple, containing values from 0 to 2.

    Examples of valid positions: (0,0), (1,0)
    Examples of invalid positions: (0,0,1), (9,8), False

    :param position: Two-elements tuple representing a
                     position in the board. Example: (0, 1)

    Returns True if given position is valid, False otherwise.
    """
    # Exact-type check (not isinstance) preserved deliberately: only plain
    # 2-tuples qualify.
    if type(position) != tuple or len(position) != 2:
        return False
    row, col = position
    return 0 <= row < 3 and 0 <= col < 3
f7f277a472ea7046b6029a657215fa7006a4d808
19,542
def pitch_info(data):
    """Function for assigning pitch color and name.

    Adds 'color' and 'pitch_name' columns to *data* (a DataFrame with a
    'pitch_type' column) and returns it.
    """
    # pitch color dictionary
    color = {'FF': 'red', 'FT': 'red', 'FC': 'red', 'FS': 'red', 'SI': 'red',
             'CH': 'blue', 'CU': 'purple', 'CB': 'purple', 'KC': 'purple',
             'KN': 'orange', 'SL': 'green', 'PO': 'black', 'IN': 'black',
             'EP': 'black', 'SF': 'red'}
    # pitch name dictionary
    pitch_names = {'FF': 'Four-Seam Fastball', 'FT': 'Two-Seam Fastball',
                   'FC': 'Cutter', 'FS': 'Sinker', 'SI': 'Sinker',
                   'SF': 'Splitter', 'SL': 'Slider', 'CH': 'Change-Up',
                   'CB': 'Curveball', 'CU': 'Curveball',
                   'KC': 'Knuckle-Curve', 'KN': 'Knuckler', 'EP': 'Eephus',
                   'UN': 'Unidentified', 'PO': 'Pitch Out',
                   'XX': 'Unidentified', 'FO': 'Pitch Out'}
    pitch_color = []
    pitch_name = []
    for _, row in data.iterrows():
        pitch = row['pitch_type']
        # Bug fix: the original wrapped BOTH lookups in one try/except, so a
        # code present in `color` but missing from `pitch_names` (e.g. 'IN')
        # appended to pitch_color twice, desynchronizing the two lists and
        # breaking the column assignment below. Look each dict up
        # independently with a per-dict default instead.
        pitch_color.append(color.get(pitch, 'black'))
        pitch_name.append(pitch_names.get(pitch, 'Unidentified'))
    data['color'] = pitch_color
    data['pitch_name'] = pitch_name
    return data
25c17057f5b2cc1c3505f6c9c05f02ac70bb2ddc
19,543
def parse_grid(data):
    """Parses the string representation into a nested list"""
    stripped = data.strip()
    # Split strictly on '\n' (not splitlines) to keep legacy behavior for
    # other line-ending characters.
    return stripped.split('\n')
19adc64da16f09da97cefc553c8973c1ba984519
19,544
import pandas


def order_by_row_group(df, column, groups):
    """
    Order a dataframe by groups. Return the dataframe.

    Probably a better way to do this already, but I don't know what it is.
    """
    pieces = [df[df[column] == group] for group in groups]
    ordered = pandas.concat(pieces)
    return ordered.reset_index()
21dbc157dc2d901f0775304e7c7ce7acb2a15c2d
19,545
def generateActMessage(estopState: bool, enable: bool, height, angle):
    """
    Accepts an input of two ints between -100 and 100

    Builds the actuator message [estop, enable, height, angle] with every
    field coerced to int, prints it, and returns it.
    """
    messageToSend = [int(estopState), int(enable), int(height), int(angle)]
    print("Sending: %s" % str(messageToSend))
    return messageToSend
6f62ffe14bce6dfe34fb15da882ea737ab8afa9c
19,546
def url_wrapper(urls):
    """
    Join URL prefixes with their handlers: e.g. combine 'users' and 'regist'
    into the url /users/regist mapped to the
    views.users.users_views.RegistHandle module.
    """
    wrapper_list = []
    for path, handles in urls:
        if not isinstance(handles, (tuple, list)):
            # Plain (path, handler) pair — pass through unchanged.
            wrapper_list.append((path, handles))
            continue
        for pattern, handle_class in handles:
            # Concatenate prefix and pattern into the full route.
            wrapper_list.append(('{0}{1}'.format(path, pattern), handle_class))
    return wrapper_list
23b7c6020892656c85567900138581becb9129e9
19,548
from collections import Counter


def canConstruct(self, ransomNote, magazine):
    """
    :type ransomNote: str
    :type magazine: str
    :rtype: bool
    """
    # The note is constructible iff no letter is needed more often than the
    # magazine supplies it. Counter subtraction keeps only positive deficits,
    # so an empty result means "no deficit". (Also switched the import to
    # collections.Counter — typing.Counter is a deprecated alias.)
    return not (Counter(ransomNote) - Counter(magazine))
a51fdb4fc7e2bc4dcc78049d19a1d5c79912920c
19,549
def hire(request):
    """
    Hire page.

    Give us the $$$
    """
    context = {}
    return context
86a73a4f024eea794d73dfc86d08e3ce073b28f1
19,550
def read_questions(file):
    """Read the questions test set — full multi-turn mode, i.e. QQ + A.

    :param file: path to a text file; lines containing "<s>" are treated as
        single-turn items, the rest as multi-turn dialogue lines separated
        by blank lines.
    :return: list of question strings, with turns joined by "<q>".
    """
    q_sep = "<q>"
    with open(file, 'r') as f:
        test_q = [x.strip("\n") for x in f.readlines()]
    single_turn = [x for x in test_q if "<s>" in x]
    multi_turn = [x for x in test_q if "<s>" not in x]
    test_set = []
    # Single-turn test data: pick the relevant segments depending on how
    # many "<s>"-separated fields the line has, then rejoin with "<q>".
    for i in range(len(single_turn)):
        tq = single_turn[i]
        q_split = tq.split("<s>")
        if len(q_split) == 5:
            q_split = [q_split[2], q_split[4]]
        elif len(q_split) == 3:
            q_split = [q_split[0], q_split[2]]
        else:
            q_split = q_split[-2:]
        tq = q_sep.join(q_split)
        test_set.append(tq)
    # Multi-turn test data
    for i in range(len(multi_turn)):
        # Skip blank lines (they delimit dialogues).
        if multi_turn[i] == "":
            continue
        # First line of the file starts a new dialogue.
        if i == 0:
            test_set.append(multi_turn[i])
            continue
        # Line by line: build QQ pairs from consecutive non-blank lines.
        if multi_turn[i - 1] == "":
            test_set.append(multi_turn[i])
        else:
            q = multi_turn[i - 1] + q_sep + multi_turn[i]
            test_set.append(q)
    return test_set
eaabd6461a0c85be2b5b8d02865a352052d3fcca
19,552
def getInv(wmap):
    """
    Get the inverse map of a dictionary.

    Non-injective maps are handled: each value maps to the list of keys
    that held it.

    :param wmap: Dictionary
    :returns: Dictionary which is an inverse of the input dictionary
    """
    inv_map = {}
    for k, v in wmap.items():
        # Idiom: setdefault inserts the list and appends in one lookup
        # (the original did get() plus a second indexing).
        inv_map.setdefault(v, []).append(k)
    return inv_map
441bfa8e543a3aa24494d290e1e3cf6baa81437e
19,553
def optimal_noverlap(win_name, win_len):
    """
    This function is intended to support scipy.signal.stft calls with noverlap parameter.
    :param win_name: (str) name of the window (has to follow scipy.signal.windows naming)
    :param win_len: (int) length of the FFT window
    :return : (int) optimal overlap in points for the given window type
    """
    overlap_coefficients = {'hann': 0.5,
                            'hamming': 0.75,
                            'blackmanharris': 0.75,
                            'blackman': 2/3,
                            'flattop': 0.75,
                            'boxcar': 0}
    try:
        coefficient = overlap_coefficients[win_name]
    except KeyError as exc:
        print(exc)
        print('The window you have selected is not recognized or does not have optimal overlap. Setting window overlap to default 75%.')
        coefficient = 0.75
    return int(win_len * coefficient)
d98db08a9e08b6639a38a324799d1141e65b1eb4
19,554
from contextlib import suppress


def dictify(value):
    """Ensure value is a dictionary.

    Mappings recurse over items, strings stay atomic, other iterables
    become lists, and scalars pass through unchanged.
    """
    try:
        items = value.items()
    except AttributeError:
        pass
    else:
        return {k: dictify(v) for k, v in items}
    if isinstance(value, str):
        return value
    try:
        return [dictify(element) for element in value]
    except TypeError:
        return value
3ea2a84bc38180bbe8db7f89495cc2227a881241
19,555
def compact(f):
    """ remove spaces from str output of f, used for test for convenience"""
    def _wrapper(*args, **kwargs):
        result = f(*args, **kwargs)
        return result.replace(' ', '')
    return _wrapper
5eb18fc529172def0b829409f5fcbc402ee12353
19,557
def split_name_from_full_name(full_name) -> dict:
    """
    Retrieves given, middle, family name from full_name.

    :param str full_name:
    :return: dict with 'given_name' and, when enough words are present,
        'family_name' and 'middle_name'; empty dict for an empty or
        whitespace-only input.
    """
    parts = full_name.split()
    name = {}
    # Robustness fix: an empty/whitespace-only full_name used to raise
    # IndexError on parts[0]; return an empty mapping instead.
    if not parts:
        return name
    name["given_name"] = parts[0]
    if len(parts) >= 2:
        name["family_name"] = parts[-1]
    if len(parts) >= 3:
        name["middle_name"] = " ".join(parts[1:-1])
    return name
81afb234e3551dca01f87890131ce63873428e13
19,558
def _prefix_range(target, ge, le):
    """Verify if target prefix length is within ge/le threshold.

    Arguments:
        target {IPv4Network|IPv6Network} -- Valid IPv4/IPv6 Network
        ge {int} -- Greater than
        le {int} -- Less than

    Returns:
        {bool} -- True if target in range; False if not
    """
    # Idiom: a chained comparison reads as the mathematical interval test
    # and replaces the flag variable + two-clause boolean of the original.
    return ge <= target.prefixlen <= le
536eea3be670e21065f5cd81b5f0e268d564c559
19,559
def extract_bytecode(compiled, contract_name):
    """
    :compiled - content from smart contract file
    :contract_name - list / empty of contract names
    """
    results = []
    for contract in contract_name:
        details = compiled[contract]
        results.append({
            contract: {
                'bytecode': details['bin'],
                'abi': details['abi'],
            }
        })
    return results
2b30db4a37e555efb2831524f9e909ce6259bb51
19,560
def _get_nodes_perms_key(user, parent_id=None) -> str:
    """
    Returns key (as string) used to cache a list of nodes permissions.

    Key is based on (user_id, parent_id) pair i.e we cache all nodes'
    permissions dictionary of folder. If parent_id == None - we will
    cache all root documents of given user.
    """
    # Falsy parent_id (None, 0, '') renders as empty — legacy behavior.
    pid = parent_id or ''
    return f"user_{user.id}_parent_id_{pid}_readable_nodes"
fb3875a53c17b6a818f9bb97738a47222733af70
19,561
from typing import OrderedDict


def train(model, dataloader, criterion, optimizer, device='cpu', t=None, best_acc=None):
    """One iteration of model training. Intentionally kept generic to
    increase versatility.

    :param model: The model to train.
    :param dataloader: Dataloader object of training dataset.
    :param criterion: The loss function for measuring model loss.
    :param optimizer: Optimizer driving the parameter updates.
    :param device: 'cuda' if running on gpu, 'cpu' otherwise
    :param t: Optional tqdm object for showing progress in terminal.
    :param best_acc: Optional parameter to keep track of best accuracy
        in case code is run in multiple iterations.
    :return: Training accuracy (percentage of correct top-1 predictions).
    """
    # Initialize variables
    model.train()
    train_loss = 0
    correct = 0
    total = 0
    for batch_idx, (inputs, targets) in enumerate(dataloader):
        inputs, targets = inputs.to(device), targets.to(device)
        # Forwards pass
        optimizer.zero_grad()
        outputs = model(inputs)
        # Backpropagation
        loss = criterion(outputs, targets)
        loss.backward()
        optimizer.step()
        # Keep track of loss (running mean is reported below)
        train_loss += loss.item()
        # Top-1 prediction per sample; assumes outputs are (batch, classes)
        # class scores — TODO confirm for non-classification uses.
        _, predicted = outputs.max(1)
        total += targets.size(0)
        correct += predicted.eq(targets).sum().item()
        # Print results to terminal: tqdm postfix only when single-process
        # data loading (num_workers == 0), plain print otherwise.
        if t is not None and dataloader.num_workers == 0:
            od = OrderedDict()
            od['type'] = 'train'
            od['loss'] = train_loss / (batch_idx + 1)
            od['acc'] = 100. * correct / total
            od['test_acc'] = best_acc if best_acc is not None else None
            od['iter'] = '%d/%d' % (correct, total)
            t.set_postfix(ordered_dict=od)
            t.update(inputs.shape[0])
        else:
            print('Loss: %.3f | Acc: %.3f%% (%d/%d)'
                  % (train_loss/(batch_idx+1), 100.*correct/total, correct, total))
    return 100. * correct / total
f65899cd5d769cef628c15e35835bcd7a93bde60
19,562
import json


def cvclass_to_qid(array):
    """Turn computer vision classes into Wikidata qids.

    :param array: iterable of class strings; only the text before the first
        '/' is matched (lower-cased, stripped) — presumably labels look like
        "label/extra", TODO confirm.
    :return: list of "wd:<wiki_id>" strings, or None when any entry has no
        match (see except branch below).
    """
    # Class-to-QID lookup table shipped alongside the code; path is relative
    # to the current working directory — NOTE(review): fragile, confirm.
    with open(
        "../data/classes_places.json", "r", encoding="utf-8"
    ) as f:
        classes = json.load(f)
    try:
        qid_array = []
        for el in array:
            key = el.split( '/' )[0].lower().strip()
            # IndexError here when no table entry matches `key`.
            qid_array.append( "wd:" + [i['wiki_id'] for i in classes if i['original_class'] == key][0] )
        return qid_array
    except (IndexError, TypeError) as e:
        # NOTE(review): a single unmatched class aborts the whole batch and
        # the function implicitly returns None.
        print( 'no match' )
421c99889160756fbac2ebb08a3a660600b0a977
19,563
def bezout(a, b):
    """returns u, v such as au+bv = pgcd(a,b)"""
    if b == 0:
        return (1, 0)
    # Extended Euclid: recurse on (b, a mod b) and back-substitute.
    quotient, remainder = divmod(a, b)
    u, v = bezout(b, remainder)
    return (v, u - quotient * v)
eb4915f18d09ba344df64ded886a73dad7638acb
19,564
def get_columns(filters):
    """return columns based on filters"""
    # (fieldname, fieldtype, label, options, width); options=None → omitted.
    specs = [
        ('item_code', 'Link', 'Item Code', 'Item', 120),
        ('item_name', 'Data', 'Item Name', None, 100),
        ('stock_uom', 'Link', 'UOM', 'UOM', 60),
        ('warehouse', 'Link', 'Warehouse', 'Warehouse', 100),
        ('shelf', 'Link', 'Shelf', 'Shelf', 100),
        ('batch_no', 'Link', 'Batch', 'Batch', 100),
        ('opening_qty', 'Float', 'Opening Qty', None, 120),
        ('in_qty', 'Float', 'In Qty', None, 100),
        ('out_qty', 'Float', 'Out Qty', None, 100),
        ('bal_qty', 'Float', 'Balance Qty', None, 100),
        ('creation', 'Datetime', 'Creation', None, 180),
        ('valuation_rate', 'Float', 'Valuation Rate', None, 180),
    ]
    columns = []
    for fieldname, fieldtype, label, options, width in specs:
        # Key insertion order matches the legacy literals:
        # fieldname, fieldtype, label, [options,] width.
        column = {
            'fieldname': fieldname,
            'fieldtype': fieldtype,
            'label': label,
        }
        if options is not None:
            column['options'] = options
        column['width'] = width
        columns.append(column)
    return columns
4b12a723a0372b7b45908b6d13882c5bd6800a35
19,565
def frequency(string, word):
    """ Find the frequency of occurrences of word in string as percentage """
    needle = word.lower()
    haystack = string.lower()
    # Non-overlapping occurrence count, case-insensitive.
    occurrences = haystack.count(needle)
    # Percentage relative to the total character length of the string.
    return 100.0 * occurrences / len(haystack)
dd2183dcec04bdf835ab22a8a351d53571f6e5e9
19,566
import math


def calc_nrows_ncols(N, aspect=(16, 9)):
    """
    Compute the number of rows and columns to fit N subplots into a figure
    with aspect ratio `aspect`.

    from: https://stackoverflow.com/questions/36482328/how-to-use-a-python-produce-as-many-subplots-of-arbitrary-size-as-necessary-acco
    """
    width, height = aspect
    # Initial guess: scale the aspect rectangle so its area is about N.
    scale = (N / (width * height * 1.0)) ** (1 / 2.0)
    cols = math.floor(width * scale)
    rows = math.floor(height * scale)
    # Grow the grid until it holds N cells, alternating which dimension
    # grows first (rows first for portrait aspects).
    grow_rows_next = width < height
    while rows * cols < N:
        if grow_rows_next:
            rows += 1
        else:
            cols += 1
        grow_rows_next = not grow_rows_next
    return rows, cols
2e6f2be133e60f4ae4d096a4d5402c9dd4b2015b
19,567
def get_func_names(job_content):
    """
    Get function names from job content json

    :param job_content: job content info (mapping with an "op_list" sequence)
    :return: function names of every op that declares one
    """
    # Idiom: list comprehension replaces the append loop.
    return [op["func_name"] for op in job_content["op_list"] if "func_name" in op]
58d32e48f308758a2d6028f3312c2b376e04c9f5
19,568
def split_res(res):
    """Parse a resolution string like '1920x1080' (case-insensitive 'x')
    into an (int, int) pair."""
    width, height = res.lower().split("x")
    return int(width), int(height)
4baae2a5d6ad632b213a529919d7051b3e9eaf8a
19,570
from functools import reduce


def compose(*funcs):
    """Performs right-to-left function composition. The rightmost function
    may have any arity; the remaining functions must be unary.

    Note: The result of compose is not automatically curried"""
    def _composed(value):
        # Fold the functions over the value, rightmost first.
        return reduce(lambda acc, fn: fn(acc), reversed(funcs), value)
    return _composed
000e209c941678f3336a4dfff301a65c6ec20f06
19,571
def month(day):
    """
    Given the number of days elapsed in the current (non-leap) year,
    return the month name.

    :param day: Current number of days in the year (1-365)
    :return: str, month

    Bug fix: the original conditions were written as ``1 >= day <= 31``,
    which tests ``1 >= day AND day <= 31`` — so almost every day fell
    through to the wrong branch (e.g. day 15 returned "February"). The
    intended test is ``lo <= day <= hi``, equivalent to checking the
    cumulative last day of each month in order.
    """
    boundaries = [
        (31, "January"), (59, "February"), (90, "March"), (120, "April"),
        (151, "May"), (181, "June"), (212, "July"), (243, "August"),
        (273, "September"), (304, "October"), (334, "November"),
    ]
    for last_day, name in boundaries:
        if day <= last_day:
            return name
    return "December"
37ae648375aaa6c5f59ba356119686b8fb89b224
19,572
import random


def insert_node(G, route):
    """
    Inserts a random node in a route to modify it.

    Args:
        G - graph of the network (assumed networkx-like: supports
            G.edges(node) — TODO confirm)
        route - the route, as a sequence of edges; NOTE: mutated in place
            and also returned.

    Returns:
        A modified route, with an extra node (unchanged when no viable
        insertion point exists).
    """
    # Initialize list
    removed_edge_index = []
    ins_candidate_edges = []
    ins_candidate_nodes = []
    # Search viable nodes
    for i, edge in enumerate(route):
        # Generate list of nodes 'k' as (u, k) and (k, v)
        in_nodes = [v for u, v in G.edges(edge[0])]
        out_nodes = [v for u, v in G.edges(edge[1])]
        # Generate intersection: nodes adjacent to both endpoints can be
        # spliced in between them.
        candidate_nodes = list(set(in_nodes) & set(out_nodes))
        # If len(candidate_nodes) > 0...
        if len(candidate_nodes) > 0:
            removed_edge_index.append(i)
            ins_candidate_edges.append(edge)
            ins_candidate_nodes.append(candidate_nodes)
    # Check for a viable solution
    if len(ins_candidate_edges) and len(ins_candidate_nodes) > 0:
        # Select random edge from candidates
        selection_index = random.randint(0, len(removed_edge_index) - 1)
        selected_edge = ins_candidate_edges[selection_index]
        selected_edge_index_in_route = removed_edge_index[selection_index]
        # Select random node from candidates for selected edge
        selected_node = random.choice(ins_candidate_nodes[selection_index])
        # Generate new edges to add
        edge_1 = (selected_edge[0], selected_node)
        edge_2 = (selected_node, selected_edge[1])
        # Remove edge from route
        route.pop(selected_edge_index_in_route)
        # Add new edges (replacing the removed edge with the two-hop detour)
        route.insert(selected_edge_index_in_route, edge_1)
        route.insert(selected_edge_index_in_route + 1, edge_2)
    return route
ab1ab82e88ec9069a1a5c83c8f6adbd5b772ea1c
19,574
def start_server(enode, path):
    """
    Start or restart the tftp server.

    Looks for xinetd config file followed by tftpd-hpa config file and uses
    the first service found.

    :param enode: callable shell on the target node — presumably a topology
        engine node that runs a command and returns its output; TODO confirm.
    :param str path: sets the root folder for the tftp server
    :rtype: bool
    :return: true if the service started
    :raises Exception: when neither supported tftp config file is present
    """
    # Escape '/' so the path can be embedded in the sed replacement below.
    path = path.replace('/', '\/')
    if(enode('[ -f /etc/xinetd.d/tftp ] && echo "Y"') == 'Y'):
        cfg_file = "/etc/xinetd.d/tftp"
        # Enable the service and point its root at `path`.
        enode("sed -i '/disable/ s/yes/no/' {}".format(cfg_file))
        enode("sed -i '/server_args/ s/=.*/= -s {}/' {}".format(path, cfg_file))
        result = enode("service xinetd restart")
    elif(enode('[ -f /etc/default/tftpd-hpa ] && echo "Y"') == 'Y'):
        cfg_file = "/etc/default/tftpd-hpa"
        enode("sed -i '/DIRECTORY/ s/=.*/=\"{}\"/' {}".format(path, cfg_file))
        result = enode("service tftpd-hpa restart")
    else:
        raise Exception(
            "Cannot find supported tftp service (no config file found)."
        )
    # Service restart output containing 'failed' signals an unsuccessful start.
    return('failed' not in result)
4ed74e994e964fc5ea2c171662f09d7353ad261c
19,575
import os


def extension_of_markdown_file(file_path_without_ext):
    """
    Find markdown file with the specific file path

    :param file_path_without_ext: file path without extension
    :return: file extension of the first existing markdown variant
        ('md', 'MD', 'mdown', 'markdown'), or None when no file exists
    """
    # Idiom fix: loop over the candidate extensions (preserving the original
    # priority order) instead of an if/elif ladder; `os.path.exists` already
    # returns a bool, so the old `is not False` test reduces to a truth test.
    for ext in ('md', 'MD', 'mdown', 'markdown'):
        if os.path.exists(f'{file_path_without_ext}.{ext}'):
            return ext
    return None
4e258b54906a1bae8a7195d05ba206dbd0722b0f
19,576
def build_query_result_object(page):
    """
    Builds result object for an image info.

    Parameters:
        page (obj): Page object of search result.

    Returns:
        query_result_object (obj): Result object of image.
    """
    match = {
        "id": "M" + str(page["pageid"]),
        "name": page["title"],
        "score": 100,
        "match": True,
    }
    return {"result": [match]}
a0eb0b2828d6c62587f12d73f27cfeb5d96ea6c2
19,578
def filter_out_dict_keys(adict, deny):
    """Return a similar dict, but not containing the explicitly denied keys

    Arguments:
        adict (dict): Simple python dict data struct
        deny (list): Explicits denied keys
    """
    kept = ((key, value) for key, value in adict.items() if key not in deny)
    return dict(kept)
a377c35f4eaedf0ed3b85eeaedbd45f7af0e8ec7
19,580
def loadraw(path, static=False):
    """Loads raw file data from given path with unescaped HTML.

    Arguments:
        path (str): Path to the file to read.
        static (bool): If True, all the `{` and `}` will be replaced with
            `{{` and `}}` respectively.

    Example:
        >>> # $ cat path/to/file.html
        >>> # <p>{foo}</p>
        >>>
        >>> loadtxt("path/to/file.html")
        >>> b'<p>{foo}</p>'
        >>>
        >>> loadtxt("path/to/file.html", static=True)
        >>> b'<p>{{foo}}</p>'
    """
    with open(path) as f:
        content = f.read().strip()
    if static:
        # Escape braces so the content survives str.format processing.
        content = content.replace("{", "{{").replace("}", "}}")
    return content.encode()
438d4128bd72a81f46e581984d1cf53b3d11254a
19,581
def is_prompt_toolkit_available():
    """Checks if prompt_toolkit is available to import."""
    # Bug fix: the original try block contained no import at all, so the
    # function unconditionally returned True. Actually attempt the import.
    try:
        import prompt_toolkit  # noqa: F401
        return True
    except ImportError:
        return False
0c447a07f35e4830f1f8fe65233d2ce957397850
19,582
def get(obj, path):
    """
    Looks up and returns a path in the object. Returns None if the path
    isn't there.
    """
    current = obj
    for key in path:
        try:
            current = current[key]
        except (KeyError, IndexError):
            return None
    return current
f7d936174f1171c42cd7ec2fa4237a887e78bb0e
19,584
from typing import List


def minimum_sum(triangle: List[List[int]]) -> int:
    """Minimum path sum from top to bottom of a triangle, via bottom-up
    dynamic programming: each cell takes the cheaper of its two children,
    and the apex ends up holding the answer.

    :param triangle: list of rows, row i having i+1 entries
    :return: minimum path sum (0 for an empty triangle)
    """
    if not triangle:
        return 0
    # Bug fix: the original aliased triangle[-1], so the DP sweep mutated
    # the caller's bottom row in place; work on a copy instead.
    res = list(triangle[-1])
    for i in range(len(triangle) - 2, -1, -1):
        for j in range(len(triangle[i])):
            res[j] = triangle[i][j] + min(res[j], res[j + 1])
    return res[0]
e0fa2a1e91ec834454a0c16e75424fe5d84de8ac
19,585
def break_time_point(cape_cube, precip_cube):
    """Modifies cape_cube time points to be incremented by 1 second and
    returns the error message this will trigger"""
    time_coord = cape_cube.coord("time")
    time_coord.points = time_coord.points + 1
    return (
        r"CAPE cube time .* should be valid at the "
        r"precipitation_rate_max cube lower bound .*"
    )
70f16939983024d58745745abd0a921aa015615c
19,586
import platform
from pathlib import Path


def get_nlp_path(user="yanndubs"):
    """Return (create if needed) path on current machine for NLP stanford.

    :param user: username whose machine-local directory is looked up.
    :return: pathlib.Path to the user's directory under the machine-local
        volume (created when absent).
    """
    # Machine-local volume is assumed mounted at /<short-hostname>/.
    machine_name = platform.node().split(".")[0]
    machine_path = Path(f"/{machine_name}/")
    # Any existing <partition>/<user> directory is reused (last one wins).
    user_paths = list(machine_path.glob(f"*/{user}"))
    if len(user_paths) == 0:
        # No directory yet: pick the last partition whose name contains
        # "scr" (presumably /scrN scratch mounts — TODO confirm) and create
        # the user directory there.
        possible_paths = [p for p in machine_path.iterdir() if "scr" in str(p)]
        user_path = possible_paths[-1] / user
        user_path.mkdir()
    else:
        user_path = user_paths[-1]
    return user_path
cb0c50ae6bab81487c92475069875a0dba5ab684
19,587
def _build_msg_string(instead: str, since: str) -> str:
    """Build a deprecation warning message format string.

    .. versionadded:: 3.0
    .. versionchanged:: 7.0
       `since` parameter must be a release number, not a timestamp.

    :param instead: suggested replacement for the deprecated object
    :param since: a version string when the method was deprecated
    :raises ValueError: if *since* is non-empty but not a dotted release
    """
    if since and '.' not in since:
        raise ValueError('{} is not a valid release number'.format(since))
    suffix = ' since release ' + since if since else ''
    if instead:
        return '{{0}} is deprecated{}; use {{1}} instead.'.format(suffix)
    return '{{0}} is deprecated{}.'.format(suffix)
7ba4d54bc8edb9efa0c7161164754cfd76cd5c21
19,589
import time


def Sleep(x, duration=1):
    """
    iterable >> Sleep(duration)

    Pass each element through unchanged, sleeping *duration* seconds per
    element (useful to throttle a nuts-flow pipeline).

    >>> from nutsflow import Collect
    >>> [1, 2, 3] >> Sleep(0.1) >> Collect()
    [1, 2, 3]

    :param object x: Any input element.
    :param float duration: Sleeping time in seconds.
    :return: The input element, unaltered.
    :rtype: object
    """
    time.sleep(duration)
    return x
4d37f22a1f00f0157c10fe8f25983d7abf5ef63d
19,590
import pickle


def load_pickle_object(filename: str):
    """Unpickle a binary file and return the stored Python object.

    Args:
        filename (string): pickle filename to be loaded (<filename>.pkl).

    Returns:
        The deserialized object, whatever its type.
    """
    with open(filename, 'rb') as fp:
        return pickle.load(fp)
d079728fce59420cba6d17da8743d6dba8d4d37d
19,592
def nonzero_reference_product_exchanges(dataset):
    """Yield every 'reference product' exchange with a nonzero amount."""
    for exchange in dataset['exchanges']:
        if exchange['type'] == 'reference product' and exchange['amount']:
            yield exchange
77796243c4f07b0877c997a9103a4d14ab48f2c7
19,596
def __update_agent_state(current_agent_state, transition_probability, rng):
    """
    Get agent state for next time step.

    Draws a uniform sample and walks the cumulative transition
    distribution.  A running total replaces the original per-iteration
    ``sum(...)`` over a growing prefix, turning O(n^2) into O(n) while
    keeping the same float-addition order (and therefore the same
    results).

    Parameters
    ----------
    current_agent_state : int
        Current agent state; returned unchanged if the probabilities do
        not cover the drawn sample (e.g. they sum to less than 1).
    transition_probability : ndarray
        Transition probability vector corresponding to current state.
    rng : numpy random number generator

    Returns
    -------
    agent_state : int
        Agent state at next time step.
    """
    choice = rng.uniform()
    cumulative = 0.0
    for state, probability in enumerate(transition_probability):
        cumulative += probability
        if choice < cumulative:
            return state
    return current_agent_state
4d447f196ac6a326720cdf69c67286fe5053e5fc
19,597
def get_train_test_ind(paths):
    """Split file paths into train and test index lists.

    The sequence number is parsed from the first path component (e.g.
    's3/...').  Sequences 3, 7 and 10 go to the test split (as in the
    original paper); all other sequences up to 11 go to train; anything
    higher is reported as a problem and skipped.

    :param paths: all files
    :return: list of train indices and list of test indices
    """
    train_indices = []
    test_indices = []
    for index, path in enumerate(paths):
        sequence = int(path.split('/')[0].replace('s', ''))
        if sequence in (3, 7, 10):
            test_indices.append(index)
        elif sequence <= 11:
            train_indices.append(index)
        else:
            print("There is a problem")
    return train_indices, test_indices
a241732ece42ab92ad18c7df5dbf8f7b00efdf25
19,598
def qcMito(contint, records):
    """Flag sequences that are mostly mitochondrial.

    A record whose mitochondrial match intervals cover more than 75% of
    its sequence length is flagged for exclusion.

    :param contint: mapping record id -> list of (start, end) intervals
    :param records: mapping record id -> record with a ``.seq`` attribute
    :return: dict of record id -> {"remove": 1} for flagged records
    """
    tomodify = {}
    for record_id in records:
        if record_id not in contint:
            continue
        sequence_length = len(records[record_id].seq)
        # Total bases covered by all intervals (inclusive bounds).
        covered = sum(abs(iv[0] - iv[1]) + 1 for iv in contint[record_id])
        if covered / sequence_length > 0.75:
            tomodify[record_id] = {"remove": 1}
    return tomodify
2eb1c06e26d93c68440045f173bd5301119046a7
19,599
import operator


def get_final_color(worst_pos, array, graph):
    """Pick the least-conflicting existing colour for node *worst_pos*.

    For every colour already used in *array*, count how many neighbours
    of *worst_pos* carry that colour, and return the colour held by the
    fewest neighbours (ties resolved by the iteration order of the
    colour set, as before).

    :param worst_pos: node adjacent to the most same-coloured nodes
    :param array: list mapping node index -> colour
    :param graph: graph whose ``edges`` expose ``start``/``end``
    """
    # Collect every node adjacent to worst_pos (a self-loop would be
    # counted twice, matching the original behaviour).
    neighbours = []
    for edge in graph.edges:
        if edge.start == worst_pos:
            neighbours.append(edge.end)
        if edge.end == worst_pos:
            neighbours.append(edge.start)
    # colour -> number of adjacent nodes painted with it
    colour_counts = {colour: 0 for colour in set(array)}
    for node in neighbours:
        colour_counts[array[node]] += 1
    return min(colour_counts.items(), key=operator.itemgetter(1))[0]
35b71b66c9c2031c786d4072183f0398b642badf
19,600
def is_leap_year(year):
    """
    Indicates whether the specified year is a leap year.

    Every year divisible by 4 is a leap year, unless it is divisible by
    100 and not by 400: 1996, 2000 and 2400 are leap years; 1900 and 2100
    are not.

    The century rule exists because a solar year is ~365.2425 days: over
    400 years the naive every-fourth-year rule overcounts by exactly 3
    days, so 3 of every 4 century boundaries are made common years.
    """
    if year % 4 != 0:
        return False
    if year % 100 != 0:
        return True
    return year % 400 == 0
7e56255a4d3d969f56bb27b9b4474b56b047b022
19,601
def shingle_similarity(s1, s2, size=1):
    """Shingle similarity of two sentences.

    Builds the set of character shingles of the given *size* for each
    string and returns their Jaccard ratio.  NOTE(review): the result is
    clamped to 0.0 unless more than two shingles are shared — this
    threshold is preserved from the original implementation.
    """
    def shingles_of(text):
        return {text[i:i + size] for i in range(len(text) - size + 1)}

    left = shingles_of(s1)
    right = shingles_of(s2)
    shared = len(left & right)
    total = len(left | right)
    if total > 0 and shared > 2:
        return shared / float(total)
    return 0.0
49a161f1745ed5ef8b412504687c37415261d0fd
19,602
import argparse
import os


def args():
    """Parse command-line arguments.

    Expects ``-r/--read`` with the whois file path; when it is missing,
    the help text is printed and the process exits immediately.

    :return: the value of the ``--read`` option
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-r', '--read', dest='read', help='input whois file path')
    parsed = parser.parse_args()
    if parsed.read is None:
        # No path supplied: show usage and stop right here.
        parser.print_help()
        os._exit(0)
    return parsed.read
6e2ac8c3d87d5afae1da46da9fb38e66660de8cb
19,603
def get_version(pseudo_revision, mergebase, tainted_text, tag):
    """Compose a version string from revision metadata.

    The mergebase hash is trimmed to 7 characters (like ``git describe``
    does, since 40 chars is overwhelming), then the tainted marker and an
    optional ``-<tag>`` suffix are appended.
    """
    parts = ['%s-%s' % (pseudo_revision, mergebase[:7]), tainted_text]
    if tag:
        parts.append('-' + tag)
    return ''.join(parts)
2481d672b8f01641ea02d63a8cb0d940c2751ba3
19,606
from numpy import seterr, fill_diagonal


def inverse_distance_matrix(Rab):
    """
    Calculate the inverse distance matrix, with zeros on the diagonal.

    Division warnings are suppressed while taking the elementwise
    reciprocal (the diagonal holds zero distances), then the diagonal is
    overwritten with zeros and the previous error settings are restored.

    Args:
        Rab (matrix-like): matrix of the atomic distances in squareform

    Returns:
        matrix-like: inverse distance matrix, with diagonal values set to 0
    """
    old_settings = seterr(divide='ignore')
    inverse = 1.0 / Rab
    seterr(**old_settings)
    fill_diagonal(inverse, 0.0)
    return inverse
69bfdda11889a231d4eb12101c9320b1f4599f25
19,607
import os
import logging


def get_optional_env(key, default):
    """
    Return the value of an optional environment variable, falling back to
    *default* when the variable is not set.

    Fixes: the environment is now read only once, and the "using default"
    warning is emitted only when the default is actually returned
    (previously a variable set to the empty string triggered the warning
    while its empty value was still returned).

    Arguments keywords:
    key -- Name of variable we want to get the value
    default -- Value to return if 'key' not found in environment variables
    """
    value = os.environ.get(key)
    if value is None:
        logging.warning(
            "The optional environment variable {} is not set, using '{}' as default".format(key, default))
        return default
    return value
07a05e72b8bbf645c85c59890a9a455b8dc13c17
19,608
import numpy


def convertToSlice(x, increment=False):
    """
    Convert an int, float, list of ints/floats, None, or slice into a slice.

    Optionally shifts the selection by *increment*, which makes it easy to
    line up 1-based group numbers with 0-based array indices when slicing
    numpy arrays.

    Parameters
    ----------
    x : int, float, list, None or slice
        int/float: select that single index; list: select those indices
        (returned as a numpy index array); None: select everything;
        slice: shift its bounds and return it.
    increment : int or False
        Offset added to every index (False means 0).

    Returns
    -------
    slice or numpy.ndarray
        An object usable to index an array, e.g. ``a[convertToSlice(2)]``.

    Examples
    --------
    >>> a = numpy.array([10, 11, 12, 13])
    >>> convertToSlice(2)
    slice(2, 3, None)
    >>> a[convertToSlice(2)]
    array([12])
    >>> convertToSlice(2, increment=-1)
    slice(1, 2, None)
    >>> a[convertToSlice(None)]
    array([10, 11, 12, 13])
    """
    offset = 0 if increment is False else increment
    if not isinstance(offset, int):
        raise Exception("increment must be False or an integer in utils.convertToSlice")
    if x is None:
        x = numpy.s_[:]
    if isinstance(x, list):
        x = numpy.array(x)
    if isinstance(x, (int, numpy.integer, float, numpy.floating)):
        x = slice(int(x), int(x) + 1, None)
    # Shift the slice bounds to translate group numbers into indices
    # (energy groups are 1..x while indices are 0..x-1).
    if isinstance(x, slice):
        start = None if x.start is None else x.start + offset
        if x.stop is None:
            stop = None
        elif isinstance(x.stop, list):
            stop = [bound + offset for bound in x.stop]
        else:
            stop = x.stop + offset
        return numpy.s_[start:stop:x.step]
    elif isinstance(x, numpy.ndarray):
        return numpy.array([index + offset for index in x])
    else:
        raise Exception(
            (
                "It is not known how to handle x type: " "{0} in utils.convertToSlice"
            ).format(type(x))
        )
6c041a369205401ef9a60cd8bc2af5678174f54b
19,609
def get_ranking11():
    """
    Return the ranking with ID 11 as (alternative, score) pairs,
    ordered from best to worst score.
    """
    alternatives = ("a6", "a3", "a5", "a2", "a4", "a1")
    scores = (0.983188, 0.980454, 0.968182, 0.967595, 0.808142, 0.033316)
    return list(zip(alternatives, scores))
24ff71def7e23ba61d9acfe13e1da5512072309f
19,610
def prune_container(container): """Prune a tree of containers. Recursively prune a tree of containers, as described in step 4 of the algorithm. Returns a list of the children that should replace this container. Arguments: container (Container): Container to prune Returns: List of zero or more containers. """ # Prune children, assembling a new list of children new_children = [] for ctr in container.children[:]: # copy the container.children list pruned_child = prune_container(ctr) new_children.extend(pruned_child) container.remove_child(ctr) for child in new_children: container.add_child(child) if container.get('message') is None and not len(container.children): # step 4 (a) - nuke empty containers return [] elif (container.get('message') is None and (len(container.children) == 1 or container.parent is not None)): # step 4 (b) - promote children children = container.children[:] for child in children: container.remove_child(child) return children else: # Leave this node in place return [container]
38589c8e4447ef06e79dc34815d6718becbdfd80
19,611
def dollarify(value):
    """Filter to convert int to dollar value"""
    return f'${value:,.2f}'
388c25914506ccbefd0ab0b8657f40ec0e827edc
19,612
def findfiles(wo, fn=None):
    """Return a list of CWL File/Directory objects found anywhere in *wo*.

    Walks nested dicts and lists; any dict whose ``class`` is ``File`` or
    ``Directory`` is collected and its ``secondaryFiles`` walked too.
    The accumulator *fn* is created on the first call and threaded
    through the recursion.
    """
    if fn is None:
        fn = []
    if isinstance(wo, list):
        for item in wo:
            findfiles(item, fn)
    elif isinstance(wo, dict):
        if wo.get("class") in ("File", "Directory"):
            fn.append(wo)
            findfiles(wo.get("secondaryFiles"), fn)
        else:
            for value in wo.values():
                findfiles(value, fn)
    return fn
e8db2aa76507f848f31d35a6b80e9bb0a918d462
19,613
def mask2zero(img):
    """
    Converts masked pixels into zeros

    :param img: Image contained in the Collection
    :type img: ee.Image
    """
    # Identical chain to the original, with the intermediate variable
    # inlined: take the image's mask and fill unmasked positions from img.
    return img.mask().where(1, img)
a42e3e9924bcf1e839b330a4e3e9e8b49fc36267
19,614
def hotel_name(hotel):
    """Returns a human-readable name for a hotel."""
    display_names = {
        "sheraton_fisherman_s_wharf_hotel": "Sheraton Fisherman's Wharf Hotel",
        "the_westin_st_francis": "The Westin St. Francis San Francisco on Union Square",
        "best_western_tuscan_inn_fisherman_s_wharf_a_kimpton_hotel": "Best Western Fishermans Wharf",
    }
    # Unknown identifiers are passed through unchanged.
    return display_names.get(hotel, hotel)
d7e6118f25caa59174480c44fc4be63198d0c2c0
19,615
def always_true(*args, **kwargs):  # pylint: disable=unused-argument
    """
    Return ``True`` regardless of the positional or keyword arguments.
    """
    return True
6f755e48a482dba4a3cccc8dad92cb6fbb610a1b
19,616
from typing import List


def parse_cmd(commands: List[str]) -> str:
    """
    Join a command list into a single shell-style string.

    We need to take into account two cases:

    - ['python code.py foo bar']: used mainly with dvc as a library; the
      single pre-built string is joined as-is.
    - ['echo', 'foo bar']: argument list received from the CLI; arguments
      containing spaces (and no double quote already) are re-quoted, as
      they were when passed through: dvc run echo "foo bar"
    """
    if len(commands) < 2:
        return " ".join(commands)

    quoted = []
    for arg in commands:
        if " " in arg and '"' not in arg:
            quoted.append(f'"{arg}"')
        else:
            quoted.append(arg)
    return " ".join(quoted)
cd682115b70fcb0cb74af59f2ca7f5963b844a30
19,617
def excretion_dic(vs, zooplankton, nutrient):
    """Zooplankton excretes nutrients after eating. Poop, breathing...

    Returns a single-entry dict mapping *nutrient* to the total excreted
    amount scaled by the Redfield C:N ratio.
    """
    excreted = vs.redfield_ratio_CN * vs.excretion_total
    return {nutrient: excreted}
1fdbe5830793354b4241a94a516f09bd9f3077d5
19,619
def enrich_dataframe(df, name):
    """Enrich dataframe with the derived column a KPI needs.

    Only a handful of KPIs require computation (incidence, positivity and
    occupation rates, vaccination coverage, school closure rates); any
    other *name* leaves the frame untouched.  The branches are mutually
    exclusive, so an elif chain is equivalent to the original ifs.
    """
    if name == 'taux_incidence':
        df['taux_incidence'] = df['P'] * 100000 / df['pop']
    elif name == 'taux_positivite':
        df['taux_positivite'] = df['P'] / df['T'] * 100
    elif name == 'taux_occupation':
        df['TO'] = df['TO'] * 100
    elif name in ('vaccins_vaccines_couv_majeurs', 'vaccins_vaccines_couv_ado_majeurs'):
        df['couv_complet'] = 100 * df['n_cum_complet'] / df['pop']
    elif name == 'taux_classes_fermees':
        df['taux_classes'] = 100 * df['nombre_classes_fermees'] / df['nombre_total_classes']
    elif name == 'taux_structures_fermees':
        df['taux_structures'] = 100 * df['nombre_structures_fermees'] / df['nombre_total_structures']
    return df
0f4e1f4d05eb96b82d4fcdaee53082194e0ac74d
19,620
def undocumented(func):
    """Mark *func* so the API documentation generator skips it."""
    setattr(func, "_undocumented_", True)
    return func
135de1a166927dda811c6a9f7b6d5f826a13e42d
19,621
def get_mask_bool(mask, threshold=1e-3):
    """
    Return a boolean version of the input mask.

    Parameters
    ----------
    mask : enmap
        Input sky mask.
    threshold : float, optional
        Consider values below this number as unobserved (False).

    Returns
    -------
    mask_bool : bool enmap
        Boolean version of input mask.
    """
    # astype copies even when the input is already boolean, so the
    # caller's array is never aliased.
    out = mask.astype(bool)
    if mask.dtype != bool:
        # Tiny nonzero values would otherwise become True.
        out[mask < threshold] = False
    return out
f7950e1b332c5b6de6b963578e510a3e4fe65799
19,622
import torch


def activate_gpu(gpu='GPU'):
    """Pick the torch device: GPU if available and requested, else CPU.

    Defaults to using the GPU when one is available; any value other than
    the (case-insensitive) string 'gpu' forces the CPU.
    """
    if torch.cuda.is_available() and gpu.lower() == 'gpu':
        print('Running on GPU')
        return torch.device('cuda:0')
    print('Running on CPU')
    return torch.device('cpu')
3d5fbda4709a1307373156d8fab269445d24aeda
19,623
def iota_b(t):
    """Piecewise incidence rate: 0 up to t=20, 0.005 up to t=60, then 0.002."""
    if t <= 20:
        return 0
    if t <= 60:
        return 0.005
    return 0.002
aee9816596601ce46fa8b85087500fffe70db68c
19,625
def round_if_near(value: float, target: float) -> float:
    """Snap *value* to *target* when they differ by at most 2e-6."""
    if abs(value - target) <= 2.0e-6:
        return target
    return value
005a7d7110265bbb1abd5f5aef6092fb67186a62
19,626
import os


def find_pdfs(top_folder_path):
    """Recursively collect the paths of all PDF files under a folder.

    Walks the directory tree rooted at *top_folder_path* and returns
    every file whose extension is exactly ``.pdf`` (case-sensitive, as
    before).  Fixes the docstring, which previously described a
    callback-based API this function never had.

    :param top_folder_path: directory to search
    :raises ValueError: if *top_folder_path* is not an existing directory
    :return: list of paths to the PDF files found
    """
    if not os.path.isdir(top_folder_path):
        raise ValueError('The passed path is not a valid folder.')
    pdf_list = []
    for folder, _, files in os.walk(top_folder_path):
        for name in files:
            if os.path.splitext(name)[1] == '.pdf':
                pdf_list.append(os.path.join(folder, name))
    return pdf_list
fefab581942d9af9dd17e4ea621aef35f55f639f
19,627
def get_authors() -> str:
    """Prompt for authors until a blank line, then join them with ' and '.

    Each entered name is title-cased before joining.
    """
    collected = []
    while True:
        entry = input("Author: ")
        if not entry:
            return ' and '.join(collected)
        collected.append(entry.title())
2332f9ff2680d2d5612fc2f0a32e1020f0c674a0
19,628
def version_to_tuple(version):
    """
    version_to_tuple(string) -> tuple

    Convert a dotted version string to a tuple of ints so versions
    compare correctly ("1.10" > "1.9").  An empty string yields ().
    """
    if version == "":
        return ()
    return tuple(int(component) for component in version.split('.'))
a67a633b63c59e3ee46ffa2e9681535342a2723d
19,629
def ksi_of_t_discrete(x_func, T, y0, g):
    """Local regressor for a discrete system: x(T) scaled by (g - y0)."""
    deviation = g - y0
    return x_func(T) * deviation
2094a0d5f52dff2938d29794656762dad32af3ac
19,630
import torch


def sample_points_with_roi(rois, points, sample_radius_with_roi, num_max_points_of_part=200000):
    """
    Keep only the points that lie near some ROI box centre.

    A point survives when its distance to the nearest ROI centre is less
    than half that ROI's diagonal extent plus ``sample_radius_with_roi``.

    Args:
        rois: (M, 7 + C)  # columns 0:3 are the box centre, 3:6 its dims
        points: (N, 3)
        sample_radius_with_roi:  # extra margin added around each ROI
        num_max_points_of_part:  # chunk size bounding the (N, M) distance matrix

    Returns:
        sampled_points: (N_out, 3)
        point_mask: (N,) boolean mask of the surviving points
    """
    if points.shape[0] < num_max_points_of_part:
        # Small clouds: one (N, M) pairwise-distance matrix fits in memory.
        distance = (points[:, None, :] - rois[None, :, 0:3]).norm(dim=-1)
        min_dis, min_dis_roi_idx = distance.min(dim=-1)
        # Half-diagonal of the nearest ROI's bounding dimensions.
        roi_max_dim = (rois[min_dis_roi_idx, 3:6] / 2).norm(dim=-1)
        point_mask = min_dis < roi_max_dim + sample_radius_with_roi
    else:
        # Large clouds: process points in chunks to cap peak memory, then
        # concatenate the per-chunk masks.
        start_idx = 0
        point_mask_list = []
        while start_idx < points.shape[0]:
            distance = (points[start_idx:start_idx + num_max_points_of_part, None, :] - rois[None, :, 0:3]).norm(dim=-1)
            min_dis, min_dis_roi_idx = distance.min(dim=-1)
            roi_max_dim = (rois[min_dis_roi_idx, 3:6] / 2).norm(dim=-1)
            cur_point_mask = min_dis < roi_max_dim + sample_radius_with_roi
            point_mask_list.append(cur_point_mask)
            start_idx += num_max_points_of_part
        point_mask = torch.cat(point_mask_list, dim=0)
    # Never return an empty tensor: fall back to the first point when no
    # point passes the mask.
    sampled_points = points[:1] if point_mask.sum() == 0 else points[point_mask, :]
    return sampled_points, point_mask
12095c625efb6b98f4c63b12ac651a559f841e5d
19,631
import copy


def sanitise_json_error(error_dict):
    """
    Normalise a JSON parse-error payload for comparison.

    Exact contents of JSON error messages depend on the installed version
    of json, so the 'detail' field is truncated to the length of the
    classic message.  A shallow copy is returned; the input dict is left
    untouched.
    """
    sanitised = copy.copy(error_dict)
    prefix_len = len('JSON parse error - No JSON object could be decoded')
    sanitised['detail'] = sanitised['detail'][:prefix_len]
    return sanitised
d91162616530ae494274b9a63baf324d22de2c38
19,632
import re


def parse_csr(csr_as_text):
    """Parse a Certificate Signing Request (CSR).

    Extracts the Common Name (if present) plus every DNS entry of the
    Subject Alternative Name extension from the openssl text dump.

    Returns the sorted list of domains this CSR affects.
    """
    domains = set()
    cn_match = re.search(r'Subject:.*? CN\s*=\s*([^\s,;/]+)', csr_as_text)
    if cn_match is not None:
        domains.add(cn_match.group(1))
    san_pattern = r'X509v3 Subject Alternative Name: \n +([^\n]+)\n'
    for san_block in re.finditer(san_pattern, csr_as_text, re.MULTILINE | re.DOTALL):
        for entry in san_block.group(1).split(', '):
            if entry.startswith('DNS:'):
                domains.add(entry[len('DNS:'):])
    return sorted(domains)
6ac5b52126741215ad9042cf26a3e6619e33438a
19,634
def get_corpus(data: list) -> list:
    """
    Flatten a list of phrases into the list of all words in the corpus.

    :param data: phrases (whitespace-separated words)
    :return: every word, in order of appearance
    """
    return [word for phrase in data for word in phrase.split()]
6cbd38c71c9f926df21561ef6ab57e4e9d07dd95
19,635