def cors_allow_xblock_handler(sender, request, **kwargs):  # lint-amnesty, pylint: disable=unused-argument
    """
    Sandboxed XBlocks need to be able to call XBlock handlers via POST,
    from a different domain. See 'xblock_handler' method for details and
    how security is enforced.
    Per the corsheaders docs, a signal is the only way to achieve this
    for just a specific view/URL.
    """
    return request.path.startswith('/api/xblock/v2/xblocks/') and '/handler/' in request.path
f12a2f10df6c541c9db200a82137bca45cced7c4
15,697
import requests

def get_json(url, params):
    """Fetch the JSON data of a web page."""
    r = requests.get(url, params=params)
    return r.json()
e0245871bb334cd83080c39f7d4c0c38db730205
15,698
def pad_batch(batch, pad_id, neox_args):
    """
    pads context lengths in batch with pad_id to equal neox_args.seq_length,
    and returns the padded batch and the new lengths.

    batch: torch.Tensor of tokens
    pad_id: int, integer to use as padding token
    neox_args: neox_args
    """
    context_lengths = []
    for tokens in batch:
        context_length = len(tokens)
        if context_length < neox_args.seq_length:
            tokens.extend([pad_id] * (neox_args.seq_length - context_length))
        context_lengths.append(context_length)
    return batch, context_lengths
4d2b4630c5a84e1eaa7e0e05a45b8cfcd0d9fae8
15,699
def __max_successive_series_len(arr, asc=True, eq=False):
    """
    Find the longest run of adjacent indices over which the values keep
    increasing (asc=True) or decreasing (asc=False).
    For example, 1,2,3 returns 3.
    @param arr: input sequence
    @param asc: True for increasing runs, False for decreasing runs
    @param eq: if True, equal neighbours also extend a run
    @return: length of the longest run (0 if no adjacent pair qualifies)
    """
    max_area_len = 0
    run_start = 0
    for j in range(1, len(arr)):
        extends = arr[j] > arr[j - 1] if asc else arr[j] < arr[j - 1]
        if eq and arr[j] == arr[j - 1]:
            extends = True
        if extends:
            max_area_len = max(j - run_start + 1, max_area_len)
        else:
            run_start = j  # the run is broken; restart it at the current index
    return max_area_len
3ddd09a663c493eea0eed367d5a7c98ca3963acc
15,700
def _generate_type_config(configs, old_copyright_heads, new_copyright, update_template_file):
    """Generate complete configuration for easy use.

    Args:
        configs: `list of dict`, original type config
        old_copyright_heads: `list of str`, old copyright first line
        new_copyright: `str`, new copyright
        update_template_file: whether to update .template files

    Returns:
        `list of dict`, complete configuration
    """
    for config in configs:
        config['old_heads'] = [config['prefix'] + head for head in old_copyright_heads]
        config['copyright'] = [config['copyright_start_line'] + '\n'] if config['copyright_start_line'] else []
        _new_copyright = [config['prefix'] + line + '\n' for line in new_copyright.split('\n')]
        config['copyright'].extend(_new_copyright)
        if config['copyright_end_line']:
            config['copyright'].append(config['copyright_end_line'] + '\n')
    if update_template_file:
        template_config = []
        for config in configs:
            config = config.copy()
            config['file_suffix'] = config['file_suffix'] + '.template'
            template_config.append(config)
        configs.extend(template_config)
    return configs
e55797d7ef0060d0eed4033794dbab6c62fb3107
15,701
def reverse_edges(edges):
    """Reverses direction of dependence dict.

    Parameters
    ----------
    edges : dict
        Dict of the form {a: {b, c}, b: set(), c: set()} where b and c
        depend on a.

    Returns
    -------
    Dict of the form {a: set(), b: {a}, c: {a}} where b and c depend on a.

    Examples
    --------

    .. testcode::

       from nengo.utils.graphs import reverse_edges

       d = {0: {1, 2}, 1: {2, 3}, 2: set(), 3: set()}
       print(reverse_edges(d))

    .. testoutput::

       {0: set(), 1: {0}, 2: {0, 1}, 3: {1}}

    Notes
    -----
    Dict iteration order is not deterministic. Since we iterate over the
    input dict, the output order of this function should be considered
    non-deterministic as well.
    """
    result = {k: set() for k in edges}
    for key in edges:
        for val in edges[key]:
            result[val].add(key)
    return result
d0de015c2b26f6ba211009b6d7bef4c9a9750baf
15,702
import random

def noise_seed():
    """
    Generate a random seed for NoiseSource.block().
    """
    return random.randint(0, 2**32 - 1)
caf4ed6117dd542c042be1499ee8d58af2662c9c
15,704
import os

def set_default_config(config: dict) -> dict:
    """Set the default configuration options on a loaded config dictionary"""
    env_service_name = os.environ.get("LIGHTBUS_SERVICE_NAME")
    if env_service_name:
        config.setdefault("service_name", env_service_name)
    env_process_name = os.environ.get("LIGHTBUS_PROCESS_NAME")
    if env_process_name:
        config.setdefault("process_name", env_process_name)
    config.setdefault("apis", {})
    config.setdefault("bus", {})
    config["apis"].setdefault("default", {})
    config["bus"].setdefault("schema", {})
    config["apis"]["default"].setdefault("rpc_transport", {"redis": {}})
    config["apis"]["default"].setdefault("result_transport", {"redis": {}})
    config["apis"]["default"].setdefault("event_transport", {"redis": {}})
    config["bus"]["schema"].setdefault("transport", {"redis": {}})
    return config
73567c128fde8aa1938224091221fb3e46717d3a
15,705
def ProcCSV(lst):
    """
    Processes lst for delimiters and sorts them into a multi-dimensional array
    """
    OutList = []
    MegaList = []
    for element in lst:
        for item in element.split(",,"):
            OutList.append(item.strip("\n"))
    for item in OutList:
        MegaList.append(item.split(","))
    return MegaList
09a5e5d69dd56ae35b62fb6e32dbef86fa6b1556
15,706
def getminmax_linear_search(arr):
    """
    Linear method.
    Initialize min and max as the minimum and maximum of the first two
    elements respectively. Starting from the 3rd element, compare each
    element with max and min, and update max and min accordingly.
    """
    if len(arr) == 0:
        return None, None
    if len(arr) == 1:
        return arr[0], arr[0]
    if arr[0] > arr[1]:
        max_num = arr[0]
        min_num = arr[1]
    else:
        max_num = arr[1]
        min_num = arr[0]
    for idx in range(2, len(arr)):
        if min_num > arr[idx]:
            min_num = arr[idx]
        if max_num < arr[idx]:
            max_num = arr[idx]
    return min_num, max_num
6c2f19b7a4d12e45b570b89ea40b28aa9239a8fe
15,707
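A quick sanity check for getminmax_linear_search above, with illustrative values (not part of the original record):

    nums = [3, 5, 1, 4, 2]  # illustrative input
    assert getminmax_linear_search(nums) == (1, 5)
    assert getminmax_linear_search([7]) == (7, 7)
    assert getminmax_linear_search([]) == (None, None)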
def get_scaled_unsup_weight_max(num_labels, X_train_shape, unsup_weight_max=100.0):
    """Used to calculate the coefficient of the ppmi loss."""
    return unsup_weight_max * 1.0 * num_labels / X_train_shape
c2f59409464ea9f0a3eab620ecc3749fd69a6fab
15,708
def prepare_filter_query(dates, tags, mode):
    """Query to get filtered tasks data from database."""
    if mode == "OR":
        return (
            'SELECT id, name, total_time, description, '
            'creation_date FROM tasks JOIN activity '
            'ON activity.task_id=tasks.id JOIN tasks_tags '
            'ON tasks_tags.task_id=tasks.id '
            'JOIN (SELECT task_id, sum(spent_time) '
            'AS total_time '
            'FROM activity GROUP BY task_id) AS act '
            'ON act.task_id=tasks.id WHERE date IN ({1}) '
            'OR tag_id IN ({0}) '
            'GROUP BY act.task_id'.format(
                ",".join(map(str, tags)), "'%s'" % "','".join(dates))
        )
    else:
        if dates and tags:
            return (
                'SELECT DISTINCT id, name, total_time, '
                'description, creation_date FROM tasks JOIN '
                '(SELECT task_id, sum(spent_time) AS total_time '
                'FROM activity WHERE activity.date IN ({0}) '
                'GROUP BY task_id) AS act '
                'ON act.task_id=tasks.id JOIN (SELECT tt.task_id'
                ' FROM tasks_tags AS tt WHERE '
                'tt.tag_id IN ({1}) GROUP BY tt.task_id '
                'HAVING COUNT(DISTINCT tt.tag_id)={3}) AS x ON '
                'x.task_id=tasks.id JOIN (SELECT act.task_id '
                'FROM activity AS act WHERE act.date IN ({0}) '
                'GROUP BY act.task_id HAVING '
                'COUNT(DISTINCT act.date)={2}) AS y ON '
                'y.task_id=tasks.id'.format(
                    "'%s'" % "','".join(dates), ",".join(map(str, tags)),
                    len(dates), len(tags))
            )
        elif not dates:
            return (
                'SELECT DISTINCT id, name, total_time, '
                'description, creation_date FROM tasks '
                'JOIN (SELECT task_id, sum(spent_time) '
                'AS total_time FROM activity GROUP BY '
                'task_id) AS act ON act.task_id=tasks.id '
                'JOIN (SELECT tt.task_id FROM tasks_tags '
                'AS tt WHERE tt.tag_id IN ({0}) GROUP BY '
                'tt.task_id HAVING '
                'COUNT(DISTINCT tt.tag_id)={1}) AS x ON '
                'x.task_id=tasks.id'.format(
                    ",".join(map(str, tags)), len(tags))
            )
        elif not tags:
            return (
                'SELECT DISTINCT id, name, total_time, '
                'description, creation_date FROM tasks '
                'JOIN (SELECT task_id, sum(spent_time) '
                'AS total_time FROM activity WHERE activity.date'
                ' IN ({0}) GROUP BY task_id) AS act '
                'ON act.task_id=tasks.id JOIN (SELECT '
                'act.task_id FROM activity AS act '
                'WHERE act.date IN ({0}) GROUP BY act.task_id '
                'HAVING COUNT(DISTINCT act.date)={1}) AS y '
                'ON y.task_id=tasks.id'.format(
                    "'%s'" % "','".join(dates), len(dates))
            )
914daa4440347909d3bcaa61eaf3db12a4033c66
15,709
import sys

def get_python_version():
    """Return the major Python version number."""
    python_version = sys.version_info[0]
    return python_version
378ae6701239b2aa30a57107e74a464413a7fff0
15,710
def find_empty_cells(board):
    """Returns the empty cells of the board."""
    return [x for x in board if x in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]]
b7a976f910710f7e10f1ad60804a6d7e22550da1
15,713
def make_minibatch(arg, start, stop):
    """
    Does not handle off-size minibatches
    """
    if len(arg.shape) == 3:
        return [arg[:, start:stop]]
    else:
        return [arg[start:stop]]
64a1fd2b1acb1eaa3f352ee01494bf554e05a38c
15,714
import base64

def to_b64(bytestring):
    """Return a base64 encoded string."""
    return base64.b64encode(bytestring).decode('utf8')
1ab2eeef3d4ba28f763b353ca0efda5f755e576c
15,716
def conv_decay_b_a(decay_b):
    """Converts a fractional decay constant dictionary to an absolute decay
    constant dictionary (a fractional decay dictionary stores each decay
    constant as a fraction of the total decay constant; an absolute decay
    dictionary stores absolute values of decay constants).

    **Note**: Library must be compatible with ONIX format.

    Parameters
    ----------
    decay_b: dict
        The fractional decay constant dictionary to be converted
    """
    decay_a = {}
    for i in decay_b:
        if decay_b[i] == 'stable':
            decay_a[i] = 'stable'
        else:
            decay_a[i] = {}
            decay_a[i]['half-life'] = decay_b[i]['half-life']
            decay_a[i]['total decay'] = decay_b[i]['total decay']
            for reac in decay_b[i]:
                if reac not in ['half-life', 'total decay']:
                    decay_a[i][reac] = decay_b[i][reac] * decay_b[i]['total decay']
    return decay_a
bb0f60563024e974d36c63db18c0d10f65f67277
15,718
def is_set_nickname(string, nickname):
    """
    Test if this is a nickname setting message
    """
    if string.startswith(f"{nickname} set the nickname for "):
        return True
    if string.startswith(f"{nickname} set his own nickname to"):
        return True
    if string.startswith(f"{nickname} set her own nickname to"):
        return True
    if string.startswith(f"{nickname} set your nickname to"):
        return True
    return False
aec4909f3c4d3dea689383cc73d26623a16dfd85
15,719
from typing import Optional
import os

def read_IMG(fp: str, filesize: Optional[int] = None) -> bytes:
    """\
    Reads in the IMG file at fp, returning the contents as a bytes-type.
    If filesize is non-None, a ValueError will be raised if the size in
    bytes of the file does not match the value in filesize."""
    if filesize is not None:
        if os.path.getsize(fp) != filesize:
            raise ValueError("filesize does not match")
    with open(fp, "rb") as f:
        data = f.read()
    return data
b0d3012fdfdb3a93fa36d9763d914d93f36b7efa
15,720
import itertools

def _get_aps_claims(namespace):
    """
    Get claims in a list with paragraphs separated by newlines.

    Parameters
    ----------
    namespace : uspto_parsing_tools.aps.NameSpace
        Namespace to parse.

    Returns
    -------
    list[str]
    """
    claims = list()
    start_i = 0
    while True:
        try:
            first_p = next(itertools.dropwhile(
                lambda tag: not tag.is_paragraph(), namespace.data[start_i:]))
        except StopIteration:
            break
        first_p_i = namespace.data.index(first_p)
        claim_tags = itertools.takewhile(
            lambda tag: tag.is_paragraph(), namespace.data[first_p_i:])
        claim = '\n'.join(tag.data for tag in claim_tags)
        start_i = first_p_i + claim.count('\n') + 1
        claims.append(claim)
    return claims
baf907cc14654d8c9817fc18bb9e489411b8252e
15,721
import itertools
import re

def check_symbol(InputSet):
    """
    Check the symbol line in POSCAR and write the corresponding POTCAR file.

    Note: there are two ways to write the magmom:
        site_properties: magmom or other properties?
            run after self.write_input, then everything is written into INCAR
        INCAR: parse the magmom or other list-like properties

    Parameter
    ---------
    InputSet: VaspInputSet
        The input set defined by pymatgen, e.g. MPRelaxSet

    Return
    ------
    symbol: list(str)
    natom: list(str)
    """
    struc = InputSet.structure
    syms = [site.specie.symbol for site in struc]
    incar_dict = InputSet.incar.as_dict()
    if "MAGMOM" in incar_dict:
        magmom = incar_dict["MAGMOM"]
        syms = [syms[i] + str(magmom[i]) for i in range(len(syms))]
    symbol = [a[0] for a in itertools.groupby(syms)]
    symbol = ["".join(re.findall(r"[A-Z][a-z]*", symboli)) for symboli in symbol]
    natom = [str(len(tuple(a[1]))) for a in itertools.groupby(syms)]
    return symbol, natom
fbdeb644cfad18560fa36d5c32956ebe6a24f50e
15,722
def chunks(l, n):
    """Return successive n-sized chunks from l.

    Args:
        l: ``list`` list of data
        n: ``int`` chunk size

    Return:
        list of list: [[]]
    """
    temp_l = []
    for i in range(0, len(l), n):
        temp_l.append(l[i : i + n])
    return temp_l
75e3e987bab8ea1f5f5aada5851fc0692ea69ccc
15,723
import os
import sys

def read_list(file_path):
    """
    reads the sample names or combinations
    """
    if not os.path.exists(file_path):
        print("File {} does not exist".format(file_path))
        sys.exit()
    items = []
    with open(file_path, "r") as in_file:
        for line in in_file:
            items.append(line.strip())
    return items
0b979172a453f301c5d40d2e28ca2bc69aeeeffd
15,724
def search(nums, target):
    """
    :type nums: List[int]
    :type target: int
    :rtype: int
    """
    s = 0
    e = len(nums) - 1
    while s <= e:
        m = s + (e - s) // 2
        if target == nums[m]:
            return m
        if target > nums[m]:
            s = m + 1
        else:
            e = m - 1
    return -1
2ad08b154fc6dbc3179e0a34cc7e48da3c8f8aaa
15,725
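A quick sanity check for the binary search above; it assumes nums is sorted ascending (illustrative values):

    assert search([-1, 0, 3, 5, 9, 12], 9) == 4   # target present
    assert search([-1, 0, 3, 5, 9, 12], 2) == -1  # target absent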
def FakeConduitCall(arc_root, conduit_name, json_input={}):
    """Returns a hard-coded value for any conduit-call we need."""
    if conduit_name == 'user.query':
        return [
            {"phid": "PHID-USER-00000000000000000000",
             "userName": "csilvers",
             "realName": "Craig Silverstein",
             "image": "",
             "uri": "https://example.com/p/csilvers/",
             "roles": ["admin"]},
            {"phid": "PHID-USER-11111111111111111111",
             "userName": "ben",
             "realName": "Ben Bentastick",
             "image": "",
             "uri": "https://example.com/p/ben/",
             "roles": []},
            {"phid": "PHID-USER-22222222222222222222",
             "userName": "echo",
             "realName": "Ben Echoman",
             "image": "",
             "uri": "https://example.com/p/echo/",
             "roles": []},
            {"phid": "PHID-USER-33333333333333333333",
             "userName": "toom",
             "realName": "Toomany Bens",
             "image": "",
             "uri": "https://example.com/p/toom/",
             "roles": []},
            {"phid": "PHID-USER-44444444444444444444",
             "userName": "Upper",
             "realName": "Uppercase Username",
             "image": "",
             "uri": "https://example.com/p/Upper/",
             "roles": []},
            {"phid": "PHID-USER-55555555555555555555",
             "userName": "admin1",
             "realName": "Enabled Admin",
             "image": "",
             "uri": "https://example.com/p/admin1/",
             "roles": ["admin"]},
            {"phid": "PHID-USER-66666666666666666666",
             "userName": "admin2",
             "realName": "Disabled Admin",
             "image": "",
             "uri": "https://example.com/p/admin2/",
             "roles": ["admin", "disabled"]},
        ]
    raise NameError('Unexpected conduit_name %s' % conduit_name)
54f1a212363873cfe010b4a251856378c38d926e
15,726
def decay_every_scheduler(step, steps_per_decay, decay_factor):
    """Gives a scaling factor based on scheduling with a decay every n-steps.

    Args:
        step: int; Current step.
        steps_per_decay: int; How often to decay.
        decay_factor: float; The amount to decay.

    Returns:
        Scaling factor applied to the learning rate on the given step.
    """
    return decay_factor**(step // steps_per_decay)
1a7a0f333cebfbc6851111f9cd83156b72fecc3d
15,727
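A quick sanity check for decay_every_scheduler above (illustrative values): with steps_per_decay=1000 and decay_factor=0.5 the scale halves every 1000 steps.

    assert decay_every_scheduler(0, 1000, 0.5) == 1.0
    assert decay_every_scheduler(2500, 1000, 0.5) == 0.25  # two full decays applied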
def filter_ctrl_pert(gse_gsm_info):
    """
    Filter out GSEs that do not contain both control and perturbation samples.

    Args:
        gse_gsm_info: the GSE and GSM info tuple

    Returns:
        True if there are both control and perturbation samples, False otherwise
    """
    gse_id, gsm_info = gse_gsm_info
    sample_types = gsm_info[3]
    has_ctrl = has_pert = False
    for sample_type in sample_types:
        if has_ctrl and has_pert:
            break
        if sample_type == "ctrl":
            has_ctrl = True
        elif sample_type == "pert":
            has_pert = True
    return has_ctrl and has_pert
491fd23723a026ddf68fe3a56d98e097a0732e63
15,728
import numpy

def upcontinue(gz, height, xp, yp, dims):
    """
    Upward continue :math:`g_z` data using numerical integration of the
    analytical formula:

    .. math::

        g_z(x,y,z) = \\frac{z-z_0}{2\pi}\int_{-\infty}^{\infty}\int_{-\infty}^
        {\infty} g_z(x',y',z_0) \\frac{1}{[(x-x')^2 + (y-y')^2 + (z-z_0)^2
        ]^{\\frac{3}{2}}} dx' dy'

    .. note:: Data needs to be on a regular grid!

    .. note:: Units are SI for all coordinates and mGal for :math:`g_z`

    .. note:: be aware of coordinate systems! The *x*, *y*, *z* coordinates
        are: x -> North, y -> East and z -> **DOWN**.

    Parameters:

    * gz : array
        The gravity values on the grid points
    * height : float
        How much higher to move the gravity field (should be POSITIVE!)
    * xp, yp : arrays
        The x and y coordinates of the grid points
    * dims : list = [dy, dx]
        The grid spacing in the y and x directions

    Returns:

    * gzcont : array
        The upward continued :math:`g_z`
    """
    if xp.shape != yp.shape:
        raise ValueError("xp and yp arrays must have same shape")
    if height < 0:
        raise ValueError("'height' should be positive")
    dy, dx = dims
    area = dx * dy
    deltaz_sqr = (height) ** 2
    gzcont = numpy.zeros_like(gz)
    for x, y, g in zip(xp, yp, gz):
        gzcont += g * area * \
            ((xp - x) ** 2 + (yp - y) ** 2 + deltaz_sqr) ** (-1.5)
    gzcont *= abs(height) / (2 * numpy.pi)
    return gzcont
152d88c9dce4349fa808b6420009e54a889455d0
15,729
def method(row_i):
    """Return the number of atoms in the final slab of a row."""
    # row_i = df_slab_old.iloc[0]
    atoms = row_i.slab_final
    num_atoms = atoms.get_number_of_atoms()
    return num_atoms
06e759a22343bb0ecfb0e16a354c10096f28a237
15,731
def dateToZulu(x):
    """Return date in Zulu format

    NOT IMPLEMENTED.
    """
    return x
018af6b11c015d6441c9f2ebeda8e6ebb01c127d
15,735
def mon_decorateur(fonction):
    """Our decorator: it will print a message before the call of the
    originally defined function"""
    def fonction_modifiee():
        """Function that we return. It is in fact a slightly modified
        version of our originally defined function. We simply print a
        warning before executing the originally defined function"""
        print("Attention ! On appelle {0}".format(fonction))
        return fonction()
    return fonction_modifiee
5c4ad105613da1884e98375187bb712996342300
15,738
def construct_base_url(bible, text):
    """Return the base URL for BIBLE and TEXT.

    BIBLE is 'KJV' or 'LEB'
    TEXT is 'xml' or 'json' or 'txt'
    """
    base_url = 'http://api.biblia.com/v1/bible/content/'
    url = base_url + bible + '.' + text
    return url
aa4ab823808d186a2830f1531706919ef0afd98f
15,739
import os
import errno

def create_data_folders(run_stats, path_to_folder):
    """ create folders """
    # path to folder where to save to
    path_saving = path_to_folder + run_stats["name"] + "/"
    # create the base folder, then one subfolder each for the full dirty
    # dataset and the train/validation/test splits, tolerating folders
    # that already exist
    for sub in ("", "/full/", "/train/", "/validation/", "/test/"):
        try:
            os.makedirs(path_saving + sub)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise
    return path_saving
3301576bb8846e2019116ace859374400ec187e4
15,740
def dsu_sort(idx, seq):
    """Sorts a list of tuples according to the idx column using a
    Decorate-Sort-Undecorate method"""
    for i, e in enumerate(seq):
        seq[i] = (e[idx], e)
    seq.sort()
    seq.reverse()
    for i, e in enumerate(seq):
        seq[i] = e[1]
    return seq
a20f28aa10522d9653a85421104755f31861218c
15,741
def any_user(UserFixture, app, database):
    """A user without privileges or memberships."""
    u = UserFixture(
        email="anyuser@anyuser.org",
        password="anyuser",
    )
    u.create(app, database)
    # when using `database` fixture (and not `db`), commit the creation of the
    # user because its implementation uses a nested session instead
    database.session.commit()
    u.identity  # compute identity
    return u
3497a6987a30d834199acabe97709d91703a266a
15,742
def _is_line_from_candidate(line: str, from_imports: list, import_str: str) -> bool:
    """
    Check if line has from import

    :param line: the line to check
    :param from_imports: the from imports list
    :param import_str: the import string
    :return: True if the line has from import to replace
    """
    if import_str not in line:
        return False
    for from_import in from_imports:
        if line.strip().startswith(from_import):
            return True
    return False
04e2d20985fde1a08a9090ac733aafa8c3911968
15,743
import os

def get_runtime_url():
    """Returns the value of the ACTIONS_RUNTIME_URL var in the environment.

    Raises an exception if not set."""
    url = os.environ.get('ACTIONS_RUNTIME_URL')
    if not url:
        raise Exception('Unable to get ACTIONS_RUNTIME_URL env variable')
    return url
c881030ab3f7fe01de62ecbc656521b1197be44e
15,744
from datetime import datetime

def gen_today_file_name() -> str:
    """
    generate today's json filename

    Returns:
        today_in_history-*.json
    """
    now = datetime.now().strftime('%m-%d')
    file_today: str = 'today_in_history-%s.json' % now
    return file_today
37991e761021a1d5742b82359fbdf88d1d58f975
15,745
def is_fractional_sv(input_str):
    """
    This function takes the given text and checks if it is a fraction.

    Args:
        input_str (str): the string to check if fractional

    Returns:
        (bool) or (float): False if not a fraction, otherwise the fraction
    """
    if input_str.endswith('ars', -3):
        input_str = input_str[:len(input_str) - 3]  # e.g. "femtedelars"
    if input_str.endswith('ar', -2):
        input_str = input_str[:len(input_str) - 2]  # e.g. "femtedelar"
    if input_str.endswith('a', -1):
        input_str = input_str[:len(input_str) - 1]  # e.g. "halva"
    if input_str.endswith('s', -1):
        input_str = input_str[:len(input_str) - 1]  # e.g. "halvs"
    aFrac = ["hel", "halv", "tredjedel", "fjärdedel", "femtedel", "sjättedel",
             "sjundedel", "åttondel", "niondel", "tiondel", "elftedel", "tolftedel"]
    if input_str.lower() in aFrac:
        return 1.0 / (aFrac.index(input_str.lower()) + 1)
    if input_str == "kvart":
        return 1.0 / 4
    if input_str == "trekvart":
        return 3.0 / 4
    return False
fc5b15b449e9209a8836f809796ce9e3a5b27247
15,746
def part1():
    """
    The tree is made up of nodes; Specifically, a node consists of:
        A header, which is always exactly two numbers:
            The quantity of child nodes.
            The quantity of metadata entries.
        Zero or more child nodes (as specified in the header).
        One or more metadata entries (as specified in the header).
    What is the sum of all metadata entries?
    """
    with open('input/day8.txt') as input_file:
        tree_as_list = [int(n) for n in input_file.readline().split(' ')]

    def process_node(input_list):
        my_sum = 0
        children = input_list.pop(0)
        metadata_entries = input_list.pop(0)
        for i in range(children):
            my_sum += process_node(input_list)
        for i in range(metadata_entries):
            my_sum += input_list.pop(0)
        return my_sum

    print(process_node(tree_as_list))
efadc3ae94ea1827e30efd8aa27941627276d4a6
15,747
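A quick check of the process_node logic inside part1 above, with the recursion factored out into a copy so it can run without the input file; the sample license below is the well-known Advent of Code day 8 example, whose metadata entries sum to 138.

    def _process_node(lst):
        # same logic as process_node inside part1, copied here for testing
        total = 0
        children, metadata = lst.pop(0), lst.pop(0)
        for _ in range(children):
            total += _process_node(lst)
        for _ in range(metadata):
            total += lst.pop(0)
        return total

    sample = [int(n) for n in "2 3 0 3 10 11 12 1 1 0 1 99 2 1 1 2".split()]
    assert _process_node(sample) == 138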
def get_valid_messages(log, expiry):
    """
    Return only the messages that haven't expired.
    """
    valid = []
    for message in log:
        try:
            timestamp = int(message.get('timestamp', 0))
            if timestamp > expiry:
                valid.append(message)
        except ValueError:
            continue
    return valid
d24e9bc5e6d0a2c0efd9442f0d36f6e248b51137
15,750
import os

def get_all_files(base, follow_symlinks=False):
    """Returns a list of all the files in |base|. Each entry is relative
    to the last path entry of |base|."""
    result = []
    for root, _, files in os.walk(base, followlinks=follow_symlinks):
        result.extend([os.path.join(root, f) for f in files])
    return result
e1ae399a5d9ae548b9fe0f48db88b0984f32b776
15,751
def create_perturbation(pert_info):
    """Create perturbation."""
    pert = pert_info[3](pert_info[1], None, pert_info[2])
    return pert
22c5fdcdf7e8e1745b5d717794d39135d7e278ea
15,753
import os

def getmount_point(opt):
    """Return the mount point for opt.tarfile, creating the directory
    derived from the tar file name when no mount point was given."""
    tarfile = os.path.basename(opt.tarfile)
    mpath = opt.mountpoint
    if not mpath:
        (mpath, ext) = os.path.splitext(tarfile)
        if ext and mpath:
            if not os.path.exists(mpath):
                os.mkdir(mpath)
            return mpath
    elif os.path.isdir(mpath):
        return mpath
    raise Exception("Please specify a correct mountpoint")
28919d76b4b75098aec193453e8306ac812c3108
15,754
import re

def validate_uuid(hash):
    """Check if the user supplied a valid hash to the view.
    That should save him the wait... :D"""
    regex = '^[0-9a-f]{8}-[0-9a-f]{4}-[0-5][0-9a-f]{3}-[089ab][0-9a-f]{3}-[0-9a-f]{12}$'
    if re.search(re.compile(regex), hash):
        return True
    else:
        return False
f89545eb249b959c837f840921339857bb8ce29f
15,755
def calcular_acumulada(dist_discreta, inicio, fin, parametros):
    """
    Computes the cumulative distribution between inicio and fin.

    params
    :inicio    initial x from which to start accumulating
    :fin       final x up to which to accumulate
    :parametros parameters of the distribution (e.g., Binomial [n, p])
    """
    parametros.insert(0, 0)
    suma = 0
    for i in range(inicio, fin + 1):
        parametros[0] = i
        dist_discreta.iniciar(parametros)
        suma += dist_discreta.probabilidad()
    return suma
0bd8aa720d2604a7c6c599161dc59912611257dd
15,757
import os
import subprocess
import shlex

def safe_check_output(cmd, first_row_only=True):
    """Error-tolerant version of subprocess check output"""
    if os.name == "posix":
        cmd = f'/bin/sh -c "{cmd}; exit 0"'
    else:
        cmd = f'bash -c "{cmd}; exit 0"'
    output = subprocess.check_output(
        shlex.split(cmd), stderr=subprocess.STDOUT
    ).decode("utf-8")
    if first_row_only and output != "":
        return output.splitlines()[0]
    else:
        return output
ecd6f14227e960adec369cefbfbad5ff12b8d9a9
15,759
import os

def has_download_finished(path):
    """Checks if download has finished.

    Args:
        path: Absolute PATH for folder.

    Returns:
        Boolean of download status.
    """
    finished = True
    for file in os.listdir(path):
        if 'crdownload' in file:
            finished = False
    return finished
d18e2d7768622363d4241a8777e8acd534934eb2
15,761
def s3_bucket_suffix_for(path):
    """returns bucket suffix for product delivery paths"""
    path = path.lstrip("/")
    if (path.startswith("pub/firefox/bundles/") or
            path.startswith("pub/firefox/try-builds/")):
        return "archive"
    if path.startswith("pub/firefox/"):
        return "firefox"
    if (path.startswith("pub/labs/") or
            path.startswith("pub/webtools/") or
            path.startswith("pub/nspr/") or
            path.startswith("pub/security")):
        return "contrib"
    return "archive"
d6fd0c12069dff4ff4921483f13d028a8d22f410
15,762
def decodeSurrogatePair(hi, lo):
    """Returns a scalar value that corresponds to a surrogate pair"""
    return ((ord(hi) - 0xD800) * 0x400) + (ord(lo) - 0xDC00) + 0x10000
018eb6678052af00cd1b8fe51d58230e26c44bd8
15,763
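A quick sanity check for decodeSurrogatePair above: U+1F600 is encoded in UTF-16 as the surrogate pair D83D/DE00.

    assert decodeSurrogatePair('\ud83d', '\ude00') == 0x1F600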
import socket

def _get_free_port(host):
    """
    Gets a free port by opening a socket, binding it, checking the assigned
    port, and then closing it.
    """
    s = socket.socket()
    s.bind((host, 0))
    port = s.getsockname()[1]
    s.close()
    return port
85e08ba0a7832c5de0620634d2285bb317cde751
15,764
import os

async def loki_tester_charm(ops_test):
    """A charm for integration test of the Loki charm."""
    charm_path = "tests/integration/loki-tester"
    clean_cmd = ["charmcraft", "clean", "-p", charm_path]
    await ops_test.run(*clean_cmd)
    bad_rule_path = "tests/integration/loki-tester/src/loki_alert_rules/free-standing/error.rule"
    try:
        os.remove(bad_rule_path)
    except FileNotFoundError:
        pass
    charm = await ops_test.build_charm(charm_path)
    return charm
32ba2de4e24f2d2a4112f32a9cbd27ad73e798f1
15,766
import os

def get_include():
    """
    Return the directory that contains the DPNP C++ backend \\*.h header files.
    """
    dpnp_path = os.path.join(os.path.dirname(__file__), "backend", "include")
    return dpnp_path
f3c803b6f80444e4ab73302aef515f4d05be1266
15,767
import hashlib

def calculate_variant_md5(chromosome, position, reference, alternate):
    """Calculate MD5 hash for a variant

    Args:
        chromosome (str): Chromosome
        position (int): Genomic position
        reference (str): Reference allele
        alternate (str): Alternate allele

    Returns:
        str: MD5 hash for a variant
    """
    key = '|'.join(list(map(str, [chromosome, position, reference, alternate])))
    return hashlib.md5(key.encode('utf-8')).hexdigest()
36a201f05a8e2a09c2e567acf02089d01462248c
15,769
import os

def VariantCalling_Medaka(probs, ref):
    """
    Runs the medaka variant-calling command to generate a .vcf file.
    Requires the path of the consensus probabilities file and the reference.
    """
    Output_file = probs.split("consensus_probs")[0] + "medaka_variant.vcf"
    commands = "medaka variant --verbose " + ref + " " + probs + " " + Output_file
    exit_status = os.system(commands)  # run the command once and keep its status
    if exit_status != 0:
        print('Failed to run the medaka variant call commands.\n'
              'Please ensure that the tool is installed and run the pipeline again.')
        exit(0)
    print("medaka_variant.vcf file was created")
    return Output_file
3f5a02a9577047bc1f7a17ad71037bd5a5eb3c03
15,770
def subtract_segment(a, b):
    """
    Subtract segment b from segment a; return 'a' if there is no overlap
    """
    if a[0] >= b[0] and a[0] <= b[1] and a[1] > b[1]:
        return (b[1] + 1, a[1])
    elif a[0] < b[0] and a[1] >= b[0] and a[1] <= b[1]:
        return (a[0], b[0] - 1)
    elif a[0] < b[0] and a[1] > b[1]:
        return [(a[0], b[0] - 1), (b[1] + 1, a[1])]
    elif a[0] >= b[0] and a[1] <= b[1]:
        return []
    else:
        return a
28c94a76694d7b061143a416596af325e15ccb7b
15,771
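Illustrative checks for subtract_segment above, on inclusive integer segments (values are hypothetical):

    assert subtract_segment((1, 10), (4, 6)) == [(1, 3), (7, 10)]  # b inside a: split in two
    assert subtract_segment((5, 10), (1, 7)) == (8, 10)            # left end of a trimmed
    assert subtract_segment((1, 4), (2, 9)) == (1, 1)              # right end of a trimmed
    assert subtract_segment((3, 5), (1, 9)) == []                  # a fully covered by b
    assert subtract_segment((1, 2), (5, 9)) == (1, 2)              # no overlap: a returned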
import torch

def _find_max_response_value(train_X, train_Y):
    """
    determines best (max) response value max_Y across recorded values in
    train_Y, together with the corresponding X values
    :param train_X (torch.tensor)
    :param train_Y (torch.tensor)
    :return max_X (float): the X values corresponding to max_Y
    :return max_Y (float): the maximum Y value recorded
    """
    idmax = train_Y.argmax().item()
    max_X = torch.tensor([train_X[idmax].numpy()], dtype=torch.double)
    max_Y = torch.tensor([train_Y[idmax].numpy()], dtype=torch.double)
    return max_X, max_Y
8be4afaa622f42f23859bb6ec5262d1cd2458781
15,772
def score(source_data: list, weights: list, *args) -> list:
    """Analyse and score a dataset using a range-based percentual proximity
    algorithm and calculate the linear maximum likelihood estimation.

    Args:
        source_data (list): Data set to process.
        weights (list): Weights corresponding to each column from the data set.
            0 if lower values have higher weight in the data set,
            1 if higher values have higher weight in the data set

    Optional args:
        "score_lists" (str): Returns a list with lists of each column scores.
        "scores" (str): Returns only the final scores.

    Raises:
        ValueError: Weights can only be either 0 or 1 (int)

    Returns:
        list: Source data with the score of the set appended as the last element.
    """
    # getting data
    data_lists = []
    for item in source_data:
        for i, val in enumerate(item):
            try:
                data_lists[i].append(float(val))
            except IndexError:
                data_lists.append([])
                data_lists[i].append(float(val))

    # calculating price score
    score_lists = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score = []
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        else:
            raise ValueError("Invalid weight of %f provided" % (weight))
        score_lists.append(score)

    # return score lists
    if "score_lists" in args:
        return score_lists

    # initialize final scores
    final_scores = [0 for i in range(len(score_lists[0]))]

    # generate final scores
    for i, slist in enumerate(score_lists):
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele

    # return only scores
    if "scores" in args:
        return final_scores

    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
e141bcfa9df9456fd39318ec2b9bc74e7d121c2d
15,773
import math

def n_vector(lat, lon):
    """Converts lat/long to n-vector 3D Cartesian representation."""
    # Convert to radians.
    if not (-90.0 <= lat <= 90):
        raise ValueError("lat={:2.2f}, but must be in [-90,+90]".format(lat))
    rad_lat = math.radians(lat)
    if not (-180.0 <= lon <= 180):
        raise ValueError("lon={:2.2f}, but must be in [-180,+180]".format(lon))
    rad_lon = math.radians(lon)
    x = math.cos(rad_lat) * math.cos(rad_lon)
    y = math.cos(rad_lat) * math.sin(rad_lon)
    z = math.sin(rad_lat)
    return x, y, z
dca7be0345b7498ece4f66cceeb6e062e9cda9a0
15,774
import requests
import os

def get_data(opt):
    """
    get_data - Return dict of fetched job data.

    Args:
        opt (argparse.Namespace): command line options

    Returns:
        dict: data from USAJOBS.gov
    """
    url = (
        "https://data.usajobs.gov/api/Search?"
        "ResultsPerPage=500&Organization=EP00;EPJF;EPR1&WhoMayApply=All"
    )
    request = requests.get(
        url,
        headers={
            'Host': 'data.usajobs.gov',
            'User-Agent': os.environ['YOUR_EMAIL'],
            'Authorization-Key': os.environ['AUTH_KEY'],
        },
    )
    return request.json()
f26e5303b4cd7fe7f3462364d1ce9c3b4e11c24a
15,775
import os
import json

def recover(job_name, log_dir="."):
    """recover pending, errored, other jobs from a checkpoint"""
    # read the .pending, .err, and .other checkpoint files in turn,
    # treating an empty file as an empty dict
    results = []
    for ext in ("pending", "err", "other"):
        with open(os.path.join(log_dir, "{}.{}".format(job_name, ext)), "r") as f:
            content = f.read()
        results.append(json.loads(content) if len(content) > 0 else {})
    pending, errored, other = results
    return pending, errored, other
f64becb3efb5011dcaaa41ce4d1ea04d827527a4
15,777
def plot_box_reaction_time(config):
    """Plots the reaction time box plot

    Parameters
    ----------
    config : yaml
        The yaml configuration.

    Returns
    -------
    None

    """
    # Using the data from MATLAB file run
    return None
571c5313b2f8decb5234c99cb6ab6b23bd7cc43d
15,778
def get_fields_for_l3_plot(product: str, model: str) -> list:
    """Return list of variables for Cloudnet level-3 quicklooks.

    Args:
        product (str): Name of product, e.g., 'iwc'.
        model (str): Name of the model, e.g., 'ecmwf'.

    Returns:
        list: List of wanted variables
    """
    if product == "l3-iwc":
        fields = [f"{model}_iwc", f"iwc_{model}"]
    elif product == "l3-lwc":
        fields = [f"{model}_lwc", f"lwc_{model}"]
    elif product == "l3-cf":
        fields = [f"{model}_cf", f"cf_V_{model}"]
    else:
        raise NotImplementedError
    return fields
6a757c48cfb168c86912315e95e9f70c63458c6b
15,779
def get_fk_query_name(model, related_model):
    """
    Format the DB column name of a foreign key field of a model with the
    DB table of the model. Finds the foreign key relating to the related
    model automatically, but assumes that there is only one related field.

    Args:
        model (Model): The model for which the foreign key field is searched.
        related_model (Model): A model related to `model`.

    Returns:
        str: The formatted foreign key column name.
    """
    related_field = [f for f in model._meta.get_fields()
                     if f.is_relation and f.concrete and f.related_model == related_model]
    return '%s.%s' % (model._meta.db_table, related_field[0].column)
4b3c145486537274a64d8675f81276ba5018975e
15,780
import subprocess

def cmd_to_string(cmd):
    """Run a system command as in os.system(), but capture the stdout
    and return it as a string."""
    return subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).communicate()[0]
adfb8ebf8cf9ddb0ca7a4b3d9a82e59913dfbd9a
15,781
def compute_something(a: float, b: int) -> float:
    """Sums `a` and `b`.

    Args:
        a: A brief explanation of `a`.
        b: A brief explanation of `b`.

    Returns:
        float: The sum of `a` and `b`.

    Notes:
        The addition of an `int` and a `float` returns a `float`.

        Mathematically, this performs the following operation:

        .. math::

            c = a + b

    Warnings:
        The code will not break if you pass two str.
    """
    return a + b
997d6d811d10a2d70606addc76f7e906e8f9c73d
15,783
def sanitize_suffixes(*suffixes):
    """
    Ensure suffixes (which may be passed in from the user) are clean
    """
    return set(suffix.lower() for suffix in suffixes)
9981a932637e1677141ebff02119e48bf9b6cd1a
15,785
def turn_finder(Seq, Table):
    """
    Find the residues that might correspond to turns in a sequence.
    Conditions are as per the algorithm.
    Returns a list of indices with predicted turns.
    """
    results = []
    for i in range(len(Seq) - 4):
        window = Seq[i:i + 4]
        # Three conditions
        Pt = Table[window[0]][3] * Table[window[1]][4] * Table[window[2]][5] * Table[window[3]][6] > 0.000075
        Av_PTurn = sum([Table[aa][2] for aa in window]) / 4 > 1.0
        PTurn = sum([Table[aa][2] for aa in window]) > max([sum([Table[aa][0] for aa in window]),
                                                            sum([Table[aa][1] for aa in window])])
        # if all three conditions are true
        if Pt and Av_PTurn and PTurn:
            results.append(i)
    return results
e85136767184e0fa7c0a8b0097233e7d322d4190
15,786
import math

def is_finite(x):
    """
    Returns true if the argument is a float or int and it is not infinite or NaN
    """
    try:
        return math.isfinite(x)  # rejects both inf and NaN, matching the docstring
    except TypeError:
        return False
1dc4e253078f73126320a0e80658d52cceabdb07
15,787
def _get_dim_size(start, stop, step):
    """Given start, stop, and step, calculate the number of elements
    of this slice."""
    assert step != 0
    if step > 0:
        assert start < stop
        dim_size = (stop - start - 1) // step + 1
    else:
        assert stop < start
        dim_size = (start - stop - 1) // (-step) + 1
    return dim_size
550f77162570fb6b0608b4f5640a5487dd728ea2
15,789
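A quick sanity check for _get_dim_size above against Python's own slice semantics (illustrative values):

    assert _get_dim_size(0, 10, 3) == len(range(0, 10, 3))    # 4 elements
    assert _get_dim_size(10, 0, -2) == len(range(10, 0, -2))  # 5 elements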
def compact_capitalized_geography_string(s):
    """
    Go from lowercase "county, state-abbrev" string to Capitalized string

    Args:
        s:

    Returns:

    Examples:
        "lancaster, pa" --> "LancasterPA"
        "anne arundel, md" --> "AnneArundelMD"
        "st. mary's, md" --> "StMarysMD"
    """
    s = s.replace(',', '').replace('.', '').replace("'", '').title().replace(' ', '')
    return s[:len(s) - 1] + s[(len(s) - 1):].capitalize()
2393e09007774b965f556af40c1e0cb969362cc2
15,790
def mappings_from_sorted_tables(tables, table_order, reference_fields):
    """Generate mapping.yml data structures."""
    mappings = {}
    for table_name in table_order:
        table = tables[table_name]
        fields = {
            fieldname: fieldname
            for fieldname, fielddef in table.fields.items()
            if (table_name, fieldname) not in reference_fields.keys()
        }
        lookups = {
            fieldname: {
                "table": reference_fields[(table_name, fieldname)],
                "key_field": fieldname,
            }
            for fieldname, fielddef in table.fields.items()
            if (table_name, fieldname) in reference_fields.keys()
        }
        mappings[f"Insert {table_name}"] = {
            "sf_object": table_name,
            "table": table_name,
            "fields": fields,
            "lookups": lookups,
        }
    return mappings
48e6cba1c06eaee6146a04cb469641830f771515
15,791
def update_direction(direction, turn):
    """Return the direction ID after the given turn."""
    answer = None
    if turn == "R":
        answer = (direction + 1) % 4
    elif turn == "L":
        answer = (direction - 1) % 4
    return answer
a8ea3da50df3cbca3af2a4f42de764d1031285a6
15,792
def partition(lst, fn):
    """Partition lst by predicate.

    - lst: list of items
    - fn: function that returns True or False

    Returns new list: [a, b], where `a` are items that passed fn test,
    and `b` are items that failed fn test.

        >>> def is_even(num):
        ...     return num % 2 == 0

        >>> def is_string(el):
        ...     return isinstance(el, str)

        >>> partition([1, 2, 3, 4], is_even)
        [[2, 4], [1, 3]]

        >>> partition(["hi", None, 6, "bye"], is_string)
        [['hi', 'bye'], [None, 6]]
    """
    true_list = []
    false_list = []
    for val in lst:
        if fn(val):
            true_list.append(val)
        else:
            false_list.append(val)
    return [true_list, false_list]
74b4a293bc13c06759a5334164d80f63651aefab
15,794
from datetime import datetime

def as_pydatetime(d, ts):
    """returns a datetime object.

    d: date object
    ts: tm_struct
    """
    return datetime(year=d.year, month=d.month, day=d.day,
                    hour=ts.tm_hour, minute=ts.tm_min, second=ts.tm_sec)
869f411f11583a14759ef089d175080c3479c685
15,795
import numpy

def _percentile_factory(perc):
    """Percentile function usable within a group.

    Source: https://stackoverflow.com/a/54593214
    """
    def percentile_(values):
        return numpy.percentile(values, perc)
    percentile_.__name__ = f"percentile_{perc}"
    return percentile_
1f6b8ed9e7ffc41e33dbd4ee9178285327175fd6
15,796
import unicodedata

def charwidth(char):
    """
    The width of a single character. Ambiguous width is considered 1
    """
    cat = unicodedata.category(char)
    if cat == "Mn":
        return 0
    eaw = unicodedata.east_asian_width(char)
    if eaw == "Na" or eaw == "H":
        return 1
    if eaw == "F" or eaw == "W":
        return 2
    if eaw == "A":
        return 1
    if eaw == "N":
        return 1
    raise Exception("unknown east asian width for character {}: {}".format(ord(char), char))
9e0a2bbcb70b23f7c3a41bb9ac938e3b140badf0
15,797
def make_rev_comp(s):
    """
    Generates reverse comp sequences from an input sequence.
    """
    return s[::-1].translate(s[::-1].maketrans("ACGT", "TGCA"))
4cb65d3a2c2345b7189a01affd3f46f2de8ae06b
15,798
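A quick sanity check for make_rev_comp above: the reverse complement of "ATGC" is "GCAT".

    assert make_rev_comp("ATGC") == "GCAT"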
def my_function(number_a, number_b):
    """
    test doctest in my_function
    """
    return number_a * number_b
cb0938c07160712c7ad12d98645571e5f80a0cc1
15,799
def get_recess(df, Q, freq='1D', inplace=False):
    """
    Select the data when values are decreasing compared to previous time step

    :param df: DataFrame of hydro data
    :param Q: DataFrame field with discharge or water level data
    :param freq: Frequency of measurement of data; default is 1D
    :param inplace: If True, replace input DataFrame; default is False
    :return: DataFrame of all of the decreasing segments of the input DataFrame

    .. note:: from https://github.com/stijnvanhoey/hydropy
    """
    recess = df[Q].diff() < 0.0
    if inplace:
        df = df
    else:
        df = df[recess].copy()
    df = df.resample(freq).mean()
    return df
b9afc9eae0a24db48b29c08579bd0b3ce2104dcd
15,800
import json

def createMessage(message):
    """Create a JSON string to be returned as response to requests"""
    return json.dumps({"message": message})
3a37a494e0876af8f176338c16e81b41a27993d5
15,801
def pack(fmt, *args):  # known case of _struct.pack
    """
    pack(fmt, v1, v2, ...) -> bytes

    Return a bytes object containing the values v1, v2, ... packed according
    to the format string fmt. See help(struct) for more on format strings.
    """
    return b""
5abcc237f37802c39a392994ab3042f35619f275
15,802
def normder(n, m):
    """coefficient of d^m / dx^m x^n"""
    c = 1
    for i in range(m):
        c *= n - i
    return c
a57afadd4bccff28471e30654473e05265ebdeb1
15,803
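A quick sanity check for normder above: d^2/dx^2 of x^5 is 20 x^3, so the coefficient is 5 * 4 = 20.

    assert normder(5, 2) == 20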
def _safe_decode(output_bytes: bytes) -> str:
    """
    Decode a bytestring to Unicode with a safe fallback.
    """
    try:
        return output_bytes.decode(
            encoding='utf-8',
            errors='strict',
        )
    except UnicodeDecodeError:
        return output_bytes.decode(
            encoding='ascii',
            errors='backslashreplace',
        )
0f4b3d9e04b910d0ccfa2fde12e209901cadf52c
15,804
def check_have_space(data, _):
    """An attacker has space if there is no defender in the same grid square"""
    for opponent in data["opposition"]:
        if opponent["coordinates"] == data["attacker"]["coordinates"]:
            print(f"{opponent['name']} is closing down {data['attacker']['name']}.")
            return False
    print(f"No defender near {data['attacker']['name']} . . .")
    return True
a4e734f41b1b6c93351501a0bf29e3b7a2c430c0
15,805
def extract_description_for(values, ranks, thresholds, comparison_string):
    """
    Report values from df_with_ranks[col] by thresholds.

    :param values: Series containing values, indexed by probe_id
    :param ranks: Series containing ranks, indexed by probe_id
    :param thresholds: Thresholds for reporting out those values
    :param comparison_string: A string used to describe the comparison underlying the p-value
    :return: descriptive text
    """
    if len(thresholds) < 1:
        return "No thresholds requested"
    # Calculate a quantity of values under each threshold,
    # and pair the quantity with the threshold.
    quants = list(zip([(values < t).sum() for t in thresholds], thresholds))
    print("{} values less than {} in {}".format(*quants[0], values.name))
    return "{}: {:,} {} {}-{}, p<{} ({}). {} of these {:,} genes is {:,}".format(
        values.name[-4:],
        quants[0][0],
        "genes perform better in",
        values.name[-4:],
        comparison_string,
        thresholds[0],
        ", ".join(["{:,} <{:0.2f}".format(*q) for q in quants]),
        comparison_string,
        quants[0][0],
        0 if quants[0][0] < 1 else int(ranks[values[values < 0.05].index].mean()),
    )
8ff4dc7454c91e30b7dc2bc1b79b934169b946c5
15,806
def example_batch_to_list(example_batch, num_templates):
    """Convert a single batch item in a dataset to a list of items.

    Say you have a dataset where each item is shape {question: (), answer: ()}.
    An example_batch will be a batched example with shape
    {question: (None,), answer: (None,)}. This will convert this example_batch
    to a list of examples, each with shape {question: (), answer: ()}.

    Args:
        example_batch: a single batch item in a dataset
        num_templates: the number of templates that are written, equal to batch size

    Returns:
        A list of items.
    """
    return [
        {k: v[i] for k, v in example_batch.items()} for i in range(num_templates)
    ]
c0eba5fee52ba59de2d59844810ccc58ace9a805
15,807
from typing import Sequence

def get_rank_upto(
    ranks: Sequence[str], ter_rank: str, include_terminal: bool = False
) -> Sequence[str]:
    """Generates list of ranks from `ranks` terminated at `ter_rank`

    Parameters
    ----------
    ranks
        List of ranks
    ter_rank
        Terminal rank
    include_terminal
        Include terminal/last rank or not

    Returns
    -------
    list
        List of ranks
    """
    ret = []
    tmp_ranks = list(ranks)
    if ter_rank in tmp_ranks:
        ter_index = (
            tmp_ranks.index(ter_rank) + 1
            if include_terminal
            else tmp_ranks.index(ter_rank)
        )
        if ter_index != 0:
            ret = tmp_ranks[:ter_index]
    return ret
9f5e21dbc80652c444e3f049d7e0bb40aca76203
15,809
import re

def StripColor(string):
    """Returns string with color escape codes removed."""
    regex = re.compile(r'\x03(?:\d{1,2}(?:,\d{1,2})?)?', re.UNICODE)
    return regex.sub('', string)
04a253edf0842a2bda44d8c124445158588215e7
15,810
def sumar(a, b):
    """
    Add the two numbers a and b
    """
    z = a + b
    return z
40c820dcddeb4b97d8de0f111313b37e47c5002c
15,811
def indent(string, nspaces):
    """
    Indents a multiline string.

    Required for Python 2; for Python 3, `import textwrap` and
    `textwrap.indent` works instead.
    """
    pad = nspaces * ' '
    indented_string = ''.join(pad + line for line in string.splitlines(True))
    return indented_string
531ce33bdf62a220bfddb02228953b424495919c
15,812
def hex_to_bin(hex_str: str, width: int = 32) -> str:
    """Converts hex string to binary string

    Parameters
    ----------
    hex_str : str
        hexadecimal string to convert
    width : int, optional
        width of binary output (used for zero padding), default=32

    Returns
    -------
    str
        binary array as string

    Raises
    ------
    ValueError
        raises ValueError if supplied width is not wide enough for binary string
    """
    if len(hex_str) * 4 > width:
        raise ValueError(
            f"Hex string of length {len(hex_str)} too large for binary array of width {width}"
        )
    format_str = f"{{0:0{width}b}}"
    return format_str.format(int(hex_str, 16))
4ad8046e2cd97e04824239feb997381108d67e37
15,815
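Quick sanity checks for hex_to_bin above (illustrative values):

    assert hex_to_bin("ff", width=8) == "11111111"
    assert hex_to_bin("2a") == "0" * 26 + "101010"  # zero-padded to the default 32 bits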
def flatten_list(alist, howdeep=1):
    """Flattens nested sequences."""
    if howdeep > 0:
        newlist = []
        for nested in alist:
            try:
                newlist.extend(nested)
            except TypeError:
                newlist.append(nested)
        howdeep -= 1
        alist = flatten_list(newlist, howdeep)
    return alist
93eb5734d979c0fc32ead0c43893b898f63b7929
15,817
import re

def _all_metadata(path, num):
    """ processes metadata from a (RESEDA) PAD or TOF file """
    valuereo = re.compile(r"[+-]?\d+[\.e+-]{0,2}\d*")
    unitreo = re.compile(r"\s[A-Za-z]{1,4}$")  # strip " "
    currentkey = "binarydump"
    metadict = {currentkey: {}}
    with open(path(num)) as f:
        for line in f.readlines():
            temp = line.strip().split(':')
            if len(temp) == 1 and temp[0][:3] == "###":
                currentkey = temp[0][3:].strip()
                metadict[currentkey] = {}
            elif len(temp) == 2:
                val_result = valuereo.findall(temp[1])
                unit_result = unitreo.findall(temp[1])
                if len(val_result) == 1 and len(unit_result) != 0:
                    metadict[currentkey][temp[0].strip()] = (float(val_result[0]), unit_result[0])
                elif len(val_result) > 1 and len(unit_result) != 0:
                    metadict[currentkey][temp[0].strip()] = (tuple((float(val) for val in val_result)), unit_result[0])
                elif len(val_result) > 1 and len(unit_result) == 0:
                    metadict[currentkey][temp[0].strip()] = tuple((float(val) for val in val_result))
                elif len(val_result) == 1 and len(unit_result) == 0:
                    try:
                        metadict[currentkey][temp[0].strip()] = int(val_result[0])
                    except ValueError:
                        try:
                            metadict[currentkey][temp[0].strip()] = float(val_result[0])
                        except ValueError:
                            print("The encountered 'val_result' was neither an integer as a string, nor a floatable string")
                            raise
                else:
                    metadict[currentkey][temp[0].strip()] = temp[1].strip()
            elif len(temp) == 3:
                if temp[1].strip() == "http" or temp[1].strip() == "https":
                    metadict[currentkey][temp[0].strip()] = ":".join((temp[1], temp[2]))
                else:
                    metadict[currentkey][temp[0].strip()] = (temp[1].strip(), temp[2].strip())
            elif len(temp) == 4:
                metadict[currentkey][temp[0].strip()] = (temp[1].strip(), " : ".join((temp[2].strip(), temp[3].strip())))
    del metadict["binarydump"]
    return metadict
a43091691597093df188ca600db16b92a13d6b28
15,820
def split3(text, pat1, pat2):
    """Split text in 3 parts: before pat1, between, and after pat2."""
    part1, text = text.split(pat1, 1)
    part2, part3 = text.split(pat2, 1)
    return part1, part2, part3
5b8bff3b7214a1ac4999bfdc913082a98d1ed1b7
15,821
def parts(a, b):
    """https://stackoverflow.com/a/52698110"""
    q, r = divmod(a, b)
    return [q + 1] * r + [q] * (b - r)
f6bffd65b39b83532d7105966a503e7e8c96db55
15,823
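A quick sanity check for parts above: splitting 10 items into 3 near-equal parts gives sizes 4, 3, 3.

    assert parts(10, 3) == [4, 3, 3]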
def exclude(items, excludes):
    """Remove all items in a list that match an entry in the excludes list (for dirs)"""
    for ex in excludes:
        items = [x for x in items if ex not in x]
    return items
5a9de305dbfc804b06e6533dd20030b8cbca00f8
15,824
import argparse

def parse_args():
    """
    Defines commandline arguments and options accepted by this script.
    """
    parser = argparse.ArgumentParser(
        description='EPUB packaging tools. Create or extract epub files to manipulate files')
    cmd_choices = ['make', 'extract']
    parser.add_argument('cmd', type=str, choices=cmd_choices,
                        help='command to create an epub from a valid tree structure or to '
                             'extract the contents of the epub file for manipulation')
    parser.add_argument('input', type=str,
                        help='Path to .opf file to package or to the .epub file to extract')
    parser.add_argument('target', type=str, default=None,
                        help='Name of target file. If a path to a folder is given, it will '
                             'output the result to that path')
    parser.add_argument('-o', "--open", action='store_true',
                        help='Open resulting target')
    args = parser.parse_args()
    for cmd in cmd_choices:
        setattr(args, cmd, False)
    setattr(args, args.cmd, True)
    return args
9fe2cf3c5ff1e576dc412b8a3e13bf6887bbd7d2
15,827
def _GenerateEstimatorConstructor(estimator_class_name, variable_types, variable_names, extension_class_name):
    """
    Generates the constructor for the estimator class.
    """
    code = ["\n\npublic {0}(IHostEnvironment env".format(estimator_class_name)]

    # Generate the constructor parameters
    for variable_type, variable_name in zip(variable_types, variable_names):
        code.append(", {0}.TransformParameter<{1}> {2}".format(extension_class_name, variable_type, variable_name))
    code.extend(
        [
            ", string outputColumn",
            ")\n{"
        ]
    )

    # Generate assigning the values in the constructor
    for variable_name in variable_names:
        code.append("\n_{0} = {0};".format(variable_name))

    # Add assignments that are always required
    code.extend(
        [
            "\n_outputColumn = outputColumn;",
            "\n_host = env.Register(nameof({0}));".format(estimator_class_name),
            "\n}"
        ]
    )
    return "".join(code)
19366e1e25befa2e0723604d31f0f59b602b9b51
15,829
import requests

def download_torrent_file(torrent_file_url):
    """
    Downloads the torrent file at the given URL and returns its file name.
    Returns None if the download fails.
    """
    user_agent = 'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)'
    rs = requests.get(torrent_file_url, headers={'User-Agent': user_agent})
    if not rs.ok:
        print('Download failed: {}\n\n{}'.format(rs.status_code, rs.text))
        return

    # Now extract the torrent file name from the response headers
    file_name = rs.headers['Content-Disposition']
    file_name = file_name.replace('attachment; filename=', '').replace('"', '')

    with open(file_name, 'wb') as f:
        f.write(rs.content)  # raw bytes; re-encoding the decoded text would corrupt the binary file

    return file_name
41f7ef4a03266b16a7a1d8e1add4b13ecc2e0ea6
15,831